--- /dev/null
+# Copyright (C) 2015 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import json
+import math
+import re
+
+
+class BenchmarkResults(object):
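+    """Aggregates benchmark results and formats them for display.
+
+    A minimal sketch of the expected input shape, inferred from the unit
+    tests below (test names and the 'current' configuration are illustrative):
+
+        {'SomeTest': {'metrics': {'Time': {'current': [1, 2, 3]}},
+                      'tests': {...}}}
+    """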
+
+ aggregators = {
+ 'Total': (lambda values: sum(values)),
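+        # Note: for Python 2 integer inputs this is integer division; the unit
+        # tests rely on it (e.g. the arithmetic mean of [1, 4] is 2, not 2.5).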
+ 'Arithmetic': (lambda values: sum(values) / len(values)),
+ 'Geometric': (lambda values: math.exp(sum(map(math.log, values)) / len(values))),
+ }
+ metric_to_unit = {
+ 'FrameRate': 'fps',
+ 'Runs': '/s',
+ 'Time': 'ms',
+ 'Duration': 'ms',
+ 'Malloc': 'B',
+ 'Heap': 'B',
+ 'Allocations': 'B',
+ 'Score': 'pt',
+ }
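+    # Indexed by SI magnitude + 3 in _format_values, so SI_prefixes[3] == ''
+    # corresponds to 10^0.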
+ SI_prefixes = ['n', 'u', 'm', '', 'K', 'M', 'G', 'T', 'P', 'E']
+
+ def __init__(self, results):
+ self._lint_results(results)
+ self._results = self._aggregate_results(results)
+
+ def format(self):
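+        # Produces one line per test/metric/aggregator, e.g. (from the unit
+        # tests below): 'SomeTest:Time: 2.0ms stdev=50.0%'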
+ return self._format_tests(self._results)
+
+ @classmethod
+    def _format_tests(cls, tests, indent=''):
+ output = ''
+ config_name = 'current'
+ for test_name in sorted(tests.keys()):
+ is_first = True
+ test = tests[test_name]
+ metrics = test.get('metrics', {})
+ for metric_name in sorted(metrics.keys()):
+ metric = metrics[metric_name]
+ for aggregator_name in sorted(metric.keys()):
+ output += indent
+ if is_first:
+ output += test_name
+ is_first = False
+ else:
+ output += ' ' * len(test_name)
+ output += ':' + metric_name + ':'
+ if aggregator_name:
+ output += aggregator_name + ':'
+                    output += ' ' + cls._format_values(metric_name, metric[aggregator_name][config_name]) + '\n'
+            if 'tests' in test:
+                output += cls._format_tests(test['tests'], indent=(indent + ' ' * len(test_name)))
+ return output
+
+ @classmethod
+ def _format_values(cls, metric_name, values):
+ values = map(float, values)
+ total = sum(values)
+ mean = total / len(values)
+ square_sum = sum(map(lambda x: x * x, values))
+ sample_count = len(values)
+
+ # With sum and sum of squares, we can compute the sample standard deviation in O(1).
+ # See https://rniwa.com/2012-11-10/sample-standard-deviation-in-terms-of-sum-and-square-sum-of-samples/
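+        # Equivalently: sqrt((square_sum - total * total / sample_count) / (sample_count - 1)).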
+ if sample_count <= 1:
+ sample_stdev = 0
+ else:
+ sample_stdev = math.sqrt(square_sum / (sample_count - 1) - total * total / (sample_count - 1) / sample_count)
+
+ unit = cls._unit_from_metric(metric_name)
+
+ if unit == 'ms':
+ unit = 's'
+ mean = float(mean) / 1000
+ sample_stdev /= 1000
+
+ base = 1024 if unit == 'B' else 1000
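+        # Print one significant figure beyond the magnitude of the relative
+        # noise: e.g. mean=2.0, stdev=1.0 gives stdev/mean=0.5, so
+        # value_sig_fig=2 and the mean is formatted as '2.0'
+        # (cf. test_format_values_with_large_error).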
+ value_sig_fig = 1 - math.floor(math.log10(sample_stdev / mean)) if sample_stdev else 3
+ SI_magnitude = math.floor(math.log(mean, base))
+
+ scaled_mean = mean * math.pow(base, -SI_magnitude)
+ SI_prefix = cls.SI_prefixes[int(SI_magnitude) + 3]
+
+ non_floating_digits = 1 + math.floor(math.log10(scaled_mean))
+ floating_points_count = max(0, value_sig_fig - non_floating_digits)
+ return ('{mean:.' + str(int(floating_points_count)) + 'f}{prefix}{unit} stdev={delta:.1%}').format(
+ mean=scaled_mean, delta=sample_stdev / mean, prefix=SI_prefix, unit=unit)
+
+ @classmethod
+ def _unit_from_metric(cls, metric_name):
+        # FIXME: Detect unknown metric names
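+        # Matches the trailing capitalized word: e.g. a hypothetical 'PeakTime'
+        # metric would map to 'Time' and hence to 'ms'.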
+        suffix = re.match(r'.*?([A-Z][a-z]+|FrameRate)$', metric_name)
+ return cls.metric_to_unit[suffix.group(1)]
+
+ @classmethod
+ def _aggregate_results(cls, tests):
+ results = {}
+ for test_name, test in tests.iteritems():
+ results[test_name] = cls._aggregate_results_for_test(test)
+ return results
+
+ @classmethod
+ def _aggregate_results_for_test(cls, test):
+ subtest_results = cls._aggregate_results(test['tests']) if 'tests' in test else {}
+ results = {}
+ for metric_name, metric in test['metrics'].iteritems():
+ if not isinstance(metric, list):
+ results[metric_name] = {None: {}}
+ for config_name, values in metric.iteritems():
+ results[metric_name][None][config_name] = cls._flatten_list(values)
+ continue
+
+ aggregator_list = metric
+ results[metric_name] = {}
+ for aggregator in aggregator_list:
+ values_by_config_iteration = cls._subtest_values_by_config_iteration(subtest_results, metric_name, aggregator)
+ for config_name, values_by_iteration in values_by_config_iteration.iteritems():
+ results[metric_name].setdefault(aggregator, {})
+ results[metric_name][aggregator][config_name] = [cls._aggregate_values(aggregator, values) for values in values_by_iteration]
+
+ return {'metrics': results, 'tests': subtest_results}
+
+ @classmethod
+ def _flatten_list(cls, nested_list):
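+        # e.g. [[1, 2], [3, 4]] -> [1, 2, 3, 4]; iteration groups are flattened
+        # into a single series (see test_aggregate_results_with_groups).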
+ flattened_list = []
+ for item in nested_list:
+ if isinstance(item, list):
+ flattened_list += cls._flatten_list(item)
+ else:
+ flattened_list.append(item)
+ return flattened_list
+
+ @classmethod
+ def _subtest_values_by_config_iteration(cls, subtest_results, metric_name, aggregator):
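+        # Transposes subtest values so that each group collects the i-th
+        # iteration of every subtest: e.g. SubTest1=[1, 2, 3] and
+        # SubTest2=[4, 5, 6] become {'current': [[1, 4], [2, 5], [3, 6]]},
+        # which the 'Total' aggregator reduces to [5, 7, 9]
+        # (see test_aggregate_results).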
+ values_by_config_iteration = {}
+ for subtest_name, subtest in subtest_results.iteritems():
+ results_for_metric = subtest['metrics'].get(metric_name, {})
+ results_for_aggregator = results_for_metric.get(aggregator, results_for_metric.get(None, {}))
+ for config_name, values in results_for_aggregator.iteritems():
+ values_by_config_iteration.setdefault(config_name, [[] for _ in values])
+ for iteration, value in enumerate(values):
+ values_by_config_iteration[config_name][iteration].append(value)
+ return values_by_config_iteration
+
+ @classmethod
+ def _aggregate_values(cls, aggregator, values):
+ return cls.aggregators[aggregator](values)
+
+ @classmethod
+ def _lint_results(cls, tests):
+ cls._lint_subtest_results(tests, None)
+ return True
+
+ @classmethod
+ def _lint_subtest_results(cls, subtests, parent_needing_aggregation):
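+        # parent_needing_aggregation names the parent test whose metrics are
+        # aggregated from these subtests; when it is set, iteration counts must
+        # match across subtests (enforced in _lint_configuration).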
+ iteration_groups_by_config = {}
+ for test_name, test in subtests.iteritems():
+ if 'metrics' not in test:
+ raise TypeError('"%s" does not contain metrics' % test_name)
+
+ metrics = test['metrics']
+ if not isinstance(metrics, dict):
+            raise TypeError('The "metrics" of "%s" is not a dictionary' % test_name)
+
+ needs_aggregation = False
+ for metric_name, metric in metrics.iteritems():
+ if isinstance(metric, list):
+ cls._lint_aggregator_list(test_name, metric_name, metric)
+ needs_aggregation = True
+ elif isinstance(metric, dict):
+ cls._lint_configuration(test_name, metric_name, metric, parent_needing_aggregation, iteration_groups_by_config)
+ else:
+ raise TypeError('"%s" metric of "%s" was not an aggregator list or a dictionary of configurations: %s' % (metric_name, test_name, str(metric)))
+
+ if 'tests' in test:
+ cls._lint_subtest_results(test['tests'], test_name if needs_aggregation else None)
+ elif needs_aggregation:
+            raise TypeError('"%s" requires aggregation but it has no subtests' % (test_name))
+ return iteration_groups_by_config
+
+ @classmethod
+ def _lint_aggregator_list(cls, test_name, metric_name, aggregator_list):
+ if len(aggregator_list) != len(set(aggregator_list)):
+ raise TypeError('"%s" metric of "%s" had invalid aggregator list: %s' % (metric_name, test_name, json.dumps(aggregator_list)))
+ if not aggregator_list:
+ raise TypeError('The aggregator list is empty in "%s" metric of "%s"' % (metric_name, test_name))
+ for aggregator_name in aggregator_list:
+ if cls._is_numeric(aggregator_name):
+ raise TypeError('"%s" metric of "%s" is not wrapped by a configuration; e.g. "current"' % (metric_name, test_name))
+ if aggregator_name not in cls.aggregators:
+ raise TypeError('"%s" metric of "%s" uses unknown aggregator: %s' % (metric_name, test_name, aggregator_name))
+
+ @classmethod
+ def _lint_configuration(cls, test_name, metric_name, configurations, parent_needing_aggregation, iteration_groups_by_config):
+ # FIXME: Check that config_name is always "current".
+ for config_name, values in configurations.iteritems():
+ nested_list_count = [isinstance(value, list) for value in values].count(True)
+ if nested_list_count not in [0, len(values)]:
+ raise TypeError('"%s" metric of "%s" had malformed values: %s' % (metric_name, test_name, json.dumps(values)))
+
+ if nested_list_count:
+ value_shape = []
+ for value_group in values:
+ value_shape.append(len(value_group))
+ cls._lint_values(test_name, metric_name, value_group)
+ else:
+ value_shape = len(values)
+ cls._lint_values(test_name, metric_name, values)
+
+ iteration_groups_by_config.setdefault(metric_name, {}).setdefault(config_name, value_shape)
+ if parent_needing_aggregation and value_shape != iteration_groups_by_config[metric_name][config_name]:
+                raise TypeError('"%s" metric of "%s" had mismatching subtest values' % (metric_name, parent_needing_aggregation))
+
+ @classmethod
+ def _lint_values(cls, test_name, metric_name, values):
+ if any([not cls._is_numeric(value) for value in values]):
+ raise TypeError('"%s" metric of "%s" contains non-numeric value: %s' % (metric_name, test_name, json.dumps(values)))
+
+ @classmethod
+ def _is_numeric(cls, value):
+ return isinstance(value, int) or isinstance(value, float)
--- /dev/null
+# Copyright (C) 2015 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from benchmark_results import BenchmarkResults
+
+
+class BenchmarkResultsTest(unittest.TestCase):
+ def test_init(self):
+ results = BenchmarkResults({'SomeTest': {'metrics': {'Time': {'current': [1, 2, 3]}}}})
+ self.assertEqual(results._results, {'SomeTest': {'metrics': {'Time': {None: {'current': [1, 2, 3]}}}, 'tests': {}}})
+
+ with self.assertRaisesRegexp(TypeError, r'"Time" metric of "SomeTest" contains non-numeric value: \[1, 2, "a"\]'):
+ BenchmarkResults({'SomeTest': {'metrics': {'Time': {'current': [1, 2, 'a']}}}})
+
+ def test_format(self):
+ result = BenchmarkResults({'SomeTest': {'metrics': {'Time': {'current': [1, 2, 3]}}}})
+ self.assertEqual(result.format(), 'SomeTest:Time: 2.0ms stdev=50.0%\n')
+
+ result = BenchmarkResults({'SomeTest': {'metrics': {'Time': {'current': [1, 2, 3]}, 'Score': {'current': [2, 3, 4]}}}})
+ self.assertEqual(result.format(), '''
+SomeTest:Score: 3.0pt stdev=33.3%
+ :Time: 2.0ms stdev=50.0%
+'''[1:])
+
+ result = BenchmarkResults({'SomeTest': {
+ 'metrics': {'Time': ['Total', 'Arithmetic']},
+ 'tests': {
+ 'SubTest1': {'metrics': {'Time': {'current': [1, 2, 3]}}},
+ 'SubTest2': {'metrics': {'Time': {'current': [4, 5, 6]}}}}}})
+ self.assertEqual(result.format(), '''
+SomeTest:Time:Arithmetic: 3.0ms stdev=33.3%
+ :Time:Total: 7.0ms stdev=28.6%
+ SubTest1:Time: 2.0ms stdev=50.0%
+ SubTest2:Time: 5.0ms stdev=20.0%
+'''[1:])
+
+ def test_format_values_with_large_error(self):
+ self.assertEqual(BenchmarkResults._format_values('Runs', [1, 2, 3]), '2.0/s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [10, 20, 30]), '20/s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [100, 200, 300]), '200/s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [1000, 2000, 3000]), '2.0K/s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [10000, 20000, 30000]), '20K/s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [100000, 200000, 300000]), '200K/s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [1000000, 2000000, 3000000]), '2.0M/s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [0.1, 0.2, 0.3]), '200m/s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [0.01, 0.02, 0.03]), '20m/s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [0.001, 0.002, 0.003]), '2.0m/s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [0.0001, 0.0002, 0.0003]), '200u/s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [0.00001, 0.00002, 0.00003]), '20u/s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [0.000001, 0.000002, 0.000003]), '2.0u/s stdev=50.0%')
+
+ def test_format_values_with_small_error(self):
+ self.assertEqual(BenchmarkResults._format_values('Runs', [1.1, 1.2, 1.3]), '1.20/s stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [11, 12, 13]), '12.0/s stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [110, 120, 130]), '120/s stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [1100, 1200, 1300]), '1.20K/s stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [11000, 12000, 13000]), '12.0K/s stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [110000, 120000, 130000]), '120K/s stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [1100000, 1200000, 1300000]), '1.20M/s stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [0.11, 0.12, 0.13]), '120m/s stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [0.011, 0.012, 0.013]), '12.0m/s stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [0.0011, 0.0012, 0.0013]), '1.20m/s stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [0.00011, 0.00012, 0.00013]), '120u/s stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [0.000011, 0.000012, 0.000013]), '12.0u/s stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Runs', [0.0000011, 0.0000012, 0.0000013]), '1.20u/s stdev=8.3%')
+
+ def test_format_values_with_time(self):
+ self.assertEqual(BenchmarkResults._format_values('Time', [1, 2, 3]), '2.0ms stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Time', [10, 20, 30]), '20ms stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Time', [100, 200, 300]), '200ms stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Time', [1000, 2000, 3000]), '2.0s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Time', [10000, 20000, 30000]), '20s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Time', [100000, 200000, 300000]), '200s stdev=50.0%')
+ self.assertEqual(BenchmarkResults._format_values('Time', [0.11, 0.12, 0.13]), '120us stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Time', [0.011, 0.012, 0.013]), '12.0us stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Time', [0.0011, 0.0012, 0.0013]), '1.20us stdev=8.3%')
+ self.assertEqual(BenchmarkResults._format_values('Time', [0.00011, 0.00012, 0.00013]), '120ns stdev=8.3%')
+
+ def test_format_values_with_no_error(self):
+ self.assertEqual(BenchmarkResults._format_values('Time', [1, 1, 1]), '1.00ms stdev=0.0%')
+
+ def test_format_values_with_small_difference(self):
+ self.assertEqual(BenchmarkResults._format_values('Time', [5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4]),
+ '4.05ms stdev=5.5%')
+
+ def test_aggregate_results(self):
+ self.maxDiff = None
+ self.assertEqual(BenchmarkResults._aggregate_results(
+ {'SomeTest': {'metrics': {'Time': {'current': [1, 2, 3]}}}}),
+ {'SomeTest': {'metrics': {'Time': {None: {'current': [1, 2, 3]}}}, 'tests': {}}})
+
+ self.assertEqual(BenchmarkResults._aggregate_results(
+ {'SomeTest': {
+ 'metrics': {'Time': ['Total']},
+ 'tests': {
+ 'SubTest1': {'metrics': {'Time': {'current': [1, 2, 3]}}},
+ 'SubTest2': {'metrics': {'Time': {'current': [4, 5, 6]}}}}}}),
+ {'SomeTest': {
+ 'metrics': {'Time': {'Total': {'current': [5, 7, 9]}}},
+ 'tests': {
+ 'SubTest1': {'metrics': {'Time': {None: {'current': [1, 2, 3]}}}, 'tests': {}},
+ 'SubTest2': {'metrics': {'Time': {None: {'current': [4, 5, 6]}}}, 'tests': {}}}}})
+
+ self.assertEqual(BenchmarkResults._aggregate_results(
+ {'SomeTest': {
+ 'metrics': {'Time': ['Total'], 'Runs': ['Total']},
+ 'tests': {
+ 'SubTest1': {'metrics': {'Time': {'current': [1, 2, 3]}}},
+ 'SubTest2': {'metrics': {'Time': {'current': [4, 5, 6]}}},
+ 'SubTest3': {'metrics': {'Runs': {'current': [7, 8, 9]}}}}}}),
+ {'SomeTest': {
+ 'metrics': {
+ 'Time': {'Total': {'current': [5, 7, 9]}},
+ 'Runs': {'Total': {'current': [7, 8, 9]}}},
+ 'tests': {
+ 'SubTest1': {'metrics': {'Time': {None: {'current': [1, 2, 3]}}}, 'tests': {}},
+ 'SubTest2': {'metrics': {'Time': {None: {'current': [4, 5, 6]}}}, 'tests': {}},
+ 'SubTest3': {'metrics': {'Runs': {None: {'current': [7, 8, 9]}}}, 'tests': {}}}}})
+
+    def test_aggregate_results_with_groups(self):
+ self.maxDiff = None
+ self.assertEqual(BenchmarkResults._aggregate_results(
+ {'SomeTest': {
+ 'metrics': {'Time': ['Total']},
+ 'tests': {
+ 'SubTest1': {'metrics': {'Time': {'current': [[1, 2], [3, 4]]}}},
+ 'SubTest2': {'metrics': {'Time': {'current': [[5, 6], [7, 8]]}}}}}}),
+ {'SomeTest': {
+ 'metrics': {'Time': {'Total': {'current': [6, 8, 10, 12]}}},
+ 'tests': {
+ 'SubTest1': {'metrics': {'Time': {None: {'current': [1, 2, 3, 4]}}}, 'tests': {}},
+ 'SubTest2': {'metrics': {'Time': {None: {'current': [5, 6, 7, 8]}}}, 'tests': {}}}}})
+
+ def test_aggregate_nested_results(self):
+ self.maxDiff = None
+ self.assertEqual(BenchmarkResults._aggregate_results(
+ {'SomeTest': {
+ 'metrics': {'Time': ['Total']},
+ 'tests': {
+ 'SubTest1': {
+ 'metrics': {'Time': ['Total']},
+ 'tests': {
+ 'GrandChild1': {'metrics': {'Time': {'current': [1, 2]}}},
+ 'GrandChild2': {'metrics': {'Time': {'current': [3, 4]}}}}},
+ 'SubTest2': {'metrics': {'Time': {'current': [5, 6]}}}}}}),
+ {'SomeTest': {
+ 'metrics': {'Time': {'Total': {'current': [9, 12]}}},
+ 'tests': {
+ 'SubTest1': {
+ 'metrics': {'Time': {'Total': {'current': [4, 6]}}},
+ 'tests': {
+ 'GrandChild1': {'metrics': {'Time': {None: {'current': [1, 2]}}}, 'tests': {}},
+ 'GrandChild2': {'metrics': {'Time': {None: {'current': [3, 4]}}}, 'tests': {}}}},
+ 'SubTest2': {'metrics': {'Time': {None: {'current': [5, 6]}}}, 'tests': {}}}}})
+
+ self.assertEqual(BenchmarkResults._aggregate_results(
+ {'SomeTest': {
+ 'metrics': {'Time': ['Total']},
+ 'tests': {
+ 'SubTest1': {
+ 'metrics': {'Time': ['Total', 'Arithmetic']},
+ 'tests': {
+ 'GrandChild1': {'metrics': {'Time': {'current': [1, 2]}}},
+ 'GrandChild2': {'metrics': {'Time': {'current': [3, 4]}}}}},
+ 'SubTest2': {'metrics': {'Time': {'current': [5, 6]}}}}}}),
+ {'SomeTest': {
+ 'metrics': {'Time': {'Total': {'current': [9, 12]}}},
+ 'tests': {
+ 'SubTest1': {
+ 'metrics': {'Time': {'Total': {'current': [4, 6]}, 'Arithmetic': {'current': [2, 3]}}},
+ 'tests': {
+ 'GrandChild1': {'metrics': {'Time': {None: {'current': [1, 2]}}}, 'tests': {}},
+ 'GrandChild2': {'metrics': {'Time': {None: {'current': [3, 4]}}}, 'tests': {}}}},
+ 'SubTest2': {'metrics': {'Time': {None: {'current': [5, 6]}}}, 'tests': {}}}}})
+
+ def test_lint_results(self):
+ with self.assertRaisesRegexp(TypeError, r'"SomeTest" does not contain metrics'):
+ BenchmarkResults._lint_results({'SomeTest': {}})
+
+        with self.assertRaisesRegexp(TypeError, r'The "metrics" of "SomeTest" is not a dictionary'):
+ BenchmarkResults._lint_results({'SomeTest': {'metrics': []}})
+
+ with self.assertRaisesRegexp(TypeError, r'The aggregator list is empty in "Time" metric of "SomeTest"'):
+ BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': []}}})
+
+ with self.assertRaisesRegexp(TypeError, r'"Time" metric of "SomeTest" is not wrapped by a configuration; e.g. "current"'):
+ BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': [1, 2]}}})
+
+ self.assertTrue(BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': {'current': [1, 2]}}}}))
+
+ with self.assertRaisesRegexp(TypeError, r'"Time" metric of "SomeTest" was not an aggregator list or a dictionary of configurations: 1'):
+ BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': 1}}})
+
+ with self.assertRaisesRegexp(TypeError, r'"Time" metric of "SomeTest" contains non-numeric value: \["Total"\]'):
+ BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': {'current': ['Total']}}}})
+
+ with self.assertRaisesRegexp(TypeError, r'"Time" metric of "SomeTest" contains non-numeric value: \["Total", "Geometric"\]'):
+ BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': {'current': [['Total', 'Geometric']]}}}})
+
+        with self.assertRaisesRegexp(TypeError, r'"SomeTest" requires aggregation but it has no subtests'):
+ BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': ['Total']}}})
+
+ with self.assertRaisesRegexp(TypeError, r'"Time" metric of "SomeTest" had invalid aggregator list: \["Total", "Total"\]'):
+ BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': ['Total', 'Total']}, 'tests': {
+ 'SubTest1': {'metrics': {'Time': {'current': []}}}}}})
+
+ with self.assertRaisesRegexp(TypeError, r'"Time" metric of "SomeTest" uses unknown aggregator: KittenMean'):
+ BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': ['KittenMean']}, 'tests': {
+ 'SubTest1': {'metrics': {'Time': {'current': []}}}}}})
+
+        with self.assertRaisesRegexp(TypeError, r'"Time" metric of "SomeTest" had mismatching subtest values'):
+ BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': ['Total']}, 'tests': {
+ 'SubTest1': {'metrics': {'Time': {'current': [1, 2, 3]}}},
+ 'SubTest2': {'metrics': {'Time': {'current': [4, 5, 6, 7]}}}}}})
+
+        with self.assertRaisesRegexp(TypeError, r'"Time" metric of "SomeTest" had mismatching subtest values'):
+ BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': ['Total']}, 'tests': {
+ 'SubTest1': {'metrics': {'Time': {'current': [[1, 2], [3]]}}},
+ 'SubTest2': {'metrics': {'Time': {'current': [[4, 5], [6, 7]]}}}}}})
+
+ with self.assertRaisesRegexp(TypeError, r'"Time" metric of "SomeTest" had malformed values: \[1, \[2\], 3\]'):
+ BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': {'current': [1, [2], 3]}}}})
+
+ self.assertTrue(BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': ['Total']}, 'tests': {
+ 'SubTest1': {'metrics': {'Time': {'current': [1, 2, 3]}}},
+ 'SubTest2': {'metrics': {'Time': {'current': [4, 5, 6], 'baseline': [7]}}}}}}))
+
+ self.assertTrue(BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': ['Total']}, 'tests': {
+ 'SubTest1': {'metrics': {'Time': {'current': [1, 2, 3]}}},
+ 'SubTest2': {'metrics': {'Runs': {'current': [4, 5, 6, 7]}}}}}}))
+
+ self.assertTrue(BenchmarkResults._lint_results({'SomeTest': {'metrics': {'Time': ['Total']}, 'tests': {
+ 'SubTest1': {'metrics': {'Time': {'current': [[1, 2], [3, 4]]}}},
+ 'SubTest2': {'metrics': {'Time': {'current': [[5, 6], [7, 8]]}}}}}}))