Unreviewed build fix after r185014. Some tests only have subtests and not metrics.
author    rniwa@webkit.org <rniwa@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Sat, 30 May 2015 08:52:28 +0000 (08:52 +0000)
committer rniwa@webkit.org <rniwa@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Sat, 30 May 2015 08:52:28 +0000 (08:52 +0000)
* Scripts/webkitpy/benchmark_runner/benchmark_results.py:
(BenchmarkResults._lint_subtest_results):
* Scripts/webkitpy/benchmark_runner/benchmark_results_unittest.py:
(test_aggregate_nested_results):
(test_lint_results):
* WinLauncher/WinLauncher.vcxproj/WinLauncherResource.h:

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@185031 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Tools/ChangeLog
Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py
Tools/Scripts/webkitpy/benchmark_runner/benchmark_results_unittest.py

diff --git a/Tools/ChangeLog b/Tools/ChangeLog
index d9f3efe4a484158372d3730ac104182d9d0c1238..712e5e926396ef7d14aa5ab72cd0aee37d9c81f9 100644
@@ -1,3 +1,14 @@
+2015-05-30  Ryosuke Niwa  <rniwa@webkit.org>
+
+        Unreviewed build fix after r185014. Some tests only have subtests and not metrics.
+
+        * Scripts/webkitpy/benchmark_runner/benchmark_results.py:
+        (BenchmarkResults._lint_subtest_results):
+        * Scripts/webkitpy/benchmark_runner/benchmark_results_unittest.py:
+        (test_aggregate_nested_results):
+        (test_lint_results):
+        * WinLauncher/WinLauncher.vcxproj/WinLauncherResource.h:
+
 2015-05-29  Ryosuke Niwa  <rniwa@webkit.org>
 
         run-benchmark should print out the results
diff --git a/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py b/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results.py
index 4fc01318ec1aad8db01315376de53ea22bc452a2..91049505555cce9bc3c873a2a6364968164d7786 100644
@@ -179,22 +179,23 @@ class BenchmarkResults(object):
     def _lint_subtest_results(cls, subtests, parent_needing_aggregation):
         iteration_groups_by_config = {}
         for test_name, test in subtests.iteritems():
-            if 'metrics' not in test:
-                raise TypeError('"%s" does not contain metrics' % test_name)
-
-            metrics = test['metrics']
-            if not isinstance(metrics, dict):
-                raise TypeError('The metrics in "%s" is not a dictionary' % test_name)
-
             needs_aggregation = False
-            for metric_name, metric in metrics.iteritems():
-                if isinstance(metric, list):
-                    cls._lint_aggregator_list(test_name, metric_name, metric)
-                    needs_aggregation = True
-                elif isinstance(metric, dict):
-                    cls._lint_configuration(test_name, metric_name, metric, parent_needing_aggregation, iteration_groups_by_config)
-                else:
-                    raise TypeError('"%s" metric of "%s" was not an aggregator list or a dictionary of configurations: %s' % (metric_name, test_name, str(metric)))
+
+            if 'metrics' not in test and 'tests' not in test:
+                raise TypeError('"%s" does not contain metrics or tests' % test_name)
+
+            if 'metrics' in test:
+                metrics = test['metrics']
+                if not isinstance(metrics, dict):
+                    raise TypeError('The metrics in "%s" is not a dictionary' % test_name)
+                for metric_name, metric in metrics.iteritems():
+                    if isinstance(metric, list):
+                        cls._lint_aggregator_list(test_name, metric_name, metric)
+                        needs_aggregation = True
+                    elif isinstance(metric, dict):
+                        cls._lint_configuration(test_name, metric_name, metric, parent_needing_aggregation, iteration_groups_by_config)
+                    else:
+                        raise TypeError('"%s" metric of "%s" was not an aggregator list or a dictionary of configurations: %s' % (metric_name, test_name, str(metric)))
 
             if 'tests' in test:
                 cls._lint_subtest_results(test['tests'], test_name if needs_aggregation else None)
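
For illustration, a minimal sketch (not part of the patch) of the kind of results dictionary the relaxed lint now accepts: a group test that carries only subtests and no metrics of its own. Before this change _lint_subtest_results raised TypeError for such a group; after it, the method recurses into the subtests. The group name, subtest names, and the leaf metric layout ({'current': [...]}) are assumptions chosen to mirror the shapes used in benchmark_results_unittest.py, not values taken from the patch.

    from webkitpy.benchmark_runner.benchmark_results import BenchmarkResults

    # Hypothetical input: 'PageLoad' has a 'tests' entry but no 'metrics' key.
    results = {
        'PageLoad': {
            'tests': {
                'apple.com': {'metrics': {'Time': {'current': [1, 2, 3]}}},
                'webkit.org': {'metrics': {'Time': {'current': [4, 5, 6]}}},
            },
        },
    }

    # Raised TypeError('"PageLoad" does not contain metrics') before this fix;
    # with the fix it should lint cleanly, assuming the leaf shape above is valid.
    BenchmarkResults._lint_results(results)
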
diff --git a/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results_unittest.py b/Tools/Scripts/webkitpy/benchmark_runner/benchmark_results_unittest.py
index 0411be97e7dd5abc1d71c6295adbb569c7719571..f66e981ca1b8e8a7678cb2126ddae4a28a314423 100644
@@ -195,7 +195,7 @@ SomeTest:Time:Arithmetic: 3.0ms stdev=33.3%
                     'SubTest2': {'metrics': {'Time': {None: {'current': [5, 6]}}}, 'tests': {}}}}})
 
     def test_lint_results(self):
-        with self.assertRaisesRegexp(TypeError, r'"SomeTest" does not contain metrics'):
+        with self.assertRaisesRegexp(TypeError, r'"SomeTest" does not contain metrics or tests'):
             BenchmarkResults._lint_results({'SomeTest': {}})
 
         with self.assertRaisesRegexp(TypeError, r'The metrics in "SomeTest" is not a dictionary'):
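
A hypothetical positive case (not in this patch) that test_lint_results could add to cover the new branch directly; the leaf metric shape is an assumption, and the assertion is simply that the call completes without raising:

        # Hypothetical: a group with only subtests and no metrics should now lint cleanly.
        BenchmarkResults._lint_results(
            {'SomeTest': {'tests': {'SubTest1': {'metrics': {'Time': {'current': [1, 2]}}}}}})
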