Automate DoYouEvenBench
author: rniwa@webkit.org <rniwa@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 17 Jan 2014 06:06:36 +0000 (06:06 +0000)
committer: rniwa@webkit.org <rniwa@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 17 Jan 2014 06:06:36 +0000 (06:06 +0000)
https://bugs.webkit.org/show_bug.cgi?id=124497

Reviewed by Geoffrey Garen.

PerformanceTests:

Enable DoYouEvenBench/Full.html on perf bots by default.

Put a space between the time and "ms", and fixed a typo in runner.js ("aggregation" instead of "aggregator") so that the aggregator name will be reported.

* DoYouEvenBench/Full.html:
* Skipped:
* resources/runner.js:

Tools:

* Scripts/webkitpy/performance_tests/perftest.py:
(PerfTestMetric.__init__): Added the aggregator name as an argument.
(PerfTestMetric.aggregator): Added.
(PerfTest._metrics_regex): Made the subtest name match non-greedy so that metric names
won't be eagerly parsed as part of the subtest name; e.g. "Time" and "Total" in "a:Time:Total"
should be parsed as the metric and the aggregator, respectively (see the sketch below).
(PerfTest._run_with_driver): Pass in the aggregator name.
(PerfTest._ensure_metrics): Ditto. Also split the subtest name by '/' as required by DoYouEvenBench,
which generates subtests of subtests within a single test file.
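
A minimal sketch (not part of the patch) of why the non-greedy subtest match matters,
reusing the regex verbatim from the perftest.py hunk below:

    import re

    # Non-greedy '.+?' in the subtest group; the old greedy '.+' would have swallowed
    # ':Time' into the subtest name and reported 'Total' as the metric.
    metrics_regex = re.compile(
        r'^(?P<subtest>[A-Za-z0-9\(\[].+?)?:(?P<metric>[A-Z][A-Za-z]+)'
        r'(:(?P<aggregator>[A-Z][A-Za-z]+))? -> '
        r'\[(?P<values>(\d+(\.\d+)?)(, \d+(\.\d+)?)+)\] (?P<unit>[a-z/]+)?$')

    match = metrics_regex.match('EmberJS-TodoMVC:Time:Total -> [1462, 1473] ms')
    assert match.group('subtest') == 'EmberJS-TodoMVC'
    assert match.group('metric') == 'Time'
    assert match.group('aggregator') == 'Total'

    # A '/' in a subtest name such as 'EmberJS-TodoMVC/a' is split into path
    # components, producing ['some-dir', 'some-test', 'EmberJS-TodoMVC', 'a'].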

* Scripts/webkitpy/performance_tests/perftest_unittest.py:
(test_parse_output_with_subtests_and_total): Added.

* Scripts/webkitpy/performance_tests/perftestsrunner.py:
(_generate_results_dict): Added the aggregator name to the JSON when one is available, as sketched below.
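
For illustration only (values mirror the integration test below), a metric that carries
an aggregator ends up in the results JSON roughly as:

    'total-test': {
        'metrics': {
            'Time': {
                'current': [[1.0, 2.0, 3.0, 4.0, 5.0]] * 4,
                'aggregators': ['Total'],  # emitted only when an aggregator was parsed
            }
        }
    }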

* Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py:
(TestWithSubtestsData): Added a subtest with an aggregator and a sub-subtest.

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@162183 268f45cc-cd09-0410-ab3c-d52691b4dbfc

PerformanceTests/ChangeLog
PerformanceTests/DoYouEvenBench/Full.html
PerformanceTests/Skipped
PerformanceTests/resources/runner.js
Tools/ChangeLog
Tools/Scripts/webkitpy/performance_tests/perftest.py
Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py

diff --git a/PerformanceTests/ChangeLog b/PerformanceTests/ChangeLog
index 383bf9e..c20ddc3 100644
@@ -1,3 +1,18 @@
+2014-01-16  Ryosuke Niwa  <rniwa@webkit.org>
+
+        Automate DoYouEvenBench
+        https://bugs.webkit.org/show_bug.cgi?id=124497
+
+        Reviewed by Geoffrey Garen.
+
+        Enable DoYouEvenBench/Full.html on perf bots by default.
+
+        Put a space between the time and "ms", and fixed a typo in runner.js ("aggregation" instead of "aggregator") so that the aggregator name will be reported.
+
+        * DoYouEvenBench/Full.html:
+        * Skipped:
+        * resources/runner.js:
+
 2014-01-15  Manuel Rego Casasnovas  <rego@igalia.com>
 
         [CSS Regions] Add performance tests for selection with mixed content
diff --git a/PerformanceTests/DoYouEvenBench/Full.html b/PerformanceTests/DoYouEvenBench/Full.html
index f659ead..5ae4c1c 100644
             }
             values.push(measuredValues.total);
             iterationNumber++;
-            pre.appendChild(document.createTextNode('Iteration ' + iterationNumber + ': ' + measuredValues.total + 'ms\n'));
+            pre.appendChild(document.createTextNode('Iteration ' + iterationNumber + ': ' + measuredValues.total + ' ms\n'));
         },
         didFinishLastIteration: function () {
             var sum = 0;
             for (var i = 0; i < values.length; i++)
                 sum += values[i];
-            pre.appendChild(document.createTextNode('Average: ' + (sum / iterationNumber)  + 'ms\n'));
+            pre.appendChild(document.createTextNode('Average: ' + (sum / iterationNumber)  + ' ms\n'));
             pre.style.paddingTop = 0;
         }
     }
diff --git a/PerformanceTests/Skipped b/PerformanceTests/Skipped
index 353fd06..3e8f1d3 100644
@@ -89,5 +89,5 @@ Layout/RegionsSelectAllMixedContent.html
 # https://bugs.webkit.org/show_bug.cgi?id=113811#c2
 Layout/LineLayoutJapanese.html
 
-# New DOM benchmark is not ready for the prime time yet.
-DoYouEvenBench
+# Don't run the interactive runner; we run Full.html instead.
+DoYouEvenBench/benchmark.html
diff --git a/PerformanceTests/resources/runner.js b/PerformanceTests/resources/runner.js
index ee1c630..eb01515 100755
@@ -221,7 +221,7 @@ if (window.testRunner) {
             if (currentTest.description)
                 PerfTestRunner.log("Description: " + currentTest.description);
             metric = {'fps': 'FrameRate', 'runs/s': 'Runs', 'ms': 'Time'}[PerfTestRunner.unit];
-            var suffix = currentTest.aggregation ? ':' + currentTest.aggregation : '';
+            var suffix = currentTest.aggregator ? ':' + currentTest.aggregator : '';
             PerfTestRunner.logStatistics(results, PerfTestRunner.unit, prefix + ":" + metric + suffix);
             if (jsHeapResults.length) {
                 PerfTestRunner.logStatistics(jsHeapResults, "bytes", prefix + ":JSHeap");
diff --git a/Tools/ChangeLog b/Tools/ChangeLog
index 65da477..239aad2 100644
@@ -1,3 +1,29 @@
+2014-01-16  Ryosuke Niwa  <rniwa@webkit.org>
+
+        Automate DoYouEvenBench
+        https://bugs.webkit.org/show_bug.cgi?id=124497
+
+        Reviewed by Geoffrey Garen.
+
+        * Scripts/webkitpy/performance_tests/perftest.py:
+        (PerfTestMetric.__init__): Added the aggregator name as an argument.
+        (PerfTestMetric.aggregator): Added.
+        (PerfTest._metrics_regex): Made the subtest name match non-greedy so that metric names
+        won't be eagerly parsed as part of the subtest name; e.g. "Time" and "Total" in "a:Time:Total"
+        should be parsed as the metric and the aggregator, respectively.
+        (PerfTest._run_with_driver): Pass in the aggregator name.
+        (PerfTest._ensure_metrics): Ditto. Also split the subtest name by '/' as required by DoYouEvenBench,
+        which generates subtests of subtests within a single test file.
+
+        * Scripts/webkitpy/performance_tests/perftest_unittest.py:
+        (test_parse_output_with_subtests_and_total): Added.
+
+        * Scripts/webkitpy/performance_tests/perftestsrunner.py:
+        (_generate_results_dict): Added the aggregator name to the JSON when one is available.
+
+        * Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py:
+        (TestWithSubtestsData): Added a subtest with an aggregator and a sub-subtest.
+
 2014-01-16  Chris Fleizach  <cfleizach@apple.com>
 
         platform/mac/accessibility/aria-multiline.html sometimes asserts in AccessibilityController::removeNotificationListener
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest.py b/Tools/Scripts/webkitpy/performance_tests/perftest.py
index b88aa55..1cfc5f1 100644
@@ -50,10 +50,11 @@ _log = logging.getLogger(__name__)
 
 
 class PerfTestMetric(object):
-    def __init__(self, path, test_file_name, metric, unit=None, iterations=None):
+    def __init__(self, path, test_file_name, metric, unit=None, aggregator=None, iterations=None):
         # FIXME: Fix runner.js to report correct metric names
         self._iterations = iterations or []
         self._unit = unit or self.metric_to_unit(metric)
+        self._aggregator = aggregator
         self._metric = self.time_unit_to_metric(self._unit) if metric == 'Time' else metric
         self._path = path
         self._test_file_name = test_file_name
@@ -61,6 +62,9 @@ class PerfTestMetric(object):
     def name(self):
         return self._metric
 
+    def aggregator(self):
+        return self._aggregator
+
     def path(self):
         return self._path
 
@@ -168,7 +172,7 @@ class PerfTest(object):
             (median, unit, stdev, unit, sorted_values[0], unit, sorted_values[-1], unit))
 
     _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
-    _metrics_regex = re.compile(r'^(?P<subtest>[A-Za-z0-9\(\[].+)?:(?P<metric>[A-Z][A-Za-z]+)(:(?P<aggregator>[A-Z][A-Za-z]+))? -> \[(?P<values>(\d+(\.\d+)?)(, \d+(\.\d+)?)+)\] (?P<unit>[a-z/]+)?$')
+    _metrics_regex = re.compile(r'^(?P<subtest>[A-Za-z0-9\(\[].+?)?:(?P<metric>[A-Z][A-Za-z]+)(:(?P<aggregator>[A-Z][A-Za-z]+))? -> \[(?P<values>(\d+(\.\d+)?)(, \d+(\.\d+)?)+)\] (?P<unit>[a-z/]+)?$')
 
     def _run_with_driver(self, driver, time_out_ms):
         output = self.run_single(driver, self.test_path(), time_out_ms)
@@ -188,12 +192,12 @@ class PerfTest(object):
                 _log.error('ERROR: ' + line)
                 return False
 
-            metric = self._ensure_metrics(metric_match.group('metric'), metric_match.group('subtest'), metric_match.group('unit'))
+            metric = self._ensure_metrics(metric_match.group('metric'), metric_match.group('subtest'), metric_match.group('unit'), metric_match.group('aggregator'))
             metric.append_group(map(lambda value: float(value), metric_match.group('values').split(', ')))
 
         return True
 
-    def _ensure_metrics(self, metric_name, subtest_name='', unit=None):
+    def _ensure_metrics(self, metric_name, subtest_name='', unit=None, aggregator=None):
         try:
             subtest = next(subtest for subtest in self._metrics if subtest['name'] == subtest_name)
         except StopIteration:
@@ -205,8 +209,8 @@ class PerfTest(object):
         except StopIteration:
             path = self.test_name_without_file_extension().split('/')
             if subtest_name:
-                path += [subtest_name]
-            metric = PerfTestMetric(path, self._test_name, metric_name, unit)
+                path += subtest_name.split('/')
+            metric = PerfTestMetric(path, self._test_name, metric_name, unit, aggregator)
             subtest['metrics'].append(metric)
             return metric
 
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
index 23818e4..5a0fbc7 100644
@@ -211,6 +211,55 @@ RESULT some-dir: some-test: Time= 1100.0 ms
 median= 1101.0 ms, stdev= 13.3140211016 ms, min= 1080.0 ms, max= 1120.0 ms
 """)
 
+    def test_parse_output_with_subtests_and_total(self):
+        output = DriverOutput("""
+:Time:Total -> [2324, 2328, 2345, 2314, 2312] ms
+EmberJS-TodoMVC:Time:Total -> [1462, 1473, 1490, 1465, 1458] ms
+EmberJS-TodoMVC/a:Time -> [1, 2, 3, 4, 5] ms
+BackboneJS-TodoMVC:Time -> [862, 855, 855, 849, 854] ms
+""", image=None, image_hash=None, audio=None)
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            test = PerfTest(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
+            test.run_single = lambda driver, path, time_out_ms: output
+            self.assertTrue(test.run(10))
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+
+        subtests = test._metrics
+        self.assertEqual(map(lambda test: test['name'], subtests), [None, 'EmberJS-TodoMVC', 'EmberJS-TodoMVC/a', 'BackboneJS-TodoMVC'])
+
+        main_metrics = subtests[0]['metrics']
+        self.assertEqual(map(lambda metric: metric.name(), main_metrics), ['Time'])
+        self.assertEqual(main_metrics[0].aggregator(), 'Total')
+        self.assertEqual(main_metrics[0].path(), ['some-dir', 'some-test'])
+        self.assertEqual(main_metrics[0].flattened_iteration_values(), [2324, 2328, 2345, 2314, 2312] * 4)
+
+        some_test_metrics = subtests[1]['metrics']
+        self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time'])
+        self.assertEqual(some_test_metrics[0].aggregator(), 'Total')
+        self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'EmberJS-TodoMVC'])
+        self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1462, 1473, 1490, 1465, 1458] * 4)
+
+        some_test_metrics = subtests[2]['metrics']
+        self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time'])
+        self.assertEqual(some_test_metrics[0].aggregator(), None)
+        self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'EmberJS-TodoMVC', 'a'])
+        self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [1, 2, 3, 4, 5] * 4)
+
+        some_test_metrics = subtests[3]['metrics']
+        self.assertEqual(map(lambda metric: metric.name(), some_test_metrics), ['Time'])
+        self.assertEqual(some_test_metrics[0].aggregator(), None)
+        self.assertEqual(some_test_metrics[0].path(), ['some-dir', 'some-test', 'BackboneJS-TodoMVC'])
+        self.assertEqual(some_test_metrics[0].flattened_iteration_values(), [862, 855, 855, 849, 854] * 4)
+
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, """RESULT some-dir: some-test: Time= 2324.6 ms
+median= 2324.0 ms, stdev= 12.1326007105 ms, min= 2312.0 ms, max= 2345.0 ms
+""")
+
 
 class TestSingleProcessPerfTest(unittest.TestCase):
     def test_use_only_one_process(self):
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
index 19da752..0d3feef 100644
@@ -269,7 +269,10 @@ class PerfTestsRunner(object):
                     current_test['url'] = view_source_url('PerformanceTests/' + metric.test_file_name())
                     current_test.setdefault('metrics', {})
                     assert metric.name() not in current_test['metrics']
-                    current_test['metrics'][metric.name()] = {'current': metric.grouped_iteration_values()}
+                    test_results = {'current': metric.grouped_iteration_values()}
+                    if metric.aggregator():
+                        test_results['aggregators'] = [metric.aggregator()]
+                    current_test['metrics'][metric.name()] = test_results
                 else:
                     current_test.setdefault('tests', {})
                     tests = current_test['tests']
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py
index 06d3b5c..c952bb5 100644
@@ -98,6 +98,8 @@ Finished: 0.1 s
 
 class TestWithSubtestsData:
     text = """subtest:Time -> [1, 2, 3, 4, 5] ms
+total-test:Time:Total -> [1, 2, 3, 4, 5] ms
+total-test/subsubtest:Time -> [1, 2, 3, 4, 5] ms
 :Time -> [1080, 1120, 1095, 1101, 1104] ms
 """
 
@@ -113,7 +115,14 @@ Finished: 0.1 s
         'tests': {
             'subtest': {
                 'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/test-with-subtests.html',
-                'metrics': {'Time': {'current': [[1.0, 2.0, 3.0, 4.0, 5.0]] * 4}}}}}
+                'metrics': {'Time': {'current': [[1.0, 2.0, 3.0, 4.0, 5.0]] * 4}}},
+            'total-test': {
+                'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/test-with-subtests.html',
+                'metrics': {'Time': {'current': [[1.0, 2.0, 3.0, 4.0, 5.0]] * 4, "aggregators": ["Total"]}},
+                'tests': {
+                    'subsubtest':
+                        {'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/test-with-subtests.html',
+                        'metrics': {'Time': {'current': [[1.0, 2.0, 3.0, 4.0, 5.0]] * 4}}}}}}}
 
 
 class TestDriver: