run-perf-tests should generate a JSON file that summarizes the results
author rniwa@webkit.org <rniwa@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 18 Jan 2012 10:52:41 +0000 (10:52 +0000)
committer rniwa@webkit.org <rniwa@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Wed, 18 Jan 2012 10:52:41 +0000 (10:52 +0000)
https://bugs.webkit.org/show_bug.cgi?id=76504

Reviewed by Adam Barth.

Add the ability for run-perf-tests to generate a JSON file, in preparation for the perf bots.
The new option --output-json-path specifies the path of the generated JSON file, and
--source-json-path specifies another JSON file whose contents are merged into the generated file.

Also fixed a bug where --build wasn't enabled by default.
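
For illustration only (not part of the patch), the sketch below mimics the merge that
_generate_json_if_specified performs: the runner writes a dictionary with 'timestamp',
'revision', and 'results' keys, and any keys read from the --source-json-path file are
merged in, with the runner-generated keys winning on conflict. The sample values are
taken from the unit tests in this patch.

    import json

    # Keys the runner generates (sample values from the unit tests below).
    contents = {
        'timestamp': 123456789,
        'revision': 1234,
        'results': {'group_name:test_name': 42},
    }

    # Keys loaded from the file given by --source-json-path.
    source_json = {'key': 'value'}

    # Merge as _generate_json_if_specified does; runner-generated keys take precedence.
    merged = dict(list(source_json.items()) + list(contents.items()))

    print(json.dumps(merged, sort_keys=True))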

* Scripts/webkitpy/layout_tests/port/test.py:
(TestPort.webkit_base):
* Scripts/webkitpy/performance_tests/perftestsrunner.py:
(PerfTestsRunner.__init__):
(PerfTestsRunner):
(PerfTestsRunner._parse_args):
(PerfTestsRunner.run):
(PerfTestsRunner._generate_json_if_specified):
(PerfTestsRunner._process_chromium_style_test_result):
(PerfTestsRunner._process_parser_test_result):
* Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
(MainTest):
(create_runner):
(test_run_test_set):
(test_run_test_set_for_parser_tests):
(test_run_test_set_with_json_output):
(test_run_test_set_with_json_source):
(test_collect_tests):
(test_parse_args):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@105256 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Tools/ChangeLog
Tools/Scripts/webkitpy/layout_tests/port/test.py
Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py

index 1c31ea0d08695b0e7d2101e7b9bfab91349cb2c1..fd870acdd64ba15366dae973d712a99297ba1be3 100644 (file)
@@ -1,3 +1,36 @@
+2012-01-18  Ryosuke Niwa  <rniwa@webkit.org>
+
+        run-perf-tests should generate a JSON file that summarizes the results
+        https://bugs.webkit.org/show_bug.cgi?id=76504
+
+        Reviewed by Adam Barth.
+
+        Add the ability for run-perf-tests to generate a JSON file, in preparation for the perf bots.
+        The new option --output-json-path specifies the path of the generated JSON file, and
+        --source-json-path specifies another JSON file whose contents are merged into the generated file.
+
+        Also fixed a bug where --build wasn't enabled by default.
+
+        * Scripts/webkitpy/layout_tests/port/test.py:
+        (TestPort.webkit_base):
+        * Scripts/webkitpy/performance_tests/perftestsrunner.py:
+        (PerfTestsRunner.__init__):
+        (PerfTestsRunner):
+        (PerfTestsRunner._parse_args):
+        (PerfTestsRunner.run):
+        (PerfTestsRunner._generate_json_if_specified):
+        (PerfTestsRunner._process_chromium_style_test_result):
+        (PerfTestsRunner._process_parser_test_result):
+        * Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
+        (MainTest):
+        (create_runner):
+        (test_run_test_set):
+        (test_run_test_set_for_parser_tests):
+        (test_run_test_set_with_json_output):
+        (test_run_test_set_with_json_source):
+        (test_collect_tests):
+        (test_parse_args):
+
 2012-01-17  Sheriff Bot  <webkit.review.bot@gmail.com>
 
         Unreviewed, rolling out r105244.
index 4cc24a3875ff2b59f577c1a7a7d3615301f7ae25..d6d69f5f58db8b7355df0d3372e67d50e500f323 100644 (file)
@@ -384,6 +384,9 @@ class TestPort(Port):
     def layout_tests_dir(self):
         return LAYOUT_TEST_DIR
 
+    def webkit_base(self):
+        return '/test.checkout'
+
     def name(self):
         return self._name
 
index 3badfe847a59d7af2a3d207c0db8631928590c19..21bdab931c3bdc45d3ca519f9f01b3bb4895063b 100644 (file)
 
 """Run Inspector's perf tests in perf mode."""
 
+import json
 import logging
 import optparse
 import re
 import sys
+import time
 
 from webkitpy.common import find_files
 from webkitpy.common.host import Host
@@ -46,17 +48,20 @@ class PerfTestsRunner(object):
     _perf_tests_base_dir = 'PerformanceTests'
     _test_directories_for_chromium_style_tests = ['inspector']
 
-    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None):
+    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
         self._buildbot_output = buildbot_output
-        self._options, self._args = self._parse_args(args)
-        self._host = Host()
+        self._options, self._args = PerfTestsRunner._parse_args(args)
+        self._port = port or Host().port_factory.get(self._options.platform, self._options)
+        self._host = self._port.host
         self._host._initialize_scm()
-        self._port = self._host.port_factory.get(self._options.platform, self._options)
         self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
         self._webkit_base_dir_len = len(self._port.webkit_base())
         self._base_path = self._host.filesystem.join(self._port.webkit_base(), self._perf_tests_base_dir)
+        self._results = {}
+        self._timestamp = time.time()
 
-    def _parse_args(self, args=None):
+    @staticmethod
+    def _parse_args(args=None):
         print_options = printing.print_options()
 
         perf_option_list = [
@@ -66,10 +71,16 @@ class PerfTestsRunner(object):
                                  help='Set the configuration to Release'),
             optparse.make_option("--platform",
                                  help="Specify port/platform being tested (i.e. chromium-mac)"),
+            optparse.make_option("--build", dest="build", action="store_true", default=True,
+                                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
             optparse.make_option("--build-directory",
                                  help="Path to the directory under which build files are kept (should not include configuration)"),
             optparse.make_option("--time-out-ms", default=30000,
                                  help="Set the timeout for each test"),
+            optparse.make_option("--output-json-path",
+                                 help="Filename of the JSON file that summarizes the results"),
+            optparse.make_option("--source-json-path",
+                                 help="Path to a JSON file to be merged into the JSON file when --output-json-path is specified"),
             ]
 
         option_list = (perf_option_list + print_options)
@@ -102,8 +113,36 @@ class PerfTestsRunner(object):
         finally:
             self._printer.cleanup()
 
+        if not self._generate_json_if_specified(self._timestamp) and not unexpected:
+            return -2
+
         return unexpected
 
+    def _generate_json_if_specified(self, timestamp):
+        output_json_path = self._options.output_json_path
+        if not output_json_path:
+            return True
+
+        revision = self._host.scm().head_svn_revision()
+        contents = {'timestamp': int(timestamp), 'revision': revision, 'results': self._results}
+
+        filesystem = self._host.filesystem
+        source_json_path = self._options.source_json_path
+        if source_json_path:
+            try:
+                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
+                source_json = json.load(source_json_file)
+            except:
+                _log.error("Failed to parse %s" % source_json_path)
+                return False
+            if not isinstance(source_json, dict):
+                _log.error("The source JSON was not a dictionary")
+                return False
+            contents = dict(source_json.items() + contents.items())
+
+        filesystem.write_text_file(output_json_path, json.dumps(contents))
+        return True
+
     def _print_status(self, tests, expected, unexpected):
         if len(tests) == expected + unexpected:
             status = "Ran %d tests" % len(tests)
@@ -145,13 +184,15 @@ class PerfTestsRunner(object):
 
         return unexpected
 
-    _inspector_result_regex = re.compile('^RESULT .*$')
+    _inspector_result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')
 
     def _process_chromium_style_test_result(self, test, output):
         test_failed = False
         got_a_result = False
         for line in re.split('\n', output.text):
-            if self._inspector_result_regex.match(line):
+            resultLine = self._inspector_result_regex.match(line)
+            if resultLine:
+                self._results[resultLine.group('name').replace(' ', '')] = int(resultLine.group('value'))
                 self._buildbot_output.write("%s\n" % line)
                 got_a_result = True
             elif not len(line) == 0:
@@ -194,6 +235,7 @@ class PerfTestsRunner(object):
 
         if test_failed or set(keys) != set(results.keys()):
             return True
+        self._results[test_name] = results
         self._buildbot_output.write('RESULT %s: %s= %s ms\n' % (category, test_name, results['avg']))
         self._buildbot_output.write(', '.join(['%s= %s ms' % (key, results[key]) for key in keys[1:]]) + '\n')
         return False
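
As an aside (not part of the patch), here is a minimal sketch of how the tightened
_inspector_result_regex above feeds the new self._results dictionary; the sample line
and the expected key come from the unit tests in this patch.

    import re

    # Pattern copied from the patch above.
    _inspector_result_regex = re.compile(
        r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')

    line = 'RESULT group_name: test_name= 42 ms'  # sample output line from the unit tests
    match = _inspector_result_regex.match(line)
    if match:
        # Spaces are stripped from the name, so the stored key is 'group_name:test_name'.
        key = match.group('name').replace(' ', '')
        value = int(match.group('value'))
        print('%s = %d %s' % (key, value, match.group('unit')))  # group_name:test_name = 42 ms
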
index 3ec6bfb81a477d4ad45a55d54f8b652b2fa5a36c..ca64cd682d3641d4740278eb039a1c7f2fb5c463 100755 (executable)
 
 """Unit tests for run_perf_tests."""
 
+import json
 import unittest
 
 from webkitpy.common import array_stream
+from webkitpy.common.host_mock import MockHost
 from webkitpy.common.system.filesystem_mock import MockFileSystem
 from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
+from webkitpy.layout_tests.port.test import TestPort
 from webkitpy.layout_tests.views import printing
 from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
 
 
 class MainTest(unittest.TestCase):
-    class TestPort:
-        def create_driver(self, worker_number=None):
-            return MainTest.TestDriver()
-
     class TestDriver:
         def run_test(self, driver_input):
             text = ''
@@ -103,10 +102,19 @@ max 1120
         def stop(self):
             """do nothing"""
 
-    def create_runner(self, buildbot_output=None):
+    def create_runner(self, buildbot_output=None, args=[]):
         buildbot_output = buildbot_output or array_stream.ArrayStream()
         regular_output = array_stream.ArrayStream()
-        return PerfTestsRunner(regular_output, buildbot_output, args=[])
+
+        options, parsed_args = PerfTestsRunner._parse_args(args)
+        test_port = TestPort(host=MockHost(), options=options)
+        test_port.create_driver = lambda worker_number=None: MainTest.TestDriver()
+
+        runner = PerfTestsRunner(regular_output, buildbot_output, args=args, port=test_port)
+        runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
+        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
+        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
+        return runner
 
     def run_test(self, test_name):
         runner = self.create_runner()
@@ -146,12 +154,10 @@ max 1120
     def test_run_test_set(self):
         buildbot_output = array_stream.ArrayStream()
         runner = self.create_runner(buildbot_output)
-        runner._base_path = '/test.checkout/PerformanceTests'
-        port = MainTest.TestPort()
         dirname = runner._base_path + '/inspector/'
         tests = [dirname + 'pass.html', dirname + 'silent.html', dirname + 'failed.html',
             dirname + 'tonguey.html', dirname + 'timeout.html', dirname + 'crash.html']
-        unexpected_result_count = runner._run_tests_set(tests, port)
+        unexpected_result_count = runner._run_tests_set(tests, runner._port)
         self.assertEqual(unexpected_result_count, len(tests) - 1)
         self.assertEqual(len(buildbot_output.get()), 1)
         self.assertEqual(buildbot_output.get()[0], 'RESULT group_name: test_name= 42 ms\n')
@@ -159,35 +165,71 @@ max 1120
     def test_run_test_set_for_parser_tests(self):
         buildbot_output = array_stream.ArrayStream()
         runner = self.create_runner(buildbot_output)
-        runner._base_path = '/test.checkout/PerformanceTests/'
-        port = MainTest.TestPort()
-        tests = [runner._base_path + 'Bindings/event-target-wrapper.html', runner._base_path + 'Parser/some-parser.html']
-        unexpected_result_count = runner._run_tests_set(tests, port)
+        tests = [runner._base_path + '/Bindings/event-target-wrapper.html', runner._base_path + '/Parser/some-parser.html']
+        unexpected_result_count = runner._run_tests_set(tests, runner._port)
         self.assertEqual(unexpected_result_count, 0)
         self.assertEqual(buildbot_output.get()[0], 'RESULT Bindings: event-target-wrapper= 1489.05 ms\n')
         self.assertEqual(buildbot_output.get()[1], 'median= 1487 ms, stdev= 14.46 ms, min= 1471 ms, max= 1510 ms\n')
         self.assertEqual(buildbot_output.get()[2], 'RESULT Parser: some-parser= 1100 ms\n')
         self.assertEqual(buildbot_output.get()[3], 'median= 1101 ms, stdev= 11 ms, min= 1080 ms, max= 1120 ms\n')
 
+    def test_run_test_set_with_json_output(self):
+        buildbot_output = array_stream.ArrayStream()
+        runner = self.create_runner(buildbot_output, args=['--output-json-path=/test.checkout/output.json'])
+        runner._host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
+        runner._host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
+        runner._timestamp = 123456789
+        self.assertEqual(runner.run(), 0)
+        self.assertEqual(len(buildbot_output.get()), 3)
+        self.assertEqual(buildbot_output.get()[0], 'RESULT Bindings: event-target-wrapper= 1489.05 ms\n')
+        self.assertEqual(buildbot_output.get()[1], 'median= 1487 ms, stdev= 14.46 ms, min= 1471 ms, max= 1510 ms\n')
+        self.assertEqual(buildbot_output.get()[2], 'RESULT group_name: test_name= 42 ms\n')
+
+        self.assertEqual(json.loads(runner._host.filesystem.files['/test.checkout/output.json']), {
+            "timestamp": 123456789, "results":
+            {"event-target-wrapper": {"max": "1510", "avg": "1489.05", "median": "1487", "min": "1471", "stdev": "14.46"},
+            "group_name:test_name": 42},
+            "revision": 1234})
+
+    def test_run_test_set_with_json_source(self):
+        buildbot_output = array_stream.ArrayStream()
+        runner = self.create_runner(buildbot_output, args=['--output-json-path=/test.checkout/output.json',
+            '--source-json-path=/test.checkout/source.json'])
+        runner._host.filesystem.files['/test.checkout/source.json'] = '{"key": "value"}'
+        runner._host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
+        runner._host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
+        runner._timestamp = 123456789
+        self.assertEqual(runner.run(), 0)
+        self.assertEqual(len(buildbot_output.get()), 3)
+        self.assertEqual(buildbot_output.get()[0], 'RESULT Bindings: event-target-wrapper= 1489.05 ms\n')
+        self.assertEqual(buildbot_output.get()[1], 'median= 1487 ms, stdev= 14.46 ms, min= 1471 ms, max= 1510 ms\n')
+        self.assertEqual(buildbot_output.get()[2], 'RESULT group_name: test_name= 42 ms\n')
+
+        self.assertEqual(json.loads(runner._host.filesystem.files['/test.checkout/output.json']), {
+            "timestamp": 123456789, "results":
+            {"event-target-wrapper": {"max": "1510", "avg": "1489.05", "median": "1487", "min": "1471", "stdev": "14.46"},
+            "group_name:test_name": 42},
+            "revision": 1234,
+            "key": "value"})
+
     def test_collect_tests(self):
         runner = self.create_runner()
-        runner._base_path = '/test.checkout/PerformanceTests'
-        filesystem = MockFileSystem()
-        filename = filesystem.join(runner._base_path, 'inspector', 'a_file.html')
-        filesystem.maybe_make_directory(runner._base_path, 'inspector')
-        filesystem.files[filename] = 'a content'
-        runner._host.filesystem = filesystem
+        filename = runner._host.filesystem.join(runner._base_path, 'inspector', 'a_file.html')
+        runner._host.filesystem.files[filename] = 'a content'
         tests = runner._collect_tests()
         self.assertEqual(len(tests), 1)
 
     def test_parse_args(self):
         runner = self.create_runner()
-        options, args = runner._parse_args([
+        options, args = PerfTestsRunner._parse_args([
                 '--verbose',
                 '--build-directory=folder42',
                 '--platform=platform42',
                 '--time-out-ms=42',
+                '--output-json-path=a/output.json',
+                '--source-json-path=a/source.json',
                 '--debug', 'an_arg'])
+        self.assertEqual(options.build, True)
         self.assertEqual(options.verbose, True)
         self.assertEqual(options.help_printing, None)
         self.assertEqual(options.build_directory, 'folder42')
@@ -195,6 +237,8 @@ max 1120
         self.assertEqual(options.time_out_ms, '42')
         self.assertEqual(options.configuration, 'Debug')
         self.assertEqual(options.print_options, None)
+        self.assertEqual(options.output_json_path, 'a/output.json')
+        self.assertEqual(options.source_json_path, 'a/source.json')
 
 
 if __name__ == '__main__':