#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Runs the performance tests found under PerformanceTests and reports the results."""

import json
import logging
import optparse
import re
import sys
import time

from webkitpy.common import find_files
from webkitpy.common.host import Host
from webkitpy.layout_tests.port.driver import DriverInput
from webkitpy.layout_tests.views import printing

_log = logging.getLogger(__name__)


class PerfTestsRunner(object):
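    """Discovers the tests under PerformanceTests, runs them with the port's driver, and optionally writes a JSON summary of the results."""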
    _perf_tests_base_dir = 'PerformanceTests'
    _test_directories_for_chromium_style_tests = ['inspector']

    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
        self._buildbot_output = buildbot_output
        self._options, self._args = PerfTestsRunner._parse_args(args)
        # Use the host that owns the supplied port, or create a fresh Host otherwise.
        self._host = port.host if port else Host()
        self._port = port or self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._host.filesystem.join(self._port.webkit_base(), self._perf_tests_base_dir)
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
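        """Parses the perf-test-specific options along with the shared printing options."""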
        print_options = printing.print_options()

        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option("--platform",
                                 help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                                 help="Check to ensure the DumpRenderTree build is up to date (default)."),
            optparse.make_option("--build-directory",
                                 help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=30000,
                                 help="Set the timeout for each test"),
            optparse.make_option("--output-json-path",
                                 help="Filename of the JSON file that summarizes the results"),
            optparse.make_option("--source-json-path",
                                 help="Path to a JSON file to be merged into the output JSON when --output-json-path is specified"),
            ]

        option_list = (perf_option_list + print_options)
        return optparse.OptionParser(option_list=option_list).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        def _is_test_file(filesystem, dirname, filename):
            return filename.endswith('.html')

        return find_files.find(self._host.filesystem, self._base_path, paths=self._args, file_filter=_is_test_file)

    def run(self):
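        """Runs every collected test; returns the failure count, or a negative value on a build or JSON error."""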
        if self._options.help_printing:
            self._printer.help_printing()
            self._printer.cleanup()
            return 0

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return -1

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        try:
            tests = self._collect_tests()
            unexpected = self._run_tests_set(tests, self._port)
        finally:
            self._printer.cleanup()

        if not self._generate_json_if_specified(self._timestamp) and not unexpected:
            return -2

        return unexpected

    def _generate_json_if_specified(self, timestamp):
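        """Writes a JSON summary to --output-json-path (merging --source-json-path if given); returns False on failure."""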
        output_json_path = self._options.output_json_path
        if not output_json_path:
            return True

        revision = self._host.scm().head_svn_revision()
        contents = {'timestamp': int(timestamp), 'revision': revision, 'results': self._results}

        filesystem = self._host.filesystem
        source_json_path = self._options.source_json_path
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
            except:
                _log.error("Failed to parse %s" % source_json_path)
                return False
            if not isinstance(source_json, dict):
                _log.error("The source JSON was not a dictionary")
                return False
            contents = dict(source_json.items() + contents.items())

        filesystem.write_text_file(output_json_path, json.dumps(contents))
        return True

    def _print_status(self, tests, expected, unexpected):
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d didn't run)" % unexpected
        self._printer.write(status)

    def _run_tests_set(self, tests, port):
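        """Runs each test with a shared driver, restarting it after a crash or timeout; returns the number of failed tests."""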
        result_count = len(tests)
        expected = 0
        unexpected = 0
        driver_need_restart = False
        driver = None

        for test in tests:
            if driver_need_restart:
                _log.debug("%s killing driver" % test)
                driver.stop()
                driver = None
            if not driver:
                driver = port.create_driver(worker_number=1)

            relative_test_path = self._host.filesystem.relpath(test, self._base_path)
            self._printer.write('Running %s (%d of %d)' % (relative_test_path, expected + unexpected + 1, len(tests)))

            is_chromium_style = self._host.filesystem.split(relative_test_path)[0] in self._test_directories_for_chromium_style_tests
            test_failed, driver_need_restart = self._run_single_test(test, driver, is_chromium_style)
            if test_failed:
                unexpected = unexpected + 1
            else:
                expected = expected + 1

            self._printer.write('')

        if driver:
            driver.stop()

        return unexpected

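    # Chromium-style tests report results on lines of the form
    # "RESULT <name> = <value> <unit>", e.g. "RESULT group: time= 1620 ms".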
    _inspector_result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')

    def _process_chromium_style_test_result(self, test, output):
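        """Records every RESULT line in the output; returns True if the test failed or produced no result."""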
        test_failed = False
        got_a_result = False
        for line in re.split('\n', output.text):
            result_line = self._inspector_result_regex.match(line)
            if result_line:
                # The regex allows fractional values, so only use int() for whole numbers.
                value = result_line.group('value')
                self._results[result_line.group('name').replace(' ', '')] = float(value) if '.' in value else int(value)
                self._buildbot_output.write("%s\n" % line)
                got_a_result = True
            elif line:
                test_failed = True
                self._printer.write("%s" % line)
        return test_failed or not got_a_result

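    # Output that parser-style tests print during a normal run and that carries no results.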
    _lines_to_ignore_in_parser_result = [
        re.compile(r'^Running \d+ times$'),
        re.compile(r'^Ignoring warm-up '),
        re.compile(r'^\d+$'),
    ]

    def _should_ignore_line_in_parser_test_result(self, line):
        if not line:
            return True
        for regex in self._lines_to_ignore_in_parser_result:
            if regex.match(line):
                return True
        return False

    def _process_parser_test_result(self, test, output):
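        """Collects the avg/median/stdev/min/max values printed by a parser-style test; returns True if the test failed."""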
        got_a_result = False
        test_failed = False
        filesystem = self._host.filesystem
        category, test_name = filesystem.split(filesystem.relpath(test, self._base_path))
        test_name = filesystem.splitext(test_name)[0]
        results = {}
        keys = ['avg', 'median', 'stdev', 'min', 'max']
        score_regex = re.compile(r'^(' + r'|'.join(keys) + r')\s+([0-9\.]+)')
        for line in re.split('\n', output.text):
            score = score_regex.match(line)
            if score:
                results[score.group(1)] = score.group(2)
                continue

            if not self._should_ignore_line_in_parser_test_result(line):
                test_failed = True
                self._printer.write("%s" % line)

        if test_failed or set(keys) != set(results.keys()):
            return True
        self._results[test_name] = results
        self._buildbot_output.write('RESULT %s: %s= %s ms\n' % (category, test_name, results['avg']))
        self._buildbot_output.write(', '.join(['%s= %s ms' % (key, results[key]) for key in keys[1:]]) + '\n')
        return False

    def _run_single_test(self, test, driver, is_chromium_style):
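        """Runs one test through the driver and returns (test_failed, driver_need_restart)."""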
        test_failed = False
        driver_need_restart = False
        output = driver.run_test(DriverInput(test, self._options.time_out_ms, None, False))

        if output.text is None:
            test_failed = True
        elif output.timeout:
            self._printer.write('timeout: %s' % test[self._webkit_base_dir_len + 1:])
            test_failed = True
            driver_need_restart = True
        elif output.crash:
            self._printer.write('crash: %s' % test[self._webkit_base_dir_len + 1:])
            driver_need_restart = True
            test_failed = True
        else:
            if is_chromium_style:
                test_failed = self._process_chromium_style_test_result(test, output)
            else:
                test_failed = self._process_parser_test_result(test, output)

        if output.error:
            self._printer.write('error:\n%s' % output.error)
            test_failed = True

        if test_failed:
            self._printer.write('FAILED')

        return test_failed, driver_need_restart
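

# A minimal sketch of how this runner is typically driven; in the WebKit tree the
# real entry point is the run-perf-tests wrapper script under Tools/Scripts, so this
# guard is illustrative only.
if __name__ == '__main__':
    sys.exit(PerfTestsRunner(args=sys.argv[1:]).run())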