Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Run WebKit's performance tests and report the results."""

import json
import logging
import optparse
import re
import sys
import time

from webkitpy.common import find_files
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.port.driver import DriverInput
from webkitpy.layout_tests.views import printing

_log = logging.getLogger(__name__)


class PerfTestsRunner(object):
    _perf_tests_base_dir = 'PerformanceTests'
    _test_directories_for_chromium_style_tests = ['inspector']
    _default_branch = 'webkit-trunk'
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3

    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
        self._buildbot_output = buildbot_output
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
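        """Parse the runner's command-line arguments; returns the (options, args) pair from optparse."""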
        print_options = printing.print_options()

        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--builder-name",
                help="The name of the builder shown on the waterfall running this script, e.g. google-mac-2."),
            optparse.make_option("--build-number",
                help="The build number of the builder running this script."),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up to date (default)."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=240 * 1000,
                help="Set the timeout for each test in milliseconds"),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summarizes the results"),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present"),
        ]

        option_list = perf_option_list + print_options
        return optparse.OptionParser(option_list=option_list).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        def _is_test_file(filesystem, dirname, filename):
            return filename.endswith('.html')

        skipped_directories = set(['.svn', 'resources'])
        tests = find_files.find(self._host.filesystem, self._base_path, self._args, skipped_directories, _is_test_file)
        return [test for test in tests if not self._port.skips_perf_test(self._port.relative_perf_test_filename(test))]

    def run(self):
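        """Check the build, run all collected tests, and optionally write and
        upload a JSON summary of the results.

        Returns the number of failed tests, or a negative exit code if the
        build, JSON generation, or upload failed.
        """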
        if self._options.help_printing:
            self._printer.help_printing()
            self._printer.cleanup()
            return 0

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        try:
            tests = self._collect_tests()
            unexpected = self._run_tests_set(sorted(list(tests)), self._port)
        finally:
            self._printer.cleanup()

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            # Only surface a JSON-generation error when the tests themselves all
            # passed; otherwise the test failure count below takes precedence.
            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                branch, options.platform, options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
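        """Write the results summary to output_json_path as JSON.

        The output looks roughly like this (illustrative values):
            {"timestamp": 1320000000, "revision": "98765", "results": {...},
             "branch": "webkit-trunk", "platform": "chromium-mac",
             "builder-name": "google-mac-2", "build-number": 12}

        Returns True on success.
        """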
        revision = self._host.scm().head_svn_revision()
        contents = {'timestamp': int(timestamp), 'revision': revision, 'results': self._results}

        for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        if source_json_path:
            succeeded = False
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                # Entries in contents override entries from the source JSON.
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError as error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError as error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError as error:
                _log.error("Failed to merge JSON files: %s" % error)
            if not succeeded:
                return False

        filesystem.write_text_file(output_json_path, json.dumps(contents))
        return True

    def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
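        """POST the JSON file to https://<test_results_server>/api/test/report.

        Returns True only when the server responds with a single "OK" line.
        """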
        uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception as error:
            _log.error("Failed to upload JSON file within 120 seconds: %s" % error)
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            _log.error("Uploaded JSON but got a bad response:")
            for line in response_body:
                _log.error(line)
            return False

        self._printer.write("JSON file uploaded.")
        return True

    def _print_status(self, tests, expected, unexpected):
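        """Write a one-line status for the current run to the printer."""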
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d didn't run)" % unexpected
        self._printer.write(status)

    def _run_tests_set(self, tests, port):
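        """Run each test in |tests| against a single driver instance.

        The driver is restarted after a crash or timeout. Returns the number
        of tests that failed.
        """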
        expected = 0
        unexpected = 0
        driver_need_restart = False
        driver = None

        for test in tests:
            if driver_need_restart:
                _log.error("Restarting the driver before %s" % test)
                driver.stop()
                driver = None
            if not driver:
                driver = port.create_driver(worker_number=1, no_timeout=True)

            relative_test_path = self._host.filesystem.relpath(test, self._base_path)
            self._printer.write('Running %s (%d of %d)' % (relative_test_path, expected + unexpected + 1, len(tests)))

            is_chromium_style = self._host.filesystem.split(relative_test_path)[0] in self._test_directories_for_chromium_style_tests
            test_failed, driver_need_restart = self._run_single_test(test, driver, is_chromium_style)
            if test_failed:
                unexpected += 1
            else:
                expected += 1

            self._printer.write('')

        if driver:
            driver.stop()

        return unexpected

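    # Matches Chromium-style result lines such as "RESULT Group: test= 42 ms".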
    _inspector_result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')

    def _process_chromium_style_test_result(self, test, output):
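        """Record Chromium-style "RESULT <name> = <value> <unit>" lines.

        Returns True when the test failed or produced no result at all.
        """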
        test_failed = False
        got_a_result = False
        for line in re.split('\n', output.text):
            result_line = self._inspector_result_regex.match(line)
            if result_line:
                self._results[result_line.group('name').replace(' ', '')] = float(result_line.group('value'))
                self._buildbot_output.write("%s\n" % line)
                got_a_result = True
            elif line:
                test_failed = True
                self._printer.write("%s" % line)
        return test_failed or not got_a_result

    _lines_to_ignore_in_parser_result = [
        re.compile(r'^Running \d+ times$'),
        re.compile(r'^Ignoring warm-up '),
        re.compile(r'^\d+(\.\d+)?$'),
        # The following handle existing tests such as Dromaeo.
        re.compile(re.escape("""main frame - has 1 onunload handler(s)""")),
        re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)"""))]

    def _should_ignore_line_in_parser_test_result(self, line):
        if not line:
            return True
        for regex in self._lines_to_ignore_in_parser_result:
            if regex.search(line):
                return True
        return False

    def _process_parser_test_result(self, test, output):
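        """Parse parser-style output: lines of the form "<avg|median|stdev|min|max> <value>".

        Records the statistics in self._results and returns True when the test
        failed or any of the five statistics was missing.
        """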
        test_failed = False
        filesystem = self._host.filesystem
        category, test_name = filesystem.split(filesystem.relpath(test, self._base_path))
        test_name = filesystem.splitext(test_name)[0]
        results = {}
        keys = ['avg', 'median', 'stdev', 'min', 'max']
        score_regex = re.compile(r'^(' + r'|'.join(keys) + r')\s+([0-9\.]+)')
        for line in re.split('\n', output.text):
            score = score_regex.match(line)
            if score:
                results[score.group(1)] = float(score.group(2))
                continue

            if not self._should_ignore_line_in_parser_test_result(line):
                test_failed = True
                self._printer.write("%s" % line)

        if test_failed or set(keys) != set(results.keys()):
            return True
        self._results[filesystem.join(category, test_name).replace('\\', '/')] = results
        self._buildbot_output.write('RESULT %s: %s= %s ms\n' % (category, test_name, results['avg']))
        self._buildbot_output.write(', '.join(['%s= %s ms' % (key, results[key]) for key in keys[1:]]) + '\n')
        return False

    def _run_single_test(self, test, driver, is_chromium_style):
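        """Run one test through the driver; returns (test_failed, driver_need_restart)."""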
        test_failed = False
        driver_need_restart = False
        output = driver.run_test(DriverInput(test, self._options.time_out_ms, None, False))

        if output.text is None:
            test_failed = True
        elif output.timeout:
            self._printer.write('timeout: %s' % test[self._webkit_base_dir_len + 1:])
            test_failed = True
            driver_need_restart = True
        elif output.crash:
            self._printer.write('crash: %s' % test[self._webkit_base_dir_len + 1:])
            driver_need_restart = True
            test_failed = True
        else:
            if is_chromium_style:
                test_failed = self._process_chromium_style_test_result(test, output)
            else:
                test_failed = self._process_parser_test_result(test, output)

        if output.error:
            self._printer.write('error:\n%s' % output.error)
            test_failed = True

        if test_failed:
            self._printer.write('FAILED')

        return test_failed, driver_need_restart