#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

30 """Run Inspector's perf tests in perf mode."""
31
32 import json
33 import logging
34 import optparse
35 import re
36 import sys
37 import time
38
39 from webkitpy.common import find_files
40 from webkitpy.common.host import Host
41 from webkitpy.common.net.file_uploader import FileUploader
42 from webkitpy.layout_tests.port.driver import DriverInput
43 from webkitpy.layout_tests.views import printing
44
45 _log = logging.getLogger(__name__)
46
47
class PerfTestsRunner(object):
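    """Discovers the tests under PerformanceTests, runs each one through the
    test driver, and optionally writes the collected results to a JSON file
    and uploads them to a test results server."""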
    _perf_tests_base_dir = 'PerformanceTests'
    _initial_page_relative_path = 'resources/init.html'
    _test_directories_for_chromium_style_tests = ['inspector']
    _default_branch = 'webkit-trunk'
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3

    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
        self._buildbot_output = buildbot_output
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._initial_page_path = self._host.filesystem.join(self._base_path, self._initial_page_relative_path)
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
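        """Parse the performance-test-specific options along with the shared printing options."""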
        print_options = printing.print_options()

        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script, e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test in milliseconds"),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summarizes the results"),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present"),
            ]

        option_list = (perf_option_list + print_options)
        return optparse.OptionParser(option_list=option_list).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        def _is_test_file(filesystem, dirname, filename):
            return filename.endswith('.html')

        skipped_directories = set(['.svn', 'resources'])
        tests = find_files.find(self._host.filesystem, self._base_path, self._args, skipped_directories, _is_test_file)
        return [test for test in tests if not self._port.skips_perf_test(self._port.relative_perf_test_filename(test))]

    def run(self):
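        """Run every collected test and return the number of tests that
        failed, or a negative _EXIT_CODE_* constant if the build is out of
        date or the JSON generation or upload step fails."""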
        if self._options.help_printing:
            self._printer.help_printing()
            self._printer.cleanup()
            return 0

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        try:
            tests = self._collect_tests()
            unexpected = self._run_tests_set(sorted(tests), self._port)
        finally:
            self._printer.cleanup()

        options = self._options
        if options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                branch, options.platform, options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
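        """Write the collected results, the SVN revision, and any builder
        metadata to output_json_path; if source_json_path is given, merge
        its contents in first. Returns True on success."""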
        revision = self._host.scm().head_svn_revision()
        contents = {'timestamp': int(timestamp), 'revision': revision, 'results': self._results}

        for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        if source_json_path:
            succeeded = False
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError, error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError, error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError, error:
                _log.error("Failed to merge JSON files: %s" % error)
            if not succeeded:
                return False

        filesystem.write_text_file(output_json_path, json.dumps(contents))
        return True

    def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
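        """POST the JSON file to the results server with a 120-second
        timeout and return True if the server responds with a single
        "OK" line."""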
        uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception, error:
            _log.error("Failed to upload JSON file in 120s: %s" % error)
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            _log.error("Uploaded JSON but got a bad response:")
            for line in response_body:
                _log.error(line)
            return False

        self._printer.write("JSON file uploaded.")
        return True

    def _print_status(self, tests, expected, unexpected):
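        """Write a one-line progress or summary status to the printer."""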
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d failed)" % unexpected
        self._printer.write(status)

    def _run_tests_set(self, tests, port):
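        """Run each test in its own driver instance and return the number
        of tests that failed."""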
        expected = 0
        unexpected = 0

        for test in tests:
            driver = port.create_driver(worker_number=1, no_timeout=True)

            relative_test_path = self._host.filesystem.relpath(test, self._base_path)
            self._printer.write('Running %s (%d of %d)' % (relative_test_path, expected + unexpected + 1, len(tests)))

            is_chromium_style = self._host.filesystem.split(relative_test_path)[0] in self._test_directories_for_chromium_style_tests
            if self._run_single_test(test, driver, is_chromium_style):
                expected += 1
            else:
                unexpected += 1

            self._printer.write('')

            driver.stop()

        return unexpected

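    # Matches Chromium-style output lines such as "RESULT my-metric= 123.45 ms"
    # (the metric name and unit vary by test; the example is illustrative).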
    _inspector_result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')

    def _process_chromium_style_test_result(self, test, output):
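        """Record every RESULT line in the output under self._results and
        echo it to the buildbot log; any other non-empty line marks the
        test as failed. Returns True if the test failed or produced no
        results."""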
        test_failed = False
        got_a_result = False
        for line in re.split('\n', output.text):
            result_line = self._inspector_result_regex.match(line)
            if result_line:
                self._results[result_line.group('name').replace(' ', '')] = float(result_line.group('value'))
                self._buildbot_output.write("%s\n" % line)
                got_a_result = True
            elif line:
                test_failed = True
                self._printer.write("%s" % line)
        return test_failed or not got_a_result

    _lines_to_ignore_in_parser_result = [
        re.compile(r'^Running \d+ times$'),
        re.compile(r'^Ignoring warm-up '),
        re.compile(r'^\d+(\.\d+)?$'),
        # The following handle existing tests such as Dromaeo.
        re.compile(re.escape("""main frame - has 1 onunload handler(s)""")),
        re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)"""))]

    def _should_ignore_line_in_parser_test_result(self, line):
        if not line:
            return True
        for regex in self._lines_to_ignore_in_parser_result:
            if regex.search(line):
                return True
        return False

    def _process_parser_test_result(self, test, output):
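        """Parse parser-style output, which reports one statistic per line,
        e.g. "avg 1234.5" (value illustrative); all five of avg, median,
        stdev, min, and max must be present for the test to pass.
        Returns True if the test failed."""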
        test_failed = False
        filesystem = self._host.filesystem
        category, test_name = filesystem.split(filesystem.relpath(test, self._base_path))
        test_name = filesystem.splitext(test_name)[0]
        results = {}
        keys = ['avg', 'median', 'stdev', 'min', 'max']
        score_regex = re.compile(r'^(' + r'|'.join(keys) + r')\s+([0-9\.]+)')
        for line in re.split('\n', output.text):
            score = score_regex.match(line)
            if score:
                results[score.group(1)] = float(score.group(2))
                continue

            if not self._should_ignore_line_in_parser_test_result(line):
                test_failed = True
                self._printer.write("%s" % line)

        if test_failed or set(keys) != set(results.keys()):
            return True
        self._results[filesystem.join(category, test_name).replace('\\', '/')] = results
        self._buildbot_output.write('RESULT %s: %s= %s ms\n' % (category, test_name, results['avg']))
        self._buildbot_output.write(', '.join(['%s= %s ms' % (key, results[key]) for key in keys[1:]]) + '\n')
        return False

    def _run_single_test(self, test, driver, is_chromium_style):
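        """Load the shared initialization page, run the test through the
        driver, classify timeouts, crashes, and bad output, and delegate
        result parsing to the appropriate processor. Returns True if the
        test passed."""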
        test_failed = False
        output = driver.run_test(DriverInput(self._initial_page_path, 10000, None, False))
        if output.text != 'PASS\n':
            self._printer.write('Initialization page failed to load.')
        output = driver.run_test(DriverInput(test, self._options.time_out_ms, None, False))

        if output.text is None:
            test_failed = True
        elif output.timeout:
            self._printer.write('timeout: %s' % test[self._webkit_base_dir_len + 1:])
            test_failed = True
        elif output.crash:
            self._printer.write('crash: %s' % test[self._webkit_base_dir_len + 1:])
            test_failed = True
        else:
            if is_chromium_style:
                test_failed = self._process_chromium_style_test_result(test, output)
            else:
                test_failed = self._process_parser_test_result(test, output)

        if output.error:
            self._printer.write('error:\n%s' % output.error)
            test_failed = True

        if test_failed:
            self._printer.write('FAILED')

        return not test_failed