Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

29 """Run Inspector's perf tests in perf mode."""

import datetime
import json
import logging
import optparse
import os
import time

from webkitpy.common import find_files
from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.config.urls import view_source_url
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT
from webkitcorepy import string_utils


_log = logging.getLogger(__name__)


class PerfTestsRunner(object):
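    """Collects the performance tests under PerformanceTests, runs them
    through DumpRenderTree or WebKitTestRunner, and optionally writes an HTML
    results page and a JSON results file that can be uploaded to a results
    server."""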
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)

        # Timeouts are controlled by the Python Driver, so DRT/WTR runs with no-timeout.
        self._options.additional_drt_flag.append('--no-timeout')

        # The GTK+ port only supports WebKit2, so it always uses WKTR.
        if self._port.name().startswith("gtk"):
            self._options.webkit_test_runner = True

        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--no-timeout", action="store_true", default=False,
                help="Disable test timeouts"),
            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                help="Do not generate results JSON and results page."),
            optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
                help="Path to generate a JSON file at; may contain previous results if it already exists."),
            optparse.make_option("--reset-results", action="store_true",
                help="Clears the content in the generated JSON file before adding the results."),
            optparse.make_option("--worker-config-json-path", action='callback',
                callback=_expand_path, type="str", dest="worker_config_json_path",
                help="Only used on bots. Path to a worker configuration file."),
            optparse.make_option("--description",
                help="Add a description to the output JSON file if one is generated"),
            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
                help="Don't launch a browser with results after the tests are done"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--dump-render-tree", "-1", action="store_false", default=True, dest="webkit_test_runner",
                help="Use DumpRenderTree rather than WebKitTestRunner."),
            optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
                help="Run all tests, including the ones in the Skipped list."),
            optparse.make_option("--profile", action="store_true",
                help="Output per-test profile information."),
            optparse.make_option("--profiler", action="store",
                help="Output per-test profile information, using the specified profiler."),
            optparse.make_option("--additional-drt-flag", action="append",
                default=[], help="Additional command line flag to pass to DumpRenderTree. "
                     "Specify multiple times to add multiple flags."),
            optparse.make_option("--driver-name", type="string",
                help="Alternative DumpRenderTree binary to use"),
            optparse.make_option("--repeat", default=1, type="int",
                help="Specify number of times to run test set (default: 1)."),
            optparse.make_option("--test-runner-count", default=-1, type="int",
                help="Specify number of times to invoke test runner for each performance test."),
            optparse.make_option("--wrapper",
                help="wrapper command to insert before invocations of "
                 "DumpRenderTree or WebKitTestRunner; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
            optparse.make_option('--display-server', choices=['xvfb', 'xorg', 'weston', 'wayland'], default='xvfb',
                help='"xvfb": Use a virtualized X11 server. "xorg": Use the current X11 session. '
                     '"weston": Use a virtualized Weston server. "wayland": Use the current wayland session.'),
            ]
        return optparse.OptionParser(option_list=perf_option_list).parse_args(args)

    def _collect_tests(self):
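        """Find the performance tests to run.

        Collects .html and .svg files under the PerformanceTests directory (or
        under the paths passed on the command line), skipping '.svn' and
        'resources' directories and, unless --force is given, any test in the
        port's Skipped list.
        """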
        test_extensions = ['.html', '.svg']

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            if filesystem.exists(filesystem.join(self._base_path, arg)):
                paths.append(arg)
            else:
                relpath = filesystem.relpath(arg, self._base_path)
                if filesystem.exists(filesystem.join(self._base_path, relpath)):
                    paths.append(filesystem.normpath(relpath))
                else:
                    _log.warning('Path was not found: ' + arg)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []

        test_runner_count = DEFAULT_TEST_RUNNER_COUNT
        if self._options.test_runner_count > 0:
            test_runner_count = self._options.test_runner_count
        elif self._options.profile:
            test_runner_count = 1

        for path in test_files:
            relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
            if self._options.use_skipped_list and self._port.skips_perf_test(relative_path) and filesystem.normpath(relative_path) not in paths:
                continue
            if relative_path.endswith('/index.html'):
                relative_path = relative_path[0:-len('/index.html')]
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path, test_runner_count=test_runner_count)
            tests.append(test)

        return tests

    def run(self):
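        """Build-check, prepare, and run the collected tests.

        Runs the test set --repeat times. Unless --no-results or --profile is
        given, each run writes the JSON results file and the HTML results
        page, and the JSON is optionally uploaded to --test-results-server.
        Returns the number of failed tests from the last run, or a negative
        EXIT_CODE_* constant on error.
        """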
        if "Debug" == self._port.get_option("configuration"):
            _log.warning("""****************************************************
* WARNING: run-perf-tests is running in DEBUG mode *
****************************************************""")

        if not self._port.check_build():
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._port.check_sys_deps():
            _log.error("Failed to check system dependencies.")
            self._port.stop_helper()
            return self.EXIT_CODE_BAD_PREPARATION

        run_count = 0
        repeat = self._options.repeat
        while (run_count < repeat):
            run_count += 1

            tests = self._collect_tests()
            runs = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
            _log.info("Running %d tests%s" % (len(tests), runs))

            for test in tests:
                if not test.prepare(self._options.time_out_ms):
                    return self.EXIT_CODE_BAD_PREPARATION

            unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))

            if self._options.generate_results and not self._options.profile:
                exit_code = self._generate_results()
                if exit_code:
                    return exit_code

        if self._options.generate_results and not self._options.profile:
            test_results_server = self._options.test_results_server
            if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
                return self.EXIT_CODE_FAILED_UPLOADING

            if self._options.show_results:
                self._port.show_results_html_file(self._results_page_path())

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _results_page_path(self):
        return self._host.filesystem.splitext(self._output_json_path())[0] + '.html'

    def _generate_results(self):
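        """Write the results JSON file and the HTML results page.

        Returns a nonzero EXIT_CODE_* constant on failure, or None on success.
        """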
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)

        if options.worker_config_json_path:
            output = self._merge_worker_config_json(options.worker_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        filesystem = self._host.filesystem
        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
        template = filesystem.read_text_file(template_path)

        absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
        results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
        # Note: 'Peformance' (sic) matches the placeholder token used in resources/results-template.html.
        results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)

        filesystem.write_text_file(self._results_page_path(), results_page)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
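        """Build the nested results dictionary that is serialized to JSON.

        Illustrative shape (the values below are made up; the keys come from
        the code that follows):

            {
                "buildTime": "2019-01-01T00:00:00.000000",
                "platform": "gtk",
                "revisions": {"WebKit": {"revision": "...", "timestamp": "..."}},
                "tests": {
                    "SomeSuite": {
                        "url": "...",
                        "tests": {
                            "some-test": {
                                "url": "...",
                                "metrics": {"Time": {"current": [[...]]}}
                            }
                        }
                    }
                }
            }

        A metric may also carry an 'aggregators' list, and meta keys such as
        'description', 'builderName', and 'buildNumber' are only included when
        they have values.
        """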
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.native_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_native_revision(path, revision)}

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        contents = {'tests': {}}
        for key, value in list(meta_info.items()):
            if value:
                contents[key] = value

        for metric in self._results:
            tests = contents['tests']
            path = metric.path()
            for i in range(0, len(path)):
                is_last_token = i + 1 == len(path)
                url = view_source_url('PerformanceTests/' + '/'.join(path[0:i + 1]))
                test_name = path[i]

                tests.setdefault(test_name, {'url': url})
                current_test = tests[test_name]
                if is_last_token:
                    current_test['url'] = view_source_url('PerformanceTests/' + metric.test_file_name())
                    current_test.setdefault('metrics', {})
                    assert metric.name() not in current_test['metrics']
                    test_results = {'current': metric.grouped_iteration_values()}
                    if metric.aggregator():
                        test_results['aggregators'] = [metric.aggregator()]
                    current_test['metrics'][metric.name()] = test_results
                else:
                    current_test.setdefault('tests', {})
                    tests = current_test['tests']

        return contents

    @staticmethod
    def _datetime_in_ES5_compatible_iso_format(dt):
        return dt.strftime('%Y-%m-%dT%H:%M:%S.%f')

    def _merge_worker_config_json(self, worker_config_json_path, contents):
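        """Fold the bot's worker configuration into the results dictionary.

        Each key in the JSON file is stored in contents as 'builder' +
        key.capitalize(); for example, a hypothetical {"password": "..."}
        entry would become contents['builderPassword']. Returns None if the
        file is missing or cannot be parsed.
        """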
        if not self._host.filesystem.isfile(worker_config_json_path):
            _log.error('Missing worker configuration JSON file: {}'.format(worker_config_json_path))
            return None

        try:
            worker_config_json = self._host.filesystem.open_text_file_for_reading(worker_config_json_path)
            worker_config = json.load(worker_config_json)
            for key in worker_config:
                contents['builder' + key.capitalize()] = worker_config[key]
            return contents
        except Exception as error:
            _log.error('Failed to merge worker configuration JSON file {}: {}'.format(worker_config_json_path, error))
        return None

    def _merge_outputs_if_needed(self, output_json_path, output):
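        """Return the list of runs to be written to the output JSON file.

        The output file holds a list with one entry per run; unless
        --reset-results is given, entries from an existing file are kept and
        the new run is appended. Returns None if the existing file cannot be
        parsed.
        """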
        if self._options.reset_results or not self._host.filesystem.isfile(output_json_path):
            return [output]
        try:
            existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
            return existing_outputs + [output]
        except Exception as error:
            _log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
        return None

    def _upload_json(self, test_results_server, json_path, host_path="/api/report", file_uploader=FileUploader):
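        """Upload the results JSON file to the results server.

        The upload is treated as successful if the server responds with a bare
        'OK' line or with a JSON body whose 'status' is 'OK'; any other
        response is logged and reported as a failure.
        """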
        hypertext_protocol = ''
        if not test_results_server.startswith('http'):
            hypertext_protocol = 'https://'
        url = hypertext_protocol + test_results_server + host_path
        uploader = file_uploader(url, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception as error:
            _log.error("Failed to upload JSON file to %s in 120s: %s" % (url, error))
            return False

        response_body = [string_utils.decode(line, target_type=str).strip('\n') for line in response]
        if response_body != ['OK']:
            try:
                parsed_response = json.loads('\n'.join(response_body))
            except ValueError:
                _log.error("Uploaded JSON to %s but got a bad response:" % url)
                for line in response_body:
                    _log.error(line)
                return False
            if parsed_response.get('status') != 'OK':
                _log.error("Uploaded JSON to %s but got an error:" % url)
                _log.error(json.dumps(parsed_response, indent=4))
                return False

        _log.info("JSON file uploaded to %s." % url)
        return True

    def _run_tests_set(self, tests):
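        """Run each collected test once, accumulating metrics in self._results.

        Returns the number of tests that produced no metrics (i.e. failed).
        """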
        failures = 0
        self._results = []

        for i, test in enumerate(tests):
            _log.info('Running %s (%d of %d)' % (test.test_name(), i + 1, len(tests)))
            start_time = time.time()
            metrics = test.run(self._options.time_out_ms, self._options.no_timeout)

            if metrics:
                self._results += metrics
            else:
                failures += 1
                _log.error('FAILED')

            _log.info('Finished: %f s' % (time.time() - start_time))
            _log.info('')

        return failures