[WinCairo] httpd service install needs to precede server start
[WebKit-https.git] Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Run Inspector's perf tests in perf mode."""

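# A minimal usage sketch (illustrative only). The run-perf-tests script under
# Tools/Scripts drives this class in essentially this way, exiting with the
# value returned by run():
#
#     import sys
#     from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
#
#     runner = PerfTestsRunner(args=['--release', '--platform=gtk'])
#     sys.exit(runner.run())
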
import os
import json
import logging
import optparse
import time
import datetime

from webkitpy.common import find_files
from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.config.urls import view_source_url
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT


_log = logging.getLogger(__name__)


class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)

        # Timeouts are controlled by the Python Driver, so DRT/WTR runs with no-timeout.
        self._options.additional_drt_flag.append('--no-timeout')

        # The GTK+ port only supports WebKit2, so it always uses WKTR.
        if self._port.name().startswith("gtk"):
            self._options.webkit_test_runner = True

        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test in milliseconds"),
            optparse.make_option("--no-timeout", action="store_true", default=False,
                help="Disable test timeouts"),
            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                help="Do not generate results JSON and results page."),
            optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
                help="Path to generate a JSON file at; may contain previous results if it already exists."),
            optparse.make_option("--reset-results", action="store_true",
                help="Clears the content in the generated JSON file before adding the results."),
            optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
                help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option("--description",
                help="Add a description to the output JSON file if one is generated"),
            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
                help="Don't launch a browser with results after the tests are done"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--dump-render-tree", "-1", action="store_false", default=True, dest="webkit_test_runner",
                help="Use DumpRenderTree rather than WebKitTestRunner."),
            optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
                help="Run all tests, including the ones in the Skipped list."),
            optparse.make_option("--profile", action="store_true",
                help="Output per-test profile information."),
            optparse.make_option("--profiler", action="store",
                help="Output per-test profile information, using the specified profiler."),
            optparse.make_option("--additional-drt-flag", action="append",
                default=[], help="Additional command line flag to pass to DumpRenderTree. "
                     "Specify multiple times to add multiple flags."),
            optparse.make_option("--driver-name", type="string",
                help="Alternative DumpRenderTree binary to use"),
            optparse.make_option("--repeat", default=1, type="int",
                help="Specify number of times to run test set (default: 1)."),
            optparse.make_option("--test-runner-count", default=-1, type="int",
                help="Specify number of times to invoke test runner for each performance test."),
            optparse.make_option("--wrapper",
                help="Wrapper command to insert before invocations of "
                 "DumpRenderTree or WebKitTestRunner; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
            optparse.make_option('--display-server', choices=['xvfb', 'xorg', 'weston', 'wayland'], default='xvfb',
                help='"xvfb": Use a virtualized X11 server. "xorg": Use the current X11 session. '
                     '"weston": Use a virtualized Weston server. "wayland": Use the current wayland session.'),
            ]
        return optparse.OptionParser(option_list=perf_option_list).parse_args(args)

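    # A small sketch of what _parse_args() returns (illustrative values):
    #
    #     options, args = PerfTestsRunner._parse_args(
    #         ['--release', '--platform=gtk', '--output-json-path=/tmp/results.json', 'Bindings'])
    #     # options.configuration == 'Release'
    #     # options.platform == 'gtk'
    #     # options.output_json_path == '/tmp/results.json'  (expanded by _expand_path)
    #     # args == ['Bindings']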
    def _collect_tests(self):
        test_extensions = ['.html', '.svg']

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            if filesystem.exists(filesystem.join(self._base_path, arg)):
                paths.append(arg)
            else:
                relpath = filesystem.relpath(arg, self._base_path)
                if filesystem.exists(filesystem.join(self._base_path, relpath)):
                    paths.append(filesystem.normpath(relpath))
                else:
                    _log.warning('Path was not found: ' + arg)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []

        test_runner_count = DEFAULT_TEST_RUNNER_COUNT
        if self._options.test_runner_count > 0:
            test_runner_count = self._options.test_runner_count
        elif self._options.profile:
            test_runner_count = 1

        for path in test_files:
            relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
            if self._options.use_skipped_list and self._port.skips_perf_test(relative_path) and filesystem.normpath(relative_path) not in paths:
                continue
            if relative_path.endswith('/index.html'):
                relative_path = relative_path[0:-len('/index.html')]
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path, test_runner_count=test_runner_count)
            tests.append(test)

        return tests

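    # A sketch of how test paths are resolved above (hypothetical arguments):
    #
    #     self._args == ['Bindings', 'Parser/some-test.html']
    #
    # collects every .html/.svg file under PerformanceTests/Bindings plus the
    # single Parser test, skipping anything under '.svn' or 'resources'
    # directories and any test on the port's Skipped list (unless --force was
    # given or the test was named explicitly). A 'Foo/index.html' test is
    # referred to simply as 'Foo'.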
    def run(self):
        if "Debug" == self._port.get_option("configuration"):
            _log.warning("""****************************************************
* WARNING: run-perf-tests is running in DEBUG mode *
****************************************************""")

        if not self._port.check_build():
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._port.check_sys_deps():
            _log.error("Failed to check system dependencies.")
            self._port.stop_helper()
            return self.EXIT_CODE_BAD_PREPARATION

        run_count = 0
        repeat = self._options.repeat
        while (run_count < repeat):
            run_count += 1

            tests = self._collect_tests()
            runs = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
            _log.info("Running %d tests%s" % (len(tests), runs))

            for test in tests:
                if not test.prepare(self._options.time_out_ms):
                    return self.EXIT_CODE_BAD_PREPARATION

            unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))

            if self._options.generate_results and not self._options.profile:
                exit_code = self._generate_results()
                if exit_code:
                    return exit_code

        if self._options.generate_results and not self._options.profile:
            test_results_server = self._options.test_results_server
            if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
                return self.EXIT_CODE_FAILED_UPLOADING

            if self._options.show_results:
                self._port.show_results_html_file(self._results_page_path())

        return unexpected

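    # For reference, run() returns one of the following (all taken from the code above):
    #
    #     >= 0                              number of failed tests in the final repeat
    #     EXIT_CODE_BAD_BUILD (-1)          check_build() failed; driver not up to date
    #     EXIT_CODE_BAD_SOURCE_JSON (-2)    slave configuration JSON could not be merged
    #     EXIT_CODE_BAD_MERGE (-3)          existing output JSON could not be merged
    #     EXIT_CODE_FAILED_UPLOADING (-4)   upload to --test-results-server failed
    #     EXIT_CODE_BAD_PREPARATION (-5)    check_sys_deps() or test.prepare() failed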
    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _results_page_path(self):
        return self._host.filesystem.splitext(self._output_json_path())[0] + '.html'

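    # Example of the two paths above (hypothetical value): with
    # --output-json-path=/tmp/perf/results.json the JSON goes there and the results
    # page to /tmp/perf/results.html; without the option, both default to
    # PerformanceTestsResults.json/.html under the port's perf results directory.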
    def _generate_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        filesystem = self._host.filesystem
        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
        template = filesystem.read_text_file(template_path)

        absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
        results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
        # Note: the misspelled placeholder below matches the spelling used in results-template.html.
        results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)

        filesystem.write_text_file(self._results_page_path(), results_page)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.native_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_native_revision(path, revision)}

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        contents = {'tests': {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for metric in self._results:
            tests = contents['tests']
            path = metric.path()
            for i in range(0, len(path)):
                is_last_token = i + 1 == len(path)
                url = view_source_url('PerformanceTests/' + '/'.join(path[0:i + 1]))
                test_name = path[i]

                tests.setdefault(test_name, {'url': url})
                current_test = tests[test_name]
                if is_last_token:
                    current_test['url'] = view_source_url('PerformanceTests/' + metric.test_file_name())
                    current_test.setdefault('metrics', {})
                    assert metric.name() not in current_test['metrics']
                    test_results = {'current': metric.grouped_iteration_values()}
                    if metric.aggregator():
                        test_results['aggregators'] = [metric.aggregator()]
                    current_test['metrics'][metric.name()] = test_results
                else:
                    current_test.setdefault('tests', {})
                    tests = current_test['tests']

        return contents

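    # A sketch of the dictionary shape produced above (illustrative names and values):
    #
    #     {
    #         'buildTime': '2012-02-29T01:23:45.678901',
    #         'platform': 'gtk',
    #         'revisions': {'WebKit': {'revision': '123456', 'timestamp': '...'}},
    #         'tests': {
    #             'Bindings': {
    #                 'url': <view_source_url for PerformanceTests/Bindings>,
    #                 'tests': {
    #                     'event-target-wrapper': {
    #                         'url': <view_source_url for the test file>,
    #                         'metrics': {'Time': {'current': <grouped iteration values>}},
    #                     },
    #                 },
    #             },
    #         },
    #     }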
    @staticmethod
    def _datetime_in_ES5_compatible_iso_format(datetime):
        return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')

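    # Example: datetime.datetime(2012, 2, 29, 1, 23, 45, 678901) is formatted as
    # '2012-02-29T01:23:45.678901'.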
    def _merge_slave_config_json(self, slave_config_json_path, contents):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            for key in slave_config:
                contents['builder' + key.capitalize()] = slave_config[key]
            return contents
        except Exception as error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None

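    # Example (hypothetical slave configuration contents): a file containing
    # {"id": "perf-bot-1", "key": "secret"} adds 'builderId' and 'builderKey'
    # entries to the results dictionary.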
    def _merge_outputs_if_needed(self, output_json_path, output):
        if self._options.reset_results or not self._host.filesystem.isfile(output_json_path):
            return [output]
        try:
            existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
            return existing_outputs + [output]
        except Exception as error:
            _log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
        return None

    def _upload_json(self, test_results_server, json_path, host_path="/api/report", file_uploader=FileUploader):
        hypertext_protocol = ''
        if not test_results_server.startswith('http'):
            hypertext_protocol = 'https://'
        url = hypertext_protocol + test_results_server + host_path
        uploader = file_uploader(url, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception as error:
            _log.error("Failed to upload JSON file to %s in 120s: %s" % (url, error))
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            try:
                parsed_response = json.loads('\n'.join(response_body))
            except:
                _log.error("Uploaded JSON to %s but got a bad response:" % url)
                for line in response_body:
                    _log.error(line)
                return False
            if parsed_response.get('status') != 'OK':
                _log.error("Uploaded JSON to %s but got an error:" % url)
                _log.error(json.dumps(parsed_response, indent=4))
                return False

        _log.info("JSON file uploaded to %s." % url)
        return True

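    # The reply is treated as a success when the body is the single line 'OK' or a
    # JSON object whose 'status' field is 'OK', e.g. (illustrative):
    #
    #     {"status": "OK"}
    #
    # Anything else is logged and the upload is reported as failed.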
    def _run_tests_set(self, tests):
        result_count = len(tests)
        failures = 0
        self._results = []

        for i, test in enumerate(tests):
            _log.info('Running %s (%d of %d)' % (test.test_name(), i + 1, len(tests)))
            start_time = time.time()
            metrics = test.run(self._options.time_out_ms, self._options.no_timeout)

            if metrics:
                self._results += metrics
            else:
                failures += 1
                _log.error('FAILED')

            _log.info('Finished: %f s' % (time.time() - start_time))
            _log.info('')

        return failures