#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Run Inspector's perf tests in perf mode."""

import json
import logging
import optparse
import re
import sys
import time

from webkitpy.common import find_files
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.views import printing
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import ReplayPerfTest


_log = logging.getLogger(__name__)


class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let user attach a performance monitor."),
            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                help="Do not generate results JSON and results page."),
            optparse.make_option("--output-json-path",
                help="Path to generate a JSON file at; may contain previous results if it already exists."),
            optparse.make_option("--source-json-path",  # FIXME: Rename it to signify the fact it's a slave configuration.
                help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option("--description",
                help="Add a description to the output JSON file if one is generated"),
            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
                help="Don't launch a browser with results after the tests are done"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
                help="Run replay tests."),
            optparse.make_option("--force", dest="skipped", action="store_true", default=False,
                help="Run all tests, including the ones in the Skipped list."),
            ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
            if self._port.skips_perf_test(relative_path) and not self._options.skipped:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def run(self):
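        """Check the build, collect and prepare the tests, run them, and then
        generate, upload, or display the results.

        Returns the number of tests that failed, or a negative EXIT_CODE_*
        constant when the build, preparation, or results handling fails."""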
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self.EXIT_CODE_BAD_PREPARATION

        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
        if self._options.generate_results:
            exit_code = self._generate_and_show_results()
            if exit_code:
                return exit_code

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _generate_and_show_results(self):
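        """Write the results JSON (folding in the slave configuration and, when no
        results server is given, any previous local results) and either upload the
        JSON to the results server or build a local results page (opened in a
        browser unless --no-show-results is given).

        Returns a nonzero EXIT_CODE_* constant on failure and None on success."""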
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)

        if options.source_json_path:
            output = self._merge_slave_config_json(options.source_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        test_results_server = options.test_results_server
        results_page_path = None
        if not test_results_server:
            output = self._merge_outputs(output_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_MERGE
            results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'

        self._generate_output_files(output_json_path, results_page_path, output)

        if test_results_server:
            if not self._upload_json(test_results_server, output_json_path):
                return self.EXIT_CODE_FAILED_UPLOADING
        elif options.show_results:
            self._port.show_results_html_file(results_page_path)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
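        # The returned dictionary looks roughly like this (values are illustrative,
        # optional keys are omitted when empty, and revision keys come from
        # port.repository_paths()):
        #   {'results': {...}, 'description': '...', '<repository>-revision': '12345',
        #    'timestamp': 1234567890, 'branch': 'webkit-trunk', 'platform': 'chromium',
        #    'builder-name': 'example-bot', 'build-number': 123}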
        contents = {'results': self._results}
        if description:
            contents['description'] = description
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        # FIXME: Add --branch or auto-detect the branch we're in
        for key, value in {'timestamp': int(timestamp), 'branch': self._default_branch, 'platform': platform,
            'builder-name': builder_name, 'build-number': int(build_number) if build_number else None}.items():
            if value:
                contents[key] = value

        return contents

    def _merge_slave_config_json(self, slave_config_json_path, output):
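        """Fold the bot's slave configuration dictionary into the output dictionary.

        Returns None when the configuration file is missing or cannot be parsed."""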
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            return dict(slave_config.items() + output.items())
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None

    def _merge_outputs(self, output_json_path, output):
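        """Append this run's output to any runs already recorded in output_json_path,
        so the file accumulates a list of results.  Returns None on a parse error."""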
        if not self._host.filesystem.isfile(output_json_path):
            return [output]
        try:
            existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
            return existing_outputs + [output]
        except Exception, error:
            _log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
        return None

    def _generate_output_files(self, output_json_path, results_page_path, output):
        filesystem = self._host.filesystem

        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        if results_page_path:
            template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
            template = filesystem.read_text_file(template_path)

            absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
            results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
            results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)

            filesystem.write_text_file(results_page_path, results_page)

    def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
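        """POST the JSON file to https://<test_results_server>/api/test/report with a
        120-second timeout; returns True only if the server replies with 'OK'."""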
        uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception, error:
            _log.error("Failed to upload JSON file in 120s: %s" % error)
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            _log.error("Uploaded JSON but got a bad response:")
            for line in response_body:
                _log.error(line)
            return False

        _log.info("JSON file uploaded.")
        return True

    def _print_status(self, tests, expected, unexpected):
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d didn't run)" % unexpected
        _log.info(status)

    def _run_tests_set(self, tests, port):
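        """Run each test with a freshly created driver and return the number of
        tests that failed."""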
        result_count = len(tests)
        expected = 0
        unexpected = 0
        driver = None

        for test in tests:
            driver = port.create_driver(worker_number=1, no_timeout=True)

            if self._options.pause_before_testing:
                driver.start()
                if not self._host.user.confirm("Ready to run test?"):
                    driver.stop()
                    return unexpected

            _log.info('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
            if self._run_single_test(test, driver):
                expected = expected + 1
            else:
                unexpected = unexpected + 1

            _log.info('')

            driver.stop()

        return unexpected

    def _run_single_test(self, test, driver):
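        """Run a single test, fold its metrics into self._results, and return True on success."""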
        start_time = time.time()

        new_results = test.run(driver, self._options.time_out_ms)
        if new_results:
            self._results.update(new_results)
        else:
            _log.error('FAILED')

        _log.debug("Finished: %f s" % (time.time() - start_time))

        return new_results != None