run-perf-tests should record individual values instead of statistics
[WebKit-https.git] Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
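The change named in the title is that each entry in self._results keeps the individually measured values under a 'values' key in addition to the aggregate statistics; _generate_and_show_results() below strips that key again when uploading to webkit-perf.appspot.com, which does not understand it yet (see the FIXME there). As a hypothetical illustration, assuming the statistics keys produced by perftest.py (which is not part of this file), one entry might look like:

# Hypothetical shape of one entry in self._results. Key names other than
# 'values' are assumptions about perftest.py, and all numbers are made up.
example_result = {
    'Bindings/event-target-wrapper': {
        'avg': 1489.05, 'median': 1487.0, 'stdev': 14.46,
        'min': 1471.0, 'max': 1510.0, 'unit': 'ms',
        'values': [1471.0, 1510.0, 1487.0, 1493.0, 1484.25],  # individual samples
    },
}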
#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Run Inspector's perf tests in perf mode."""

import json
import logging
import optparse
import re
import sys
import time

from webkitpy.common import find_files
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.views import printing
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import ReplayPerfTest


_log = logging.getLogger(__name__)


class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let user attach a performance monitor."),
            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                help="Do not generate results JSON and results page."),
            optparse.make_option("--output-json-path",
                help="Path to generate a JSON file at; may contain previous results if it already exists."),
            optparse.make_option("--source-json-path",  # FIXME: Rename it to signify the fact it's a slave configuration.
                help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option("--description",
                help="Add a description to the output JSON file if one is generated"),
            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
                help="Don't launch a browser with results after the tests are done"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
                help="Run replay tests."),
            optparse.make_option("--force", dest="skipped", action="store_true", default=False,
                help="Run all tests, including the ones in the Skipped list."),
            ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
            if self._port.skips_perf_test(relative_path) and not self._options.skipped:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def run(self):
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self.EXIT_CODE_BAD_PREPARATION

        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
        if self._options.generate_results:
            exit_code = self._generate_and_show_results()
            if exit_code:
                return exit_code

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _generate_and_show_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)

        if options.source_json_path:
            output = self._merge_slave_config_json(options.source_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        test_results_server = options.test_results_server
        results_page_path = None
        if not test_results_server:
            # Local run: merge with any previous results stored in the JSON file
            # and generate an HTML results page next to it.
            output = self._merge_outputs(output_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_MERGE
            results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'
        else:
            # FIXME: Remove this code once webkit-perf.appspot.com supports "values".
            for result in output['results'].values():
                if isinstance(result, dict) and 'values' in result:
                    del result['values']

        self._generate_output_files(output_json_path, results_page_path, output)

        if test_results_server:
            if not self._upload_json(test_results_server, output_json_path):
                return self.EXIT_CODE_FAILED_UPLOADING
        elif options.show_results:
            self._port.show_results_html_file(results_page_path)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        contents = {'results': self._results}
        if description:
            contents['description'] = description
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        # FIXME: Add --branch or auto-detect the branch we're in
        for key, value in {'timestamp': int(timestamp), 'branch': self._default_branch, 'platform': platform,
            'builder-name': builder_name, 'build-number': int(build_number) if build_number else None}.items():
            if value:
                contents[key] = value

        return contents
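        # Illustrative only (not part of the original file): with a typical bot
        # configuration the dictionary built above might look like
        #     {'results': {...}, 'timestamp': 1332982342, 'branch': 'webkit-trunk',
        #      'platform': 'chromium-mac', 'builder-name': 'google-mac-2',
        #      'build-number': 55, 'webkit-revision': 112723}
        # where the '*-revision' key names depend on self._port.repository_paths()
        # and every concrete value here is made up for illustration.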

    def _merge_slave_config_json(self, slave_config_json_path, output):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            return dict(slave_config.items() + output.items())
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None

    def _merge_outputs(self, output_json_path, output):
        # The output JSON file holds a list of runs; append this run's output
        # to whatever is already stored at output_json_path.
        if not self._host.filesystem.isfile(output_json_path):
            return [output]
        try:
            existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
            return existing_outputs + [output]
        except Exception, error:
            _log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
        return None

    def _generate_output_files(self, output_json_path, results_page_path, output):
        filesystem = self._host.filesystem

        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        if results_page_path:
            template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
            template = filesystem.read_text_file(template_path)

            absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
            results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
            results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)

            filesystem.write_text_file(results_page_path, results_page)

    def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
        uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception, error:
            _log.error("Failed to upload JSON file in 120s: %s" % error)
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            _log.error("Uploaded JSON but got a bad response:")
            for line in response_body:
                _log.error(line)
            return False

        _log.info("JSON file uploaded.")
        return True

    def _print_status(self, tests, expected, unexpected):
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d didn't run)" % unexpected
        _log.info(status)

    def _run_tests_set(self, tests, port):
        result_count = len(tests)
        expected = 0
        unexpected = 0
        driver = None

        for test in tests:
            driver = port.create_driver(worker_number=1, no_timeout=True)

            if self._options.pause_before_testing:
                driver.start()
                if not self._host.user.confirm("Ready to run test?"):
                    driver.stop()
                    return unexpected

            _log.info('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
            if self._run_single_test(test, driver):
                expected = expected + 1
            else:
                unexpected = unexpected + 1

            _log.info('')

            driver.stop()

        return unexpected

    def _run_single_test(self, test, driver):
        start_time = time.time()

        new_results = test.run(driver, self._options.time_out_ms)
        if new_results:
            self._results.update(new_results)
        else:
            _log.error('FAILED')

        _log.info("Finished: %f s" % (time.time() - start_time))

        return new_results is not None
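
For context, here is a minimal sketch (not part of this file) of how a wrapper script such as Tools/Scripts/run-perf-tests might drive the class above. The logging setup and the wrapper itself are illustrative assumptions; PerfTestsRunner(args=...).run() and the EXIT_CODE_* return values come from the code shown.

#!/usr/bin/env python
# Hypothetical wrapper sketch; the real run-perf-tests script is not shown here.
import logging
import sys

from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner

logging.basicConfig(level=logging.INFO, format="%(message)s")
# run() returns the number of tests with unexpected results (0 on a clean run),
# or one of the negative EXIT_CODE_* constants on build/setup failures.
sys.exit(PerfTestsRunner(args=sys.argv[1:]).run())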