run-perf-tests should generate a results page
Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Run WebKit's performance tests, write a JSON summary of the results, and optionally generate a results page or upload the results to a test results server."""

import json
import logging
import optparse
import re
import sys
import time

from webkitpy.common import find_files
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.views import printing
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import ReplayPerfTest


_log = logging.getLogger(__name__)


class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
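    # run() returns the number of tests that produced unexpected results, so the
    # exit codes below are negative to keep infrastructure failures distinguishable.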
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3
    _EXIT_CODE_BAD_PREPARATION = -4

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
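        """Parse run-perf-tests options with optparse and return the (options, args) pair."""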
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script, e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test in milliseconds"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let the user attach a performance monitor."),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summarizes the results."),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present."),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
                help="Run replay tests."),
            optparse.make_option("--force", dest="skipped", action="store_true", default=False,
                help="Run all tests, including the ones in the Skipped list."),
            ]
        return optparse.OptionParser(option_list=perf_option_list).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
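        # Record each command-line argument both as given and relative to the
        # PerformanceTests directory so find_files can match either form.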
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
            if self._port.skips_perf_test(relative_path) and not self._options.skipped:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def run(self):
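        """Check the build, prepare and run the collected tests, and, when requested,
        write out the JSON results and upload them to the test results server."""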
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self._EXIT_CODE_BAD_PREPARATION

        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None

            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                not test_results_server,
                branch, options.platform, options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON

            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(self, timestamp, output_json_path, source_json_path, should_generate_results_page,
        branch, platform, builder_name, build_number):
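        """Write the collected results, annotated with repository revisions and any
        builder metadata, to output_json_path; returns False if merging the source
        JSON file fails."""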
175
176         contents = {'timestamp': int(timestamp), 'results': self._results}
177         for (name, path) in self._port.repository_paths():
178             contents[name + '-revision'] = self._host.scm().svn_revision(path)
179
180         for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
181             if value:
182                 contents[key] = value
183
184         filesystem = self._host.filesystem
185         succeeded = False
186         if source_json_path:
187             try:
188                 source_json_file = filesystem.open_text_file_for_reading(source_json_path)
189                 source_json = json.load(source_json_file)
190                 contents = dict(source_json.items() + contents.items())
191                 succeeded = True
192             except IOError, error:
193                 _log.error("Failed to read %s: %s" % (source_json_path, error))
194             except ValueError, error:
195                 _log.error("Failed to parse %s: %s" % (source_json_path, error))
196             except TypeError, error:
197                 _log.error("Failed to merge JSON files: %s" % error)
198             if not succeeded:
199                 return False
200
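        # When a results page is being generated, the output JSON holds an array of
        # runs; append this run to whatever the file already contains.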
        if should_generate_results_page:
            if filesystem.isfile(output_json_path):
                existing_contents = json.loads(filesystem.read_text_file(output_json_path))
                existing_contents.append(contents)
                contents = existing_contents
            else:
                contents = [contents]

        serialized_contents = json.dumps(contents)
        filesystem.write_text_file(output_json_path, serialized_contents)

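        # Produce a self-contained results page by splicing jQuery and the serialized
        # results into the template's insertion point.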
        if should_generate_results_page:
            jquery_path = filesystem.join(self._port.perf_tests_dir(), 'Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js')
            jquery = filesystem.read_text_file(jquery_path)

            template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
            template = filesystem.read_text_file(template_path)

            results_page = template.replace('<?WebKitPerfTestRunnerInsertionPoint?>',
                '<script>%s</script><script id="json">%s</script>' % (jquery, serialized_contents))

            filesystem.write_text_file(filesystem.splitext(output_json_path)[0] + '.html', results_page)

        return True

    def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
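        """Upload the JSON file to https://<test_results_server>/api/test/report with a
        120-second timeout; the server is expected to reply with a body of 'OK'."""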
        uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception, error:
            _log.error("Failed to upload JSON file in 120s: %s" % error)
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            _log.error("Uploaded JSON but got a bad response:")
            for line in response_body:
                _log.error(line)
            return False

        _log.info("JSON file uploaded.")
        return True

    def _print_status(self, tests, expected, unexpected):
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d didn't run)" % unexpected
        _log.info(status)

    def _run_tests_set(self, tests, port):
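        """Run each test with a freshly created driver and return the number of tests
        that produced unexpected results."""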
        result_count = len(tests)
        expected = 0
        unexpected = 0
        driver = None

        for test in tests:
            driver = port.create_driver(worker_number=1, no_timeout=True)

            if self._options.pause_before_testing:
                driver.start()
                if not self._host.user.confirm("Ready to run test?"):
                    driver.stop()
                    return unexpected

            _log.info('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
            if self._run_single_test(test, driver):
                expected = expected + 1
            else:
                unexpected = unexpected + 1

            _log.info('')

            driver.stop()

        return unexpected

    def _run_single_test(self, test, driver):
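        """Run a single test, merge its results into self._results, and return whether
        the test produced any results."""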
        start_time = time.time()

        new_results = test.run(driver, self._options.time_out_ms)
        if new_results:
            self._results.update(new_results)
        else:
            _log.error('FAILED')

        _log.debug("Finished: %f s" % (time.time() - start_time))

        return new_results is not None