Enable SVG page loading performance tests
[WebKit-https.git] / Tools / Scripts / webkitpy / performance_tests / perftestsrunner.py
1 #!/usr/bin/env python
2 # Copyright (C) 2012 Google Inc. All rights reserved.
3 #
4 # Redistribution and use in source and binary forms, with or without
5 # modification, are permitted provided that the following conditions are
6 # met:
7 #
8 #     * Redistributions of source code must retain the above copyright
9 # notice, this list of conditions and the following disclaimer.
10 #     * Redistributions in binary form must reproduce the above
11 # copyright notice, this list of conditions and the following disclaimer
12 # in the documentation and/or other materials provided with the
13 # distribution.
14 #     * Neither the name of Google Inc. nor the names of its
15 # contributors may be used to endorse or promote products derived from
16 # this software without specific prior written permission.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
"""Run WebKit's performance tests (Inspector-style, page-loading, and generic perf tests) in perf mode."""
31
32 import json
33 import logging
34 import optparse
35 import re
36 import sys
37 import time
38
39 from webkitpy.common import find_files
40 from webkitpy.common.host import Host
41 from webkitpy.common.net.file_uploader import FileUploader
42 from webkitpy.layout_tests.views import printing
43 from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
44 from webkitpy.performance_tests.perftest import PageLoadingPerfTest
45 from webkitpy.performance_tests.perftest import PerfTest
46
47
48 _log = logging.getLogger(__name__)
49
50
class PerfTestsRunner(object):
    """Finds, runs, and reports results for WebKit performance tests.

    run() is the main entry point; it returns the number of tests that
    failed (0 on full success) or one of the negative _EXIT_CODE_*
    constants on an infrastructure error.
    """

    # Tests under inspector/ produce Chromium-style output; tests under
    # PageLoad/ are plain pages whose load time is measured by the harness.
    _pattern_for_chromium_style_tests = re.compile('^inspector/')
    _pattern_for_page_loading_tests = re.compile('^PageLoad/')
    _default_branch = 'webkit-trunk'
    # Negative so they can never collide with the non-negative unexpected
    # failure count that run() returns on a normal completion.
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3

    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
        """Create a runner.

        Args:
            regular_output: stream for diagnostic messages.
            buildbot_output: stream for buildbot-visible result lines.
            args: argv-style option list parsed by _parse_args.
            port: port object to test; when None, one is constructed from
                the --platform option.
        """
        self._buildbot_output = buildbot_output
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        # SCM access is needed later to record repository revisions in the
        # generated JSON (see _generate_json).
        self._host._initialize_scm()
        self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output)
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        """Parse command-line options; returns (options, remaining_args)."""
        print_options = printing.print_options()

        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let user attach a performance monitor."),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summaries the results"),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present"),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            ]

        option_list = (perf_option_list + print_options)
        return optparse.OptionParser(option_list=option_list).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        def _is_test_file(filesystem, dirname, filename):
            # .svg is accepted alongside .html so SVG page-loading tests run too.
            return filesystem.splitext(filename)[1] in ['.html', '.svg']

        filesystem = self._host.filesystem

        # Accept both absolute paths and paths relative to the perf tests dir.
        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path)
            if self._port.skips_perf_test(relative_path):
                continue
            # Normalize Windows separators so test names are stable across platforms.
            test_name = relative_path.replace('\\', '/')
            dirname = filesystem.dirname(path)
            # Pick the test class from the test's location in the tree.
            if self._pattern_for_chromium_style_tests.match(relative_path):
                tests.append(ChromiumStylePerfTest(test_name, dirname, path))
            elif self._pattern_for_page_loading_tests.match(relative_path):
                tests.append(PageLoadingPerfTest(test_name, dirname, path))
            else:
                tests.append(PerfTest(test_name, dirname, path))

        return tests

    def run(self):
        """Run all collected tests.

        Returns the number of unexpected failures (0 if every test passed),
        or a negative _EXIT_CODE_* value on build/JSON/upload errors.
        """
        if self._options.help_printing:
            self._printer.help_printing()
            self._printer.cleanup()
            return 0

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        try:
            tests = self._collect_tests()
            unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
        finally:
            self._printer.cleanup()

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            # Report BAD_JSON only when the tests themselves passed so that a
            # non-zero failure count takes precedence in the exit code.
            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                branch, options.platform, options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
        """Write accumulated results as JSON to output_json_path.

        When source_json_path is given, its contents are merged in (freshly
        generated values win on key conflicts). Returns True on success,
        False if the source JSON could not be read, parsed, or merged.
        """
        contents = {'timestamp': int(timestamp), 'results': self._results}
        # Record the checked-out revision of each repository for the dashboard.
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        succeeded = False
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                # Copy-then-update keeps the merge portable across Python
                # versions (dict.items() + dict.items() only works on
                # Python 2) and raises TypeError/ValueError — which we catch
                # below — when the source JSON is not an object.
                merged_contents = dict(source_json)
                merged_contents.update(contents)
                contents = merged_contents
                succeeded = True
            except IOError as error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError as error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError as error:
                _log.error("Failed to merge JSON files: %s" % error)
            if not succeeded:
                return False

        filesystem.write_text_file(output_json_path, json.dumps(contents))
        return True

    def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
        """Upload the JSON results file; returns True iff the server said OK.

        file_uploader is injectable for testing.
        """
        uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception as error:
            _log.error("Failed to upload JSON file in 120s: %s" % error)
            return False

        # The server replies with a bare "OK" line on success.
        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            _log.error("Uploaded JSON but got a bad response:")
            for line in response_body:
                _log.error(line)
            return False

        self._printer.write("JSON file uploaded.")
        return True

    def _print_status(self, tests, expected, unexpected):
        # Print progress (or a final summary once every test has finished).
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        # NOTE(review): "didn't run" reports the unexpected (failed) count —
        # wording inherited from the layout-test runner; confirm intent.
        if unexpected:
            status += " (%d didn't run)" % unexpected
        self._printer.write(status)

    def _run_tests_set(self, tests, port):
        """Run each test in its own driver; returns the failure count."""
        result_count = len(tests)
        expected = 0
        unexpected = 0
        driver = None

        for test in tests:
            # A fresh driver per test isolates tests from each other.
            driver = port.create_driver(worker_number=1, no_timeout=True)

            if self._options.pause_before_testing:
                driver.start()
                if not self._host.user.confirm("Ready to run test?"):
                    driver.stop()
                    return unexpected

            self._printer.write('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
            if self._run_single_test(test, driver):
                expected = expected + 1
            else:
                unexpected = unexpected + 1

            self._printer.write('')

            driver.stop()

        return unexpected

    def _run_single_test(self, test, driver):
        """Run one test; merge its results into self._results.

        Returns True when the test produced results, False on failure.
        """
        start_time = time.time()

        new_results = test.run(driver, self._options.time_out_ms, self._printer, self._buildbot_output)
        if new_results:
            self._results.update(new_results)
        else:
            self._printer.write('FAILED')

        self._printer.write("Finished: %f s" % (time.time() - start_time))

        return new_results is not None