Add public page loading performance tests using web-page-replay
[WebKit-https.git] / Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
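For context, here is a minimal, illustrative sketch (not part of the file below) of driving this runner programmatically with the new --replay flag; WebKit's run-perf-tests wrapper script under Tools/Scripts drives PerfTestsRunner in much the same way. It assumes a WebKit checkout where webkitpy is importable; the output path is arbitrary.

import sys

from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner

# Collect and run the performance tests (including *.replay tests because of
# --replay) and write a summary JSON file. run() returns the number of
# unexpected results, or a negative internal error code on failure.
runner = PerfTestsRunner(args=['--replay', '--output-json-path=/tmp/perf-results.json'])
sys.exit(runner.run())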
#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

30 """Run Inspector's perf tests in perf mode."""
31
32 import json
33 import logging
34 import optparse
35 import re
36 import sys
37 import time
38
39 from webkitpy.common import find_files
40 from webkitpy.common.host import Host
41 from webkitpy.common.net.file_uploader import FileUploader
42 from webkitpy.layout_tests.views import printing
43 from webkitpy.performance_tests.perftest import PerfTestFactory
44 from webkitpy.performance_tests.perftest import ReplayPerfTest
45
46
47 _log = logging.getLogger(__name__)
48
49
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3
    _EXIT_CODE_BAD_PREPARATION = -4

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        # Initialize SCM up front so that _generate_json() can query repository revisions later.
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help="The name of the builder shown on the waterfall running this script, e.g. google-mac-2."),
            optparse.make_option("--build-number",
                help="The build number of the builder running this script."),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up to date (default)."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test in milliseconds"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let the user attach a performance monitor."),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summarizes the results."),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the output JSON when --output-json-path is present."),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
                help="Run replay tests."),
            ]
        return optparse.OptionParser(option_list=perf_option_list).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
            if self._port.skips_perf_test(relative_path):
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def run(self):
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self._EXIT_CODE_BAD_PREPARATION

        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                branch, options.platform, options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

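    # The JSON written by _generate_json() has roughly this shape (illustrative values):
    #   {"timestamp": 1332990000, "results": {...},
    #    "webkit-revision": "112233", "branch": "webkit-trunk",
    #    "platform": "chromium", "builder-name": "google-mac-2", "build-number": 123}
    # plus any keys merged in from the file given by --source-json-path.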
    def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
        contents = {'timestamp': int(timestamp), 'results': self._results}
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        succeeded = False
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError, error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError, error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError, error:
                _log.error("Failed to merge JSON files: %s" % error)
            if not succeeded:
                return False

        filesystem.write_text_file(output_json_path, json.dumps(contents))
        return True

    def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
        uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception, error:
            _log.error("Failed to upload JSON file in 120s: %s" % error)
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            _log.error("Uploaded JSON but got a bad response:")
            for line in response_body:
                _log.error(line)
            return False

        _log.info("JSON file uploaded.")
        return True

    def _print_status(self, tests, expected, unexpected):
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d didn't run)" % unexpected
        _log.info(status)

    def _run_tests_set(self, tests, port):
        result_count = len(tests)
        expected = 0
        unexpected = 0
        driver = None

        for test in tests:
            driver = port.create_driver(worker_number=1, no_timeout=True)

            if self._options.pause_before_testing:
                driver.start()
                if not self._host.user.confirm("Ready to run test?"):
                    driver.stop()
                    return unexpected

            _log.info('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
            if self._run_single_test(test, driver):
                expected = expected + 1
            else:
                unexpected = unexpected + 1

            _log.info('')

            driver.stop()

        return unexpected

    def _run_single_test(self, test, driver):
        start_time = time.time()

        new_results = test.run(driver, self._options.time_out_ms)
        if new_results:
            self._results.update(new_results)
        else:
            _log.error('FAILED')

        _log.debug("Finished: %f s" % (time.time() - start_time))

        return new_results is not None