#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Run Inspector's perf tests in perf mode."""

import json
import logging
import optparse
import re
import sys
import time

from webkitpy.common import find_files
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.views import printing
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import ReplayPerfTest


_log = logging.getLogger(__name__)


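# This module has no __main__ entry point of its own; in the WebKit tree it is
# typically driven by the Tools/Scripts/run-perf-tests wrapper, which (roughly)
# does the following -- a sketch, not the wrapper's exact code:
#
#   runner = PerfTestsRunner(args=sys.argv[1:])
#   sys.exit(runner.run())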
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3
    _EXIT_CODE_BAD_PREPARATION = -4

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script, e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test in milliseconds"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let the user attach a performance monitor."),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summarizes the results."),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present."),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
                help="Run replay tests."),
            optparse.make_option("--force", dest="skipped", action="store_true", default=False,
                help="Run all tests, including the ones in the Skipped list."),
            ]
        return optparse.OptionParser(option_list=perf_option_list).parse_args(args)

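    # An illustrative command line exercising these options (the test name,
    # builder name, and server below are placeholders, not defaults):
    #
    #   run-perf-tests --release --platform=chromium-mac \
    #       --output-json-path=/tmp/perf-results.json \
    #       --test-results-server=my.results.server \
    #       --builder-name=google-mac-2 --build-number=123 \
    #       Bindings/event-target-wrapper.html
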
    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
            if self._port.skips_perf_test(relative_path) and not self._options.skipped:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

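    # _is_test_file above filters purely by extension, and find_files.find()
    # prunes the skipped directories, so a tree like this (the paths are
    # hypothetical) yields exactly one collected test:
    #
    #   PerformanceTests/Parser/tiny-innerHTML.html    -> collected
    #   PerformanceTests/Parser/resources/runner.js    -> ignored ('resources' dir, wrong extension)
    #   PerformanceTests/Parser/.svn/entries           -> ignored ('.svn' dir)
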
    def run(self):
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self._EXIT_CODE_BAD_PREPARATION

        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                branch, options.platform, options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

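    # run() doubles as the process exit status: it returns the number of tests
    # that failed to produce results (0 means success), or one of the negative
    # _EXIT_CODE_* constants above when the build check, test preparation, JSON
    # generation, or upload fails outright.
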
    def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
        contents = {'timestamp': int(timestamp), 'results': self._results}
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        succeeded = False
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError, error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError, error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError, error:
                _log.error("Failed to merge JSON files: %s" % error)
            if not succeeded:
                return False

        filesystem.write_text_file(output_json_path, json.dumps(contents))
        return True

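    # The file written above ends up looking roughly like this (the values are
    # illustrative; the revision keys depend on the port's repository_paths()
    # and the per-test entries on what each PerfTest reports):
    #
    #   {
    #       "timestamp": 1332950000,
    #       "webkit-revision": 111000,
    #       "branch": "webkit-trunk",
    #       "platform": "chromium-mac",
    #       "builder-name": "google-mac-2",
    #       "build-number": 123,
    #       "results": {"Parser/tiny-innerHTML": {"avg": 45.0, ...}}
    #   }
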
    def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
        uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception, error:
            _log.error("Failed to upload JSON file in 120s: %s" % error)
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            _log.error("Uploaded JSON but got a bad response:")
            for line in response_body:
                _log.error(line)
            return False

        _log.info("JSON file uploaded.")
        return True

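    # _upload_json takes the uploader class as an argument so unit tests can
    # swap in a stub instead of hitting the network. A minimal sketch of such a
    # stub (hypothetical; not part of this module), matching the two calls made
    # above:
    #
    #   class MockFileUploader(object):
    #       def __init__(self, url, timeout_seconds):
    #           self.url = url
    #       def upload_single_text_file(self, filesystem, content_type, path):
    #           return ['OK']
    #
    #   runner._upload_json('my.results.server', '/tmp/perf-results.json', MockFileUploader)
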
    def _print_status(self, tests, expected, unexpected):
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d didn't run)" % unexpected
        _log.info(status)

    def _run_tests_set(self, tests, port):
        result_count = len(tests)
        expected = 0
        unexpected = 0
        driver = None

        for test in tests:
            driver = port.create_driver(worker_number=1, no_timeout=True)

            if self._options.pause_before_testing:
                driver.start()
                if not self._host.user.confirm("Ready to run test?"):
                    driver.stop()
                    return unexpected

            _log.info('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
            if self._run_single_test(test, driver):
                expected = expected + 1
            else:
                unexpected = unexpected + 1

            _log.info('')

            driver.stop()

        return unexpected

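    # Note that a fresh driver (a DumpRenderTree or WebKitTestRunner process)
    # is created for each test and stopped again afterwards, so one test's
    # state cannot leak into the next.
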
    def _run_single_test(self, test, driver):
        start_time = time.time()

        new_results = test.run(driver, self._options.time_out_ms)
        if new_results:
            self._results.update(new_results)
        else:
            _log.error('FAILED')

        _log.debug("Finished: %f s" % (time.time() - start_time))

        return new_results is not None