#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Run Inspector's perf tests in perf mode."""
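#
# Usage sketch: the command-line wrapper that instantiates this class is
# assumed to be Tools/Scripts/run-perf-tests; the accepted options are the
# ones defined in PerfTestsRunner._parse_args below.
#
#     runner = PerfTestsRunner(args=['--platform=chromium-mac', '--release',
#                                    '--output-json-path=perf-results.json'])
#     sys.exit(runner.run())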

import json
import logging
import optparse
import re
import sys
import time

from webkitpy.common import find_files
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.views import printing
from webkitpy.performance_tests.perftest import PerfTestFactory


_log = logging.getLogger(__name__)


class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help="The name of the builder shown on the waterfall running this script, e.g. google-mac-2."),
            optparse.make_option("--build-number",
                help="The build number of the builder running this script."),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up to date (default)."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test in milliseconds"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let the user attach a performance monitor."),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summarizes the results"),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present"),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            ]
        return optparse.OptionParser(option_list=perf_option_list).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""
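        # Test discovery: every .html or .svg file under the port's perf-tests
        # directory is a candidate, except files under ".svn" and "resources"
        # directories and tests the port explicitly skips.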

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in ['.html', '.svg']

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
            if self._port.skips_perf_test(relative_path):
                continue
            tests.append(PerfTestFactory.create_perf_test(relative_path, path))

        return tests

    def run(self):
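        """Check the build, run the collected tests, and report the results.

        Returns the number of tests that failed, or one of the negative
        _EXIT_CODE_* constants when the build is out of date, the results JSON
        cannot be generated (only reported when no test failed), or uploading
        to the results server fails.
        """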
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                branch, options.platform, options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
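        """Write a JSON summary of the results to output_json_path.

        The file contains the timestamp, the per-test results, one
        '<repository>-revision' entry per repository the port reports, and the
        branch/platform/builder-name/build-number values when provided; for
        example (illustrative values only):

            {"timestamp": 1334000000, "results": {...},
             "webkit-revision": "110000", "branch": "webkit-trunk"}

        If source_json_path is given, that file is read and merged into the
        output. Returns True on success.
        """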
        contents = {'timestamp': int(timestamp), 'results': self._results}
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        succeeded = False
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError, error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError, error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError, error:
                _log.error("Failed to merge JSON files: %s" % error)
            if not succeeded:
                return False

        filesystem.write_text_file(output_json_path, json.dumps(contents))
        return True

    def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
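        """Upload the JSON file to the test results server.

        Sends the file to https://<test_results_server>/api/test/report with a
        120-second timeout and returns True only if the server replies with a
        body of "OK".
        """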
        uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception, error:
            _log.error("Failed to upload JSON file in 120s: %s" % error)
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            _log.error("Uploaded JSON but got a bad response:")
            for line in response_body:
                _log.error(line)
            return False

        _log.info("JSON file uploaded.")
        return True

    def _print_status(self, tests, expected, unexpected):
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d didn't run)" % unexpected
        _log.info(status)

    def _run_tests_set(self, tests, port):
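        """Run each test in a fresh driver and return the number of failures.

        When --pause-before-testing is set, the driver is started first and the
        run waits for user confirmation so a performance monitor can be
        attached; declining aborts the remaining tests.
        """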
        result_count = len(tests)
        expected = 0
        unexpected = 0
        driver = None

        for test in tests:
            driver = port.create_driver(worker_number=1, no_timeout=True)

            if self._options.pause_before_testing:
                driver.start()
                if not self._host.user.confirm("Ready to run test?"):
                    driver.stop()
                    return unexpected

            _log.info('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
            if self._run_single_test(test, driver):
                expected += 1
            else:
                unexpected += 1

            _log.info('')

            driver.stop()

        return unexpected

    def _run_single_test(self, test, driver):
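        """Run one test and merge its measurements into self._results.

        Logs FAILED when the test yields no results; returns True as long as
        the test returned a result object (i.e. anything but None).
        """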
        start_time = time.time()

        new_results = test.run(driver, self._options.time_out_ms)
        if new_results:
            self._results.update(new_results)
        else:
            _log.error('FAILED')

        _log.debug("Finished: %f s" % (time.time() - start_time))

        return new_results is not None