#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

30 """Run Inspector's perf tests in perf mode."""

import json
import logging
import optparse
import re
import sys
import time

from webkitpy.common import find_files
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.views import printing
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import ReplayPerfTest


_log = logging.getLogger(__name__)


class PerfTestsRunner(object):
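    """Discovers the tests under the port's perf_tests_dir(), runs each one through
    a test driver, and writes (and optionally uploads) a JSON summary of the results."""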
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
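        """Defines the performance-test command-line options and parses args.

        Returns the (options, args) tuple produced by optparse."""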
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script, e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test, in milliseconds"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let the user attach a performance monitor."),
            optparse.make_option("--no-results", action="store_true", default=False,
                help="Do not generate the results JSON and results page."),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summarizes the results."),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present."),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
                help="Run replay tests."),
            optparse.make_option("--force", dest="skipped", action="store_true", default=False,
                help="Run all tests, including the ones in the Skipped list."),
            ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
            if self._port.skips_perf_test(relative_path) and not self._options.skipped:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def run(self):
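        """Checks the build, prepares and runs every collected test, and generates the results.

        Returns the number of unexpected failures (0 on success), or one of the
        negative EXIT_CODE_* constants on error."""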
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self.EXIT_CODE_BAD_PREPARATION

        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
        if not self._options.no_results:
            exit_code = self._generate_and_show_results()
            if exit_code:
                return exit_code

        return unexpected

    def _generate_and_show_results(self):
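        """Writes the results JSON (and results page) and either uploads it or shows it locally.

        Returns None on success, or an EXIT_CODE_* constant on failure."""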
        options = self._options
        output_json_path = options.output_json_path
        if not output_json_path:
            output_json_path = self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

        output = self._generate_results_dict(self._timestamp, options.platform, options.builder_name, options.build_number)

        if options.source_json_path:
            output = self._merge_source_json(options.source_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        test_results_server = options.test_results_server
        results_page_path = None
        if not test_results_server:
            output = self._merge_outputs(output_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_MERGE
            results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'

        self._generate_output_files(output_json_path, results_page_path, output)

        if test_results_server:
            if not self._upload_json(test_results_server, output_json_path):
                return self.EXIT_CODE_FAILED_UPLOADING
        else:
            self._port.show_results_html_file(results_page_path)

    def _generate_results_dict(self, timestamp, platform, builder_name, build_number):
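        """Builds the top-level results dictionary: per-test results, repository revisions, and build metadata."""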
        contents = {'results': self._results}
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        # FIXME: Add --branch or auto-detect the branch we're in
        for key, value in {'timestamp': int(timestamp), 'branch': self._default_branch, 'platform': platform,
            'builder-name': builder_name, 'build-number': int(build_number) if build_number else None}.items():
            if value:
                contents[key] = value

        return contents

    def _merge_source_json(self, source_json_path, output):
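        """Merges the key/value pairs from the given JSON file into the output dictionary.

        Returns the merged dictionary, or None if the file cannot be read or parsed."""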
        try:
            source_json_file = self._host.filesystem.open_text_file_for_reading(source_json_path)
            source_json = json.load(source_json_file)
            return dict(source_json.items() + output.items())
        except Exception, error:
            _log.error("Failed to merge source JSON file %s: %s" % (source_json_path, error))
        return None

    def _merge_outputs(self, output_json_path, output):
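        """Appends this run's output to any previously recorded runs in output_json_path.

        Returns the list of run dictionaries, or None if the existing file cannot be parsed."""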
        if not self._host.filesystem.isfile(output_json_path):
            return [output]
        try:
            existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
            return existing_outputs + [output]
        except Exception, error:
            _log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
        return None

    def _generate_output_files(self, output_json_path, results_page_path, output):
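        """Writes the results JSON file and, when results_page_path is given, an HTML results page
        built from resources/results-template.html with jQuery and the JSON inlined into it."""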
        filesystem = self._host.filesystem

        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        if results_page_path:
            jquery_path = filesystem.join(self._port.perf_tests_dir(), 'Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js')
            jquery = filesystem.read_text_file(jquery_path)

            template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
            template = filesystem.read_text_file(template_path)

            results_page = template.replace('<?WebKitPerfTestRunnerInsertionPoint?>',
                '<script>%s</script><script id="json">%s</script>' % (jquery, json_output))

            filesystem.write_text_file(results_page_path, results_page)

    def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
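        """Uploads the JSON file to https://<test_results_server>/api/test/report with a 120-second timeout.

        Returns True only when the server responds with a bare 'OK'."""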
        uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception, error:
            _log.error("Failed to upload JSON file in 120s: %s" % error)
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            _log.error("Uploaded JSON but got a bad response:")
            for line in response_body:
                _log.error(line)
            return False

        _log.info("JSON file uploaded.")
        return True

    def _print_status(self, tests, expected, unexpected):
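        """Logs how many tests have run so far, or a final summary once all of them have finished."""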
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d didn't run)" % unexpected
        _log.info(status)

    def _run_tests_set(self, tests, port):
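        """Runs each test in its own driver instance and returns the number of unexpected (failed) results."""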
        result_count = len(tests)
        expected = 0
        unexpected = 0
        driver = None

        for test in tests:
            driver = port.create_driver(worker_number=1, no_timeout=True)

            if self._options.pause_before_testing:
                driver.start()
                if not self._host.user.confirm("Ready to run test?"):
                    driver.stop()
                    return unexpected

            _log.info('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
            if self._run_single_test(test, driver):
                expected = expected + 1
            else:
                unexpected = unexpected + 1

            _log.info('')

            driver.stop()

        return unexpected

    def _run_single_test(self, test, driver):
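        """Runs one test with the given driver and merges its results into self._results.

        Returns True if the test produced results, False otherwise."""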
        start_time = time.time()

        new_results = test.run(driver, self._options.time_out_ms)
        if new_results:
            self._results.update(new_results)
        else:
            _log.error('FAILED')

        _log.debug("Finished: %f s" % (time.time() - start_time))

        return new_results is not None