run-webkit-tests should have the ability to add a description to its JSON output
Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Run Inspector's perf tests in perf mode."""

import json
import logging
import optparse
import re
import sys
import time

from webkitpy.common import find_files
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.views import printing
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import ReplayPerfTest


_log = logging.getLogger(__name__)


class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
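        # The SCM object is initialized up front because _generate_results_dict()
        # later records the current revision of each repository in the results JSON.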
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
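        # Returns the (options, args) pair from optparse; any leftover positional
        # arguments are treated by _collect_tests() as test files or directories.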
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let user attach a performance monitor."),
            optparse.make_option("--no-results", action="store_true", default=False,
                help="Do not generate results JSON and results page."),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summarizes the results."),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present."),
            optparse.make_option("--description",
                help="Add a description to the output JSON file if one is generated"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
                help="Run replay tests."),
            optparse.make_option("--force", dest="skipped", action="store_true", default=False,
                help="Run all tests, including the ones in the Skipped list."),
            ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
            if self._port.skips_perf_test(relative_path) and not self._options.skipped:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def run(self):
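        # Returns the number of tests that failed to produce results, or one of
        # the negative EXIT_CODE_* constants if the build check, test preparation,
        # or results generation step fails.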
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self.EXIT_CODE_BAD_PREPARATION

        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
        if not self._options.no_results:
            exit_code = self._generate_and_show_results()
            if exit_code:
                return exit_code

        return unexpected

    def _generate_and_show_results(self):
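        # Writes the results JSON (and, when not uploading, a self-contained
        # results page next to it), then either uploads the JSON to
        # --test-results-server or opens the local results page. Returns an
        # EXIT_CODE_* constant on failure and None on success.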
        options = self._options
        output_json_path = options.output_json_path
        if not output_json_path:
            output_json_path = self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

        output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)

        if options.source_json_path:
            output = self._merge_source_json(options.source_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        test_results_server = options.test_results_server
        results_page_path = None
        if not test_results_server:
            output = self._merge_outputs(output_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_MERGE
            results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'

        self._generate_output_files(output_json_path, results_page_path, output)

        if test_results_server:
            if not self._upload_json(test_results_server, output_json_path):
                return self.EXIT_CODE_FAILED_UPLOADING
        else:
            self._port.show_results_html_file(results_page_path)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
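        # Builds the dictionary serialized as one entry of the output JSON: the
        # test 'results', the optional 'description', a '<repository>-revision'
        # entry per checkout, and whichever of 'timestamp', 'branch', 'platform',
        # 'builder-name' and 'build-number' have non-empty values.
        # Illustrative (not literal) shape of one entry:
        #   {"results": {...}, "description": "Fast CPU bot", "webkit-revision": "123456",
        #    "timestamp": 1342185096, "branch": "webkit-trunk", "platform": "chromium-mac"}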
        contents = {'results': self._results}
        if description:
            contents['description'] = description
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        # FIXME: Add --branch or auto-detect the branch we're in
        for key, value in {'timestamp': int(timestamp), 'branch': self._default_branch, 'platform': platform,
            'builder-name': builder_name, 'build-number': int(build_number) if build_number else None}.items():
            if value:
                contents[key] = value

        return contents

    def _merge_source_json(self, source_json_path, output):
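        # Folds the key/value pairs from --source-json-path into the generated
        # output; on a key collision the generated output wins. Returns None
        # (and logs an error) if the file cannot be read, parsed, or merged.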
        try:
            source_json_file = self._host.filesystem.open_text_file_for_reading(source_json_path)
            source_json = json.load(source_json_file)
            return dict(source_json.items() + output.items())
        except Exception, error:
            _log.error("Failed to merge source JSON file %s: %s" % (source_json_path, error))
        return None

    def _merge_outputs(self, output_json_path, output):
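        # The output JSON is a list of runs; append this run to any runs already
        # recorded in output_json_path, or start a new single-element list.
        # Returns None (and logs an error) if the existing file cannot be parsed.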
        if not self._host.filesystem.isfile(output_json_path):
            return [output]
        try:
            existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
            return existing_outputs + [output]
        except Exception, error:
            _log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
        return None

    def _generate_output_files(self, output_json_path, results_page_path, output):
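        # Writes the JSON results file and, if results_page_path is given, an
        # HTML results page built from resources/results-template.html with
        # jQuery and the JSON embedded inline.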
        filesystem = self._host.filesystem

        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        if results_page_path:
            jquery_path = filesystem.join(self._port.perf_tests_dir(), 'Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js')
            jquery = filesystem.read_text_file(jquery_path)

            template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
            template = filesystem.read_text_file(template_path)

            results_page = template.replace('<?WebKitPerfTestRunnerInsertionPoint?>',
                '<script>%s</script><script id="json">%s</script>' % (jquery, json_output))

            filesystem.write_text_file(results_page_path, results_page)

    def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
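        # Uploads the JSON file to https://<test_results_server>/api/test/report
        # with a 120 second timeout and treats any response other than a single
        # "OK" line as a failure.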
        uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception, error:
            _log.error("Failed to upload JSON file in 120s: %s" % error)
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            _log.error("Uploaded JSON but got a bad response:")
            for line in response_body:
                _log.error(line)
            return False

        _log.info("JSON file uploaded.")
        return True

    def _print_status(self, tests, expected, unexpected):
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d didn't run)" % unexpected
        _log.info(status)

    def _run_tests_set(self, tests, port):
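        # Runs each test in its own driver instance (optionally pausing first so
        # a performance monitor can be attached) and returns the number of tests
        # that failed to produce results.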
        result_count = len(tests)
        expected = 0
        unexpected = 0
        driver = None

        for test in tests:
            driver = port.create_driver(worker_number=1, no_timeout=True)

            if self._options.pause_before_testing:
                driver.start()
                if not self._host.user.confirm("Ready to run test?"):
                    driver.stop()
                    return unexpected

            _log.info('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
            if self._run_single_test(test, driver):
                expected = expected + 1
            else:
                unexpected = unexpected + 1

            _log.info('')

            driver.stop()

        return unexpected

    def _run_single_test(self, test, driver):
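        # Runs one test, merges its measurements into self._results, and returns
        # True on success.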
        start_time = time.time()

        new_results = test.run(driver, self._options.time_out_ms)
        if new_results:
            self._results.update(new_results)
        else:
            _log.error('FAILED')

        _log.debug("Finished: %f s" % (time.time() - start_time))

        return new_results != None