#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Run WebKit's performance tests in perf mode."""
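
# Typical invocation (illustrative; in a WebKit checkout this module is
# normally driven by the Tools/Scripts/run-perf-tests wrapper):
#
#   run-perf-tests --platform=chromium-mac --release \
#       --output-json-path=/tmp/perf-results.json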

import json
import logging
import optparse
import re
import sys
import time

from webkitpy.common import find_files
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.port.driver import DriverInput
from webkitpy.layout_tests.views import printing
from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
from webkitpy.performance_tests.perftest import PerfTest

_log = logging.getLogger(__name__)


class PerfTestsRunner(object):
    _test_directories_for_chromium_style_tests = ['inspector']
    _default_branch = 'webkit-trunk'
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3
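    # Negative exit codes signal infrastructure failures; otherwise run()
    # returns the number of tests that failed (see _run_tests_set).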

    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
        self._buildbot_output = buildbot_output
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output)
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        print_options = printing.print_options()

        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help="The name of the builder shown on the waterfall running this script, e.g. google-mac-2."),
            optparse.make_option("--build-number",
                help="The build number of the builder running this script."),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up to date (default)."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let the user attach a performance monitor."),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summarizes the results"),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present"),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            ]

        option_list = (perf_option_list + print_options)
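        # Example (illustrative): passing ['--platform=chromium-mac', '--release']
        # here yields options.platform == 'chromium-mac' and
        # options.configuration == 'Release'.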
        return optparse.OptionParser(option_list=option_list).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""
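        # Tests under the 'inspector' directory emit Chromium-style RESULT
        # lines and are wrapped in ChromiumStylePerfTest; any other .html file
        # becomes a plain PerfTest (illustrative names:
        # 'inspector/console-300-lines.html' vs. 'Parser/tiny-innerHTML.html').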

        def _is_test_file(filesystem, dirname, filename):
            return filename.endswith('.html')

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path)
            if self._port.skips_perf_test(relative_path):
                continue
            test_name = relative_path.replace('\\', '/')
            dirname = filesystem.dirname(path)
            if self._host.filesystem.dirname(relative_path) in self._test_directories_for_chromium_style_tests:
                tests.append(ChromiumStylePerfTest(test_name, dirname, path))
            else:
                tests.append(PerfTest(test_name, dirname, path))

        return tests

    def run(self):
        if self._options.help_printing:
            self._printer.help_printing()
            self._printer.cleanup()
            return 0

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        try:
            tests = self._collect_tests()
            unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
        finally:
            self._printer.cleanup()

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                branch, options.platform, options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
        contents = {'timestamp': int(timestamp), 'results': self._results}
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        succeeded = False
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError as error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError as error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError as error:
                _log.error("Failed to merge JSON files: %s" % error)
            if not succeeded:
                return False

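        # The output file holds a single JSON object, e.g. (illustrative):
        #   {"timestamp": 1334000000, "results": {"Parser/tiny-innerHTML": 40.0},
        #    "webkit-revision": "112233", "branch": "webkit-trunk"}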
        filesystem.write_text_file(output_json_path, json.dumps(contents))
        return True

    def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
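        # POSTs the JSON file to https://<server>/api/test/report with a
        # 120-second timeout; the server is expected to answer with a body of
        # exactly 'OK'.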
        uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception as error:
            _log.error("Failed to upload JSON file in 120s: %s" % error)
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            _log.error("Uploaded JSON but got a bad response:")
            for line in response_body:
                _log.error(line)
            return False

        self._printer.write("JSON file uploaded.")
        return True

    def _print_status(self, tests, expected, unexpected):
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d failed)" % unexpected
        self._printer.write(status)

    def _run_tests_set(self, tests, port):
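        # Each test runs in a fresh driver so that a crash or hang in one test
        # cannot affect the next; the return value is the number of tests that
        # failed to produce results.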
        result_count = len(tests)
        expected = 0
        unexpected = 0
        driver = None

        for test in tests:
            driver = port.create_driver(worker_number=1, no_timeout=True)

            if self._options.pause_before_testing:
                driver.start()
                if not self._host.user.confirm("Ready to run test?"):
                    driver.stop()
                    return unexpected

            self._printer.write('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
            if self._run_single_test(test, driver):
                expected += 1
            else:
                unexpected += 1

            self._printer.write('')

            driver.stop()

        return unexpected

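    # Matches Chromium-style result lines, e.g. (illustrative):
    #   RESULT group_name: trace_name= 42.5 ms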
    _inspector_result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')

    def _process_chromium_style_test_result(self, test, output):
        test_failed = False
        got_a_result = False
        for line in re.split('\n', output.text):
            result_line = self._inspector_result_regex.match(line)
            if result_line:
                # FIXME: Store the unit
                self._results[result_line.group('name').replace(' ', '')] = float(result_line.group('value'))
                self._buildbot_output.write("%s\n" % line)
                got_a_result = True
            elif line:
                test_failed = True
                self._printer.write("%s" % line)
        return test_failed or not got_a_result

    def _run_single_test(self, test, driver):
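        # Returns True when the test produced parsable results; a timeout, a
        # crash, or any output on stderr is treated as a failure.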
        start_time = time.time()

        output = driver.run_test(DriverInput(test.path_or_url(), self._options.time_out_ms, None, False))
        new_results = None

        if output.text is None:
            pass
        elif output.timeout:
            self._printer.write('timeout: %s' % test.test_name())
        elif output.crash:
            self._printer.write('crash: %s' % test.test_name())
        else:
            new_results = test.parse_output(output, self._printer, self._buildbot_output)

        if output.error:
            self._printer.write('error:\n%s' % output.error)
            new_results = None

        if new_results:
            self._results.update(new_results)
        else:
            self._printer.write('FAILED')

        self._printer.write("Finished: %f s" % (time.time() - start_time))

        return new_results is not None
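

# Minimal usage sketch (illustrative; assumes a WebKit checkout where
# webkitpy is importable):
#
#   runner = PerfTestsRunner(args=sys.argv[1:])
#   sys.exit(runner.run())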