# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 """Run Inspector's perf tests in perf mode."""
39 from webkitpy.common import find_files
40 from webkitpy.common.host import Host
41 from webkitpy.common.net.file_uploader import FileUploader
42 from webkitpy.layout_tests.port.driver import DriverInput
43 from webkitpy.layout_tests.views import printing
45 _log = logging.getLogger(__name__)


class PerfTestsRunner(object):
    _test_directories_for_chromium_style_tests = ['inspector']
    _default_branch = 'webkit-trunk'
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3

    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
        self._buildbot_output = buildbot_output
        self._options, self._args = PerfTestsRunner._parse_args(args)
        # Use the injected port when one is supplied (e.g. by unit tests);
        # otherwise build one from the --platform option.
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output)
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()
    @staticmethod
    def _parse_args(args=None):
        print_options = printing.print_options()

        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help="The name of the builder shown on the waterfall running this script, e.g. google-mac-2."),
            optparse.make_option("--build-number",
                help="The build number of the builder running this script."),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let user attach a performance monitor."),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summarizes the results"),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present"),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            ]

        option_list = (perf_option_list + print_options)
        return optparse.OptionParser(option_list=option_list).parse_args(args)
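
    # Illustrative call (not in the original file; the argv values are hypothetical):
    #     options, args = PerfTestsRunner._parse_args(['--release', '--platform', 'chromium-mac', 'inspector'])
    # would yield options.configuration == 'Release', options.platform == 'chromium-mac',
    # and the positional args ['inspector'].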

    def _collect_tests(self):
        """Return the list of tests found."""

        def _is_test_file(filesystem, dirname, filename):
            return filename.endswith('.html')

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = self._host.filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        tests = find_files.find(self._host.filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        return [test for test in tests if not self._port.skips_perf_test(self._port.relative_perf_test_filename(test))]
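
    # For example (illustrative, assuming the usual PerformanceTests layout), this
    # returns absolute paths such as
    #     ['/path/to/PerformanceTests/Parser/html-parser.html', ...]
    # minus any tests the port has marked as skipped.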

    def run(self):
        if self._options.help_printing:
            self._printer.help_printing()
            self._printer.cleanup()
            return 0

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        try:
            tests = self._collect_tests()
            unexpected = self._run_tests_set(sorted(list(tests)), self._port)
        finally:
            self._printer.cleanup()

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                    branch, options.platform, options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
        contents = {'timestamp': int(timestamp), 'results': self._results}
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        # Only record the optional metadata fields that were actually supplied.
        for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        succeeded = False
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError, error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError, error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError, error:
                _log.error("Failed to merge JSON files: %s" % error)
            if not succeeded:
                return False

        filesystem.write_text_file(output_json_path, json.dumps(contents))
        return True
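
    # Illustrative shape of the JSON written above (field values are hypothetical):
    #     {"timestamp": 1325376000, "branch": "webkit-trunk", "platform": "chromium-mac",
    #      "webkit-revision": "104856", "results": {"Parser/html-parser": {"avg": 1234.5, ...}}}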

    def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
        uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception, error:
            _log.error("Failed to upload JSON file in 120s: %s" % error)
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            _log.error("Uploaded JSON but got a bad response:")
            for line in response_body:
                _log.error(line)
            return False

        self._printer.write("JSON file uploaded.")
        return True

    def _print_status(self, tests, expected, unexpected):
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d didn't run)" % unexpected
        self._printer.write(status)

    def _run_tests_set(self, tests, port):
        result_count = len(tests)
        expected = 0
        unexpected = 0
        driver = None

        for test in tests:
            driver = port.create_driver(worker_number=1, no_timeout=True)

            if self._options.pause_before_testing:
                driver.start()
                if not self._host.user.confirm("Ready to run test?"):
                    driver.stop()
                    return unexpected

            relative_test_path = self._host.filesystem.relpath(test, self._base_path)
            self._printer.write('Running %s (%d of %d)' % (relative_test_path, expected + unexpected + 1, len(tests)))

            is_chromium_style = self._host.filesystem.split(relative_test_path)[0] in self._test_directories_for_chromium_style_tests
            if self._run_single_test(test, driver, is_chromium_style):
                expected = expected + 1
            else:
                unexpected = unexpected + 1

            self._printer.write('')

            driver.stop()

        return unexpected

    _inspector_result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')
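    # Illustrative line this regex matches (test name and values are hypothetical):
    #     RESULT timeline-layout = 123.45 ms
    # The captured name has its spaces stripped before being recorded below.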

    def _process_chromium_style_test_result(self, test, output):
        test_failed = False
        got_a_result = False
        for line in re.split('\n', output.text):
            resultLine = self._inspector_result_regex.match(line)
            if resultLine:
                self._results[resultLine.group('name').replace(' ', '')] = float(resultLine.group('value'))
                self._buildbot_output.write("%s\n" % line)
                got_a_result = True
            elif line:
                test_failed = True
                self._printer.write("%s" % line)
        return test_failed or not got_a_result

    _lines_to_ignore_in_parser_result = [
        re.compile(r'^Running \d+ times$'),
        re.compile(r'^Ignoring warm-up '),
        re.compile(r'^Info:'),
        re.compile(r'^\d+(\.\d+)?$'),
        # The following handle existing tests such as Dromaeo.
        re.compile(re.escape("""main frame - has 1 onunload handler(s)""")),
        re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)""")),
        re.compile(re.escape("""frame "<!--framePath //<!--frame0-->/<!--frame0-->-->" - has 1 onunload handler(s)"""))]

    def _should_ignore_line_in_parser_test_result(self, line):
        if not line:
            return True
        for regex in self._lines_to_ignore_in_parser_result:
            if regex.search(line):
                return True
        return False

    def _process_parser_test_result(self, test, output):
        test_failed = False
        filesystem = self._host.filesystem
        category, test_name = filesystem.split(filesystem.relpath(test, self._base_path))
        test_name = filesystem.splitext(test_name)[0]
        results = {}
        keys = ['avg', 'median', 'stdev', 'min', 'max']
        score_regex = re.compile(r'^(?P<key>' + r'|'.join(keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')
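        # Illustrative lines this regex matches (values hypothetical):
        #     avg 1234.5 ms
        #     median 1230.0 ms
        # One line per key in `keys` is expected, all sharing a single unit.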
        unit = 'ms'
        for line in re.split('\n', output.text):
            score = score_regex.match(line)
            if score:
                results[score.group('key')] = float(score.group('value'))
                if score.group('unit'):
                    unit = score.group('unit')
                continue

            if not self._should_ignore_line_in_parser_test_result(line):
                test_failed = True
                self._printer.write("%s" % line)

        if test_failed or set(keys) != set(results.keys()):
            return True
        self._results[filesystem.join(category, test_name).replace('\\', '/')] = results
        self._buildbot_output.write('RESULT %s: %s= %s %s\n' % (category, test_name, results['avg'], unit))
        self._buildbot_output.write(', '.join(['%s= %s %s' % (key, results[key], unit) for key in keys[1:]]) + '\n')
        return False
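
    # For a test at Parser/html-parser.html the writes above produce, e.g.
    # (values hypothetical):
    #     RESULT Parser: html-parser= 1234.5 ms
    #     median= 1230.0 ms, stdev= 5.6 ms, min= 1220.0 ms, max= 1260.0 ms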

    def _run_single_test(self, test, driver, is_chromium_style):
        test_failed = False
        start_time = time.time()

        output = driver.run_test(DriverInput(test, self._options.time_out_ms, None, False))

        if output.text is None:
            test_failed = True
            if output.timeout:
                self._printer.write('timeout: %s' % test[self._webkit_base_dir_len + 1:])
            elif output.crash:
                self._printer.write('crash: %s' % test[self._webkit_base_dir_len + 1:])
        else:
            if is_chromium_style:
                test_failed = self._process_chromium_style_test_result(test, output)
            else:
                test_failed = self._process_parser_test_result(test, output)

        if len(output.error):
            self._printer.write('error:\n%s' % output.error)
            test_failed = True

        if test_failed:
            self._printer.write('FAILED')

        self._printer.write("Finished: %f s" % (time.time() - start_time))

        return not test_failed
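

# Minimal usage sketch (not part of the original module; in the WebKit tree a
# wrapper script such as Tools/Scripts/run-perf-tests constructs the runner).
# The argv below is a hypothetical example:
#
#     if __name__ == '__main__':
#         logging.basicConfig(level=logging.INFO)
#         runner = PerfTestsRunner(args=['--platform', 'chromium', '--output-json-path', 'perf-results.json'])
#         sys.exit(runner.run())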