# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Package that handles non-debug, non-file output for run-webkit-tests."""
import optparse

from webkitpy.common.net import resultsjsonparser
from webkitpy.layout_tests.models.test_expectations import TestExpectations
from webkitpy.layout_tests.views.metered_stream import MeteredStream
# Number of slowest tests reported when the 'slowest' switch is active.
NUM_SLOW_TESTS_TO_LOG = 10

# Comma-separated switch lists used when --print is omitted (default) or
# when 'everything' / --verbose is requested.
PRINT_DEFAULT = "misc,one-line-progress,one-line-summary,unexpected,unexpected-results,updates"
PRINT_EVERYTHING = "actual,config,expected,misc,one-line-progress,one-line-summary,slowest,timing,unexpected,unexpected-results,updates"

# Help text for --help-printing; the trailing %-dict keeps the text in
# sync with the constants above.
HELP_PRINTING = """
Output for run-webkit-tests is controlled by a comma-separated list of
values passed to --print. Values either influence the overall output, or
the output at the beginning of the run, during the run, or at the end:

Overall options:
    nothing             don't print anything. This overrides every other option
    default             include the default options. This is useful for logging
                        the default options plus additional settings.
    everything          print (almost) everything (except the trace-* options,
                        see below for the full list )
    misc                print miscellaneous things like blank lines

At the beginning of the run:
    config      print the test run configuration
    expected    print a summary of what is expected to happen
                (# passes, # failures, etc.)

During the run:
    one-line-progress   print a one-line progress message or bar
    unexpected          print any unexpected results as they occur
    updates             print updates on which stage is executing
    trace-everything    print detailed info on every test's results
                        (baselines, expectation, time it took to run). If
                        this is specified it will override the '*-progress'
                        options, the 'trace-unexpected' option, and the
                        'unexpected' option.
    trace-unexpected    like 'trace-everything', but only for tests with
                        unexpected results. If this option is specified,
                        it will override the 'unexpected' option.

At the end of the run:
    actual      print a summary of the actual results
    slowest     print %(slowest)d slowest tests and the time they took
    timing      print timing statistics
    unexpected-results  print a list of the tests with unexpected results
    one-line-summary    print a one-line summary of the run

Notes:
    - If 'nothing' is specified, it overrides all of the other options.
    - Specifying --verbose is equivalent to --print everything plus it
      changes the format of the log messages to add timestamps and other
      information. If you specify --verbose and --print X, then X overrides
      the --print everything implied by --verbose.

--print 'everything' is equivalent to --print '%(everything)s'.

The default (--print default) is equivalent to --print '%(default)s'.
""" % {'slowest': NUM_SLOW_TESTS_TO_LOG, 'everything': PRINT_EVERYTHING,
       'default': PRINT_DEFAULT}
def print_options():
    """Return the optparse options that control printing for run-webkit-tests."""
    return [
        # Note: We use print_options rather than just 'print' because print
        # is a reserved word.
        # Note: Also, we don't specify a default value so we can detect when
        # no flag is specified on the command line and use different defaults
        # based on whether or not --verbose is specified (since --print
        # overrides --verbose).
        optparse.make_option("--print", dest="print_options",
                             help=("controls print output of test run. "
                                   "Use --help-printing for more.")),
        optparse.make_option("--help-printing", action="store_true",
                             help="show detailed help on controlling print output"),
        optparse.make_option("-v", "--verbose", action="store_true",
                             default=False, help="include debug-level logging"),
    ]
def parse_print_options(print_options, verbose):
    """Parse the options provided to --print and dedup and rank them.

    Args:
        print_options: the comma-separated value of --print, or a falsy
            value if the flag was not given on the command line.
        verbose: whether --verbose was given (implies 'everything' when
            --print itself is absent).
    Returns:
        a set() of switches that govern how logging is done
    """
    if print_options:
        switches = set(print_options.split(','))
    elif verbose:
        switches = set(PRINT_EVERYTHING.split(','))
    else:
        switches = set(PRINT_DEFAULT.split(','))

    # 'nothing' overrides every other switch.
    if 'nothing' in switches:
        switches = set()

    # 'everything' / 'default' are macros that expand into the
    # corresponding switch lists.
    if 'everything' in switches:
        switches.discard('everything')
        switches.update(set(PRINT_EVERYTHING.split(',')))

    if 'default' in switches:
        switches.discard('default')
        switches.update(set(PRINT_DEFAULT.split(',')))

    # The trace-* switches subsume the per-test progress switches.
    if 'trace-everything' in switches:
        switches.discard('one-line-progress')
        switches.discard('trace-unexpected')
        switches.discard('unexpected')

    if 'trace-unexpected' in switches:
        switches.discard('unexpected')

    return switches
class Printer(object):
    """Class handling all non-debug-logging printing done by run-webkit-tests.

    Printing from run-webkit-tests falls into two buckets: general or
    regular output that is read only by humans and can be changed at any
    time, and output that is parsed by buildbots (and humans) and hence
    must be changed more carefully and in coordination with the buildbot
    parsing code (in chromium.org's buildbot/master.chromium/scripts/master/
    log_parser/webkit_test_command.py script).

    By default the buildbot-parsed code gets logged to stdout, and regular
    output gets logged to stderr."""
    def __init__(self, port, options, regular_output, buildbot_output, logger=None):
        """
        Args:
            port               interface to port-specific routines
            options            OptionParser object with command line settings
            regular_output     stream to which output intended only for humans
                               should be written
            buildbot_output    stream to which output intended to be read by
                               the buildbots (and humans) should be written
            logger             optional logger to integrate into the stream.
        """
        self._port = port
        self._options = options
        self._buildbot_stream = buildbot_output
        # Regular (human-only) output is throttled/decorated by MeteredStream.
        self._meter = MeteredStream(regular_output, options.verbose, logger=logger)
        self.switches = parse_print_options(options.print_options, options.verbose)
179 self._meter.cleanup()
184 # These two routines just hide the implementation of the switches.
185 def disabled(self, option):
186 return not option in self.switches
188 def enabled(self, option):
189 return option in self.switches
191 def help_printing(self):
192 self._write(HELP_PRINTING)
194 def print_actual(self, msg):
195 if self.disabled('actual'):
197 self._buildbot_stream.write("%s\n" % msg)
199 def print_config(self, msg):
200 self.write(msg, 'config')
202 def print_expected(self, msg):
203 self.write(msg, 'expected')
205 def print_timing(self, msg):
206 self.write(msg, 'timing')
208 def print_one_line_summary(self, total, expected, unexpected):
209 """Print a one-line summary of the test run to stdout.
212 total: total number of tests run
213 expected: number of expected results
214 unexpected: number of unexpected results
216 if self.disabled('one-line-summary'):
219 incomplete = total - expected - unexpected
222 incomplete_str = " (%d didn't run)" % incomplete
223 expected_str = str(expected)
226 expected_str = "All %d" % expected
229 self._write("%s tests ran as expected%s." %
230 (expected_str, incomplete_str))
232 self._write("1 test ran as expected, %d didn't%s:" %
233 (unexpected, incomplete_str))
235 self._write("%d tests ran as expected, %d didn't%s:" %
236 (expected, unexpected, incomplete_str))
239 def print_test_result(self, result, expected, exp_str, got_str):
240 """Print the result of the test as determined by --print.
242 This routine is used to print the details of each test as it completes.
245 result - The actual TestResult object
246 expected - Whether the result we got was an expected result
247 exp_str - What we expected to get (used for tracing)
248 got_str - What we actually got (used for tracing)
250 Note that we need all of these arguments even though they seem
251 somewhat redundant, in order to keep this routine from having to
252 known anything about the set of expectations.
254 if (self.enabled('trace-everything') or
255 self.enabled('trace-unexpected') and not expected):
256 self._print_test_trace(result, exp_str, got_str)
257 elif not expected and self.enabled('unexpected'):
258 self._print_unexpected_test_result(result)
260 def _print_test_trace(self, result, exp_str, got_str):
261 """Print detailed results of a test (triggered by --print trace-*).
262 For each test, print:
263 - location of the expected baselines
268 test_name = result.test_name
269 self._write('trace: %s' % test_name)
271 base = self._port.lookup_virtual_test_base(test_name)
273 args = ' '.join(self._port.lookup_virtual_test_args(test_name))
274 self._write(' base: %s' % base)
275 self._write(' args: %s' % args)
277 for extension in ('.txt', '.png', '.wav', '.webarchive'):
278 self._print_baseline(test_name, extension)
280 self._write(' exp: %s' % exp_str)
281 self._write(' got: %s' % got_str)
282 self._write(' took: %-.3f' % result.test_run_time)
285 def _print_baseline(self, test_name, extension):
286 baseline = self._port.expected_filename(test_name, extension)
287 if self._port._filesystem.exists(baseline):
288 relpath = self._port.relative_test_filename(baseline)
291 self._write(' %s: %s' % (extension[1:], relpath))
293 def _print_unexpected_test_result(self, result):
294 """Prints one unexpected test result line."""
295 desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result.type][0]
296 self.write(" %s -> unexpected %s" % (result.test_name, desc), "unexpected")
298 def print_progress(self, result_summary, retrying, test_list):
299 """Print progress through the tests as determined by --print."""
300 if self.disabled('one-line-progress'):
303 if result_summary.remaining == 0:
304 self._meter.write_update('')
307 percent_complete = 100 * (result_summary.expected +
308 result_summary.unexpected) / result_summary.total
313 self._meter.write_throttled_update("%s (%d%%): %d ran as expected, %d didn't, %d left" %
314 (action, percent_complete, result_summary.expected,
315 result_summary.unexpected, result_summary.remaining))
317 def print_unexpected_results(self, unexpected_results):
318 """Prints a list of the unexpected results to the buildbot stream."""
319 if self.disabled('unexpected-results'):
326 def add_to_dict_of_lists(dict, key, value):
327 dict.setdefault(key, []).append(value)
329 def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
330 actual = results['actual'].split(" ")
331 expected = results['expected'].split(" ")
332 if actual == ['PASS']:
333 if 'CRASH' in expected:
334 add_to_dict_of_lists(passes,
335 'Expected to crash, but passed',
337 elif 'TIMEOUT' in expected:
338 add_to_dict_of_lists(passes,
339 'Expected to timeout, but passed',
342 add_to_dict_of_lists(passes,
343 'Expected to fail, but passed',
345 elif len(actual) > 1:
346 # We group flaky tests by the first actual result we got.
347 add_to_dict_of_lists(flaky, actual[0], test)
349 add_to_dict_of_lists(regressions, results['actual'], test)
351 resultsjsonparser.for_each_test(unexpected_results['tests'], add_result)
353 if len(passes) or len(flaky) or len(regressions):
354 self._buildbot_stream.write("\n")
357 for key, tests in passes.iteritems():
358 self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests)))
361 self._buildbot_stream.write(" %s\n" % test)
362 self._buildbot_stream.write("\n")
363 self._buildbot_stream.write("\n")
366 descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
367 for key, tests in flaky.iteritems():
368 result = TestExpectations.EXPECTATIONS[key.lower()]
369 self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n"
370 % (descriptions[result][1], len(tests)))
374 result = resultsjsonparser.result_for_test(unexpected_results['tests'], test)
375 actual = result['actual'].split(" ")
376 expected = result['expected'].split(" ")
377 result = TestExpectations.EXPECTATIONS[key.lower()]
378 new_expectations_list = list(set(actual) | set(expected))
379 self._buildbot_stream.write(" %s = %s\n" %
380 (test, " ".join(new_expectations_list)))
381 self._buildbot_stream.write("\n")
382 self._buildbot_stream.write("\n")
385 descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
386 for key, tests in regressions.iteritems():
387 result = TestExpectations.EXPECTATIONS[key.lower()]
388 self._buildbot_stream.write(
389 "Regressions: Unexpected %s : (%d)\n" % (
390 descriptions[result][1], len(tests)))
393 self._buildbot_stream.write(" %s = %s\n" % (test, key))
394 self._buildbot_stream.write("\n")
395 self._buildbot_stream.write("\n")
397 if len(unexpected_results['tests']) and self._options.verbose:
398 self._buildbot_stream.write("%s\n" % ("-" * 78))
400 def print_update(self, msg):
401 if self.disabled('updates'):
403 self._meter.write_update(msg)
405 def write(self, msg, option="misc"):
406 if self.disabled(option):
410 def _write(self, msg):
411 self._meter.writeln(msg)