Tools/Scripts/webkitpy/layout_tests/layout_package/printing.py
#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

30 """Package that handles non-debug, non-file output for run-webkit-tests."""
31
32 import logging
33 import optparse
34 import os
35 import pdb
36
37 from webkitpy.layout_tests.layout_package import metered_stream
38 from webkitpy.layout_tests.layout_package import test_expectations
39
40 _log = logging.getLogger("webkitpy.layout_tests.printer")
41
42 TestExpectationsFile = test_expectations.TestExpectationsFile
43
44 NUM_SLOW_TESTS_TO_LOG = 10
45
46 PRINT_DEFAULT = ("misc,one-line-progress,one-line-summary,unexpected,"
47                  "unexpected-results,updates")
48 PRINT_EVERYTHING = ("actual,config,expected,misc,one-line-progress,"
49                     "one-line-summary,slowest,timing,unexpected,"
50                     "unexpected-results,updates")
51
HELP_PRINTING = """
Output for run-webkit-tests is controlled by a comma-separated list of
values passed to --print.  Values either influence the overall output, or
the output at the beginning of the run, during the run, or at the end:

Overall options:
    nothing             don't print anything. This overrides every other option
    default             include the default options. This is useful for logging
                        the default options plus additional settings.
    everything          print everything (except the trace-* options and the
                        detailed-progress option; see below for the full list)
    misc                print miscellaneous things like blank lines

At the beginning of the run:
    config              print the test run configuration
    expected            print a summary of what is expected to happen
                        (# passes, # failures, etc.)

During the run:
    detailed-progress   print one dot per test completed
    one-line-progress   print a one-line progress bar
    unexpected          print any unexpected results as they occur
    updates             print updates on which stage is executing
    trace-everything    print detailed info on every test's results
                        (baselines, expectation, time it took to run). If
                        this is specified it will override the '*-progress'
                        options, the 'trace-unexpected' option, and the
                        'unexpected' option.
    trace-unexpected    like 'trace-everything', but only for tests with
                        unexpected results. If this option is specified,
                        it will override the 'unexpected' option.

At the end of the run:
    actual              print a summary of the actual results
    slowest             print %(slowest)d slowest tests and the time they took
    timing              print timing statistics
    unexpected-results  print a list of the tests with unexpected results
    one-line-summary    print a one-line summary of the run

Notes:
    - 'detailed-progress' can only be used if running in a single thread
      (using --child-processes=1) or a single queue of tests (using
      --experimental-fully-parallel). If these conditions aren't true,
      'one-line-progress' will be used instead.
    - If both 'detailed-progress' and 'one-line-progress' are specified (and
      both are possible), 'detailed-progress' will be used.
    - If 'nothing' is specified, it overrides all of the other options.
    - Specifying --verbose is equivalent to --print everything, plus it
      changes the format of the log messages to add timestamps and other
      information. If you specify --verbose and --print X, then X overrides
      the --print everything implied by --verbose.

--print 'everything' is equivalent to --print '%(everything)s'.

The default (--print default) is equivalent to --print '%(default)s'.
""" % {'slowest': NUM_SLOW_TESTS_TO_LOG, 'everything': PRINT_EVERYTHING,
       'default': PRINT_DEFAULT}

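# Illustrative command lines for the --print flag documented above (a sketch,
# not an exhaustive list; the flag names and values come from the help text
# and option definitions in this file, the invocations themselves are made up):
#
#   run-webkit-tests --print nothing
#   run-webkit-tests --print default,slowest,timing
#   run-webkit-tests --verbose --print one-line-progress,unexpected
#   run-webkit-tests --help-printing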

def print_options():
    return [
        # Note: We use print_options rather than just 'print' because print
        # is a reserved word.
        # Note: Also, we don't specify a default value so we can detect when
        # no flag is specified on the command line and use different defaults
        # based on whether or not --verbose is specified (since --print
        # overrides --verbose).
        optparse.make_option("--print", dest="print_options",
            help=("controls print output of test run. "
                  "Use --help-printing for more.")),
        optparse.make_option("--help-printing", action="store_true",
            help="show detailed help on controlling print output"),
        optparse.make_option("-v", "--verbose", action="store_true",
            default=False, help="include debug-level logging"),
    ]

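# A minimal sketch of wiring these options into an optparse parser (for
# illustration only; run-webkit-tests assembles its real parser elsewhere,
# and the ['--print', 'default,slowest'] argv below is invented):
#
#   parser = optparse.OptionParser()
#   for option in print_options():
#       parser.add_option(option)
#   options, args = parser.parse_args(['--print', 'default,slowest'])
#   assert options.print_options == 'default,slowest'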

def parse_print_options(print_options, verbose, child_processes,
                        is_fully_parallel):
    """Parse the options provided to --print and dedup and rank them.

    Returns
        a set() of switches that govern how logging is done

    """
    if print_options:
        switches = set(print_options.split(','))
    elif verbose:
        switches = set(PRINT_EVERYTHING.split(','))
    else:
        switches = set(PRINT_DEFAULT.split(','))

    if 'nothing' in switches:
        return set()

    if (child_processes != 1 and not is_fully_parallel and
        'detailed-progress' in switches):
        _log.warn("Can only print 'detailed-progress' if running "
                  "with --child-processes=1 or "
                  "with --experimental-fully-parallel. "
                  "Using 'one-line-progress' instead.")
        switches.discard('detailed-progress')
        switches.add('one-line-progress')

    if 'everything' in switches:
        switches.discard('everything')
        switches.update(set(PRINT_EVERYTHING.split(',')))

    if 'default' in switches:
        switches.discard('default')
        switches.update(set(PRINT_DEFAULT.split(',')))

    if 'detailed-progress' in switches:
        switches.discard('one-line-progress')

    if 'trace-everything' in switches:
        switches.discard('detailed-progress')
        switches.discard('one-line-progress')
        switches.discard('trace-unexpected')
        switches.discard('unexpected')

    if 'trace-unexpected' in switches:
        switches.discard('unexpected')

    return switches

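# An illustrative walk-through of the precedence rules in parse_print_options()
# above ('nothing' beats everything else, and the trace-* switches suppress the
# progress and 'unexpected' switches). The argument values are invented:
#
#   parse_print_options('nothing,everything', False, 1, False)
#       -> set()
#   parse_print_options('trace-everything,unexpected,one-line-progress',
#                       False, 1, False)
#       -> set(['trace-everything'])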

def _configure_logging(stream, verbose):
    log_fmt = '%(message)s'
    log_datefmt = '%y%m%d %H:%M:%S'
    log_level = logging.INFO
    if verbose:
        log_fmt = ('%(asctime)s %(process)d %(filename)s:%(lineno)d '
                   '%(levelname)s %(message)s')
        log_level = logging.DEBUG

    root = logging.getLogger()
    handler = logging.StreamHandler(stream)
    # Use the short date format defined above for the timestamps added in
    # verbose mode.
    handler.setFormatter(logging.Formatter(log_fmt, log_datefmt))
    root.addHandler(handler)
    root.setLevel(log_level)
    return handler


def _restore_logging(handler_to_remove):
    root = logging.getLogger()
    root.handlers.remove(handler_to_remove)

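# A minimal usage sketch for the two helpers above (illustrative only; assumes
# 'sys' is imported, and mirrors how Printer pairs them in __init__/cleanup
# below):
#
#   handler = _configure_logging(sys.stderr, verbose=False)
#   try:
#       _log.info("now routed through the temporary stderr handler")
#   finally:
#       _restore_logging(handler)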

class Printer(object):
    """Class handling all non-debug-logging printing done by run-webkit-tests.

    Printing from run-webkit-tests falls into two buckets: general or
    regular output that is read only by humans and can be changed at any
    time, and output that is parsed by buildbots (and humans) and hence
    must be changed more carefully and in coordination with the buildbot
    parsing code (in chromium.org's buildbot/master.chromium/scripts/master/
    log_parser/webkit_test_command.py script).

    By default the buildbot-parsed output gets logged to stdout, and regular
    output gets logged to stderr."""
    def __init__(self, port, options, regular_output, buildbot_output,
                 child_processes, is_fully_parallel):
        """
        Args
          port               interface to port-specific routines
          options            OptionParser object with command line settings
          regular_output     stream to which output intended only for humans
                             should be written
          buildbot_output    stream to which output intended to be read by
                             the buildbots (and humans) should be written
          child_processes    number of parallel threads running (usually
                             controlled by --child-processes)
          is_fully_parallel  are the tests running in a single queue, or
                             in shards (usually controlled by
                             --experimental-fully-parallel)

        Note that the last two args are separate rather than bundled into
        the options structure so that this object does not assume any flags
        set in options that weren't returned from print_options(), above.
        The two are used to determine whether or not we can sensibly use
        the 'detailed-progress' option, or can only use 'one-line-progress'.
        """
        self._buildbot_stream = buildbot_output
        self._options = options
        self._port = port
        self._stream = regular_output

        # These are used for --print detailed-progress to track status by
        # directory.
        self._current_dir = None
        self._current_progress_str = ""
        self._current_test_number = 0

        self._meter = metered_stream.MeteredStream(options.verbose,
                                                   regular_output)
        self._logging_handler = _configure_logging(self._meter,
            options.verbose)

        self.switches = parse_print_options(options.print_options,
            options.verbose, child_processes, is_fully_parallel)

    def cleanup(self):
        """Restore logging configuration to its initial settings."""
        if self._logging_handler:
            _restore_logging(self._logging_handler)
            self._logging_handler = None

    def __del__(self):
        self.cleanup()

    # These two routines just hide the implementation of the switches.
    def disabled(self, option):
        return option not in self.switches

    def enabled(self, option):
        return option in self.switches

    def help_printing(self):
        self._write(HELP_PRINTING)

    def print_actual(self, msg):
        if self.disabled('actual'):
            return
        self._buildbot_stream.write("%s\n" % msg)

    def print_config(self, msg):
        self.write(msg, 'config')

    def print_expected(self, msg):
        self.write(msg, 'expected')

    def print_timing(self, msg):
        self.write(msg, 'timing')

    def print_one_line_summary(self, total, expected, unexpected):
        """Print a one-line summary of the test run to stdout.

        Args:
          total: total number of tests run
          expected: number of expected results
          unexpected: number of unexpected results
        """
        if self.disabled('one-line-summary'):
            return

        incomplete = total - expected - unexpected
        if incomplete:
            self._write("")
            incomplete_str = " (%d didn't run)" % incomplete
            expected_str = str(expected)
        else:
            incomplete_str = ""
            expected_str = "All %d" % expected

        if unexpected == 0:
            self._write("%s tests ran as expected%s." %
                        (expected_str, incomplete_str))
        elif expected == 1:
            self._write("1 test ran as expected, %d didn't%s:" %
                        (unexpected, incomplete_str))
        else:
            self._write("%d tests ran as expected, %d didn't%s:" %
                        (expected, unexpected, incomplete_str))
        self._write("")
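    # Illustrative output from print_one_line_summary() above (the message
    # formats come from the code; the counts are invented for the example):
    #
    #   print_one_line_summary(1523, 1523, 0) -> "All 1523 tests ran as expected."
    #   print_one_line_summary(1523, 1520, 3) -> "1520 tests ran as expected, 3 didn't:"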

    def print_test_result(self, result, expected, exp_str, got_str):
        """Print the result of the test as determined by --print.

        This routine is used to print the details of each test as it completes.

        Args:
            result   - The actual TestResult object
            expected - Whether the result we got was an expected result
            exp_str  - What we expected to get (used for tracing)
            got_str  - What we actually got (used for tracing)

        Note that we need all of these arguments even though they seem
        somewhat redundant, in order to keep this routine from having to
        know anything about the set of expectations.
        """
        if (self.enabled('trace-everything') or
            (self.enabled('trace-unexpected') and not expected)):
            self._print_test_trace(result, exp_str, got_str)
        elif (not expected and self.enabled('unexpected') and
              self.disabled('detailed-progress')):
            # Note: 'detailed-progress' handles unexpected results internally,
            # so we skip it here.
            self._print_unexpected_test_result(result)

    def _print_test_trace(self, result, exp_str, got_str):
        """Print detailed results of a test (triggered by --print trace-*).
        For each test, print:
           - location of the expected baselines
           - expected results
           - actual result
           - timing info
        """
        filename = result.filename
        test_name = self._port.relative_test_filename(filename)
        self._write('trace: %s' % test_name)
        txt_file = self._port.expected_filename(filename, '.txt')
        if self._port.path_exists(txt_file):
            self._write('  txt: %s' %
                        self._port.relative_test_filename(txt_file))
        else:
            self._write('  txt: <none>')
        checksum_file = self._port.expected_filename(filename, '.checksum')
        if self._port.path_exists(checksum_file):
            self._write('  sum: %s' %
                        self._port.relative_test_filename(checksum_file))
        else:
            self._write('  sum: <none>')
        png_file = self._port.expected_filename(filename, '.png')
        if self._port.path_exists(png_file):
            self._write('  png: %s' %
                        self._port.relative_test_filename(png_file))
        else:
            self._write('  png: <none>')
        self._write('  exp: %s' % exp_str)
        self._write('  got: %s' % got_str)
        self._write(' took: %-.3f' % result.test_run_time)
        self._write('')

    def _print_unexpected_test_result(self, result):
        """Prints one unexpected test result line."""
        desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result.type][0]
        self.write("  %s -> unexpected %s" %
                   (self._port.relative_test_filename(result.filename),
                    desc), "unexpected")

    def print_progress(self, result_summary, retrying, test_list):
        """Print progress through the tests as determined by --print."""
        if self.enabled('detailed-progress'):
            self._print_detailed_progress(result_summary, test_list)
        elif self.enabled('one-line-progress'):
            self._print_one_line_progress(result_summary, retrying)
        else:
            return

        if result_summary.remaining == 0:
            self._meter.update('')

    def _print_one_line_progress(self, result_summary, retrying):
        """Displays the progress through the test run."""
        percent_complete = 100 * (result_summary.expected +
            result_summary.unexpected) / result_summary.total
        action = "Testing"
        if retrying:
            action = "Retrying"
        self._meter.progress("%s (%d%%): %d ran as expected, %d didn't,"
            " %d left" % (action, percent_complete, result_summary.expected,
             result_summary.unexpected, result_summary.remaining))

    def _print_detailed_progress(self, result_summary, test_list):
        """Display detailed progress output where we print the directory name
        and one dot for each completed test. This is triggered by
        "--print detailed-progress"."""
        if self._current_test_number == len(test_list):
            return

        next_test = test_list[self._current_test_number]
        next_dir = os.path.dirname(
            self._port.relative_test_filename(next_test))
        if self._current_progress_str == "":
            self._current_progress_str = "%s: " % (next_dir)
            self._current_dir = next_dir

        while next_test in result_summary.results:
            if next_dir != self._current_dir:
                self._meter.write("%s\n" % (self._current_progress_str))
                self._current_progress_str = "%s: ." % (next_dir)
                self._current_dir = next_dir
            else:
                self._current_progress_str += "."

            if (next_test in result_summary.unexpected_results and
                self.enabled('unexpected')):
                self._meter.write("%s\n" % self._current_progress_str)
                test_result = result_summary.results[next_test]
                self._print_unexpected_test_result(test_result)
                self._current_progress_str = "%s: " % self._current_dir

            self._current_test_number += 1
            if self._current_test_number == len(test_list):
                break

            next_test = test_list[self._current_test_number]
            next_dir = os.path.dirname(
                self._port.relative_test_filename(next_test))

        if result_summary.remaining:
            remain_str = " (%d)" % (result_summary.remaining)
            self._meter.progress("%s%s" % (self._current_progress_str,
                                           remain_str))
        else:
            self._meter.progress("%s" % (self._current_progress_str))
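    # Illustrative meter line produced by _print_detailed_progress() above
    # (the "dir: dots (remaining)" shape comes from the code; the directory
    # name and count are invented):
    #
    #   fast/dom: .......... (1042)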

    def print_unexpected_results(self, unexpected_results):
        """Prints a list of the unexpected results to the buildbot stream."""
        if self.disabled('unexpected-results'):
            return

        passes = {}
        flaky = {}
        regressions = {}

        for test, results in unexpected_results['tests'].iteritems():
            actual = results['actual'].split(" ")
            expected = results['expected'].split(" ")
            if actual == ['PASS']:
                if 'CRASH' in expected:
                    _add_to_dict_of_lists(passes,
                                          'Expected to crash, but passed',
                                          test)
                elif 'TIMEOUT' in expected:
                    _add_to_dict_of_lists(passes,
                                          'Expected to timeout, but passed',
                                          test)
                else:
                    _add_to_dict_of_lists(passes,
                                          'Expected to fail, but passed',
                                          test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
                _add_to_dict_of_lists(flaky, actual[0], test)
            else:
                _add_to_dict_of_lists(regressions, results['actual'], test)

        if len(passes) or len(flaky) or len(regressions):
            self._buildbot_stream.write("\n")

        if len(passes):
            for key, tests in passes.iteritems():
                self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._buildbot_stream.write("  %s\n" % test)
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(flaky):
            descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectationsFile.EXPECTATIONS[key.lower()]
                self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n"
                    % (descriptions[result][1], len(tests)))
                tests.sort()

                for test in tests:
                    result = unexpected_results['tests'][test]
                    actual = result['actual'].split(" ")
                    expected = result['expected'].split(" ")
                    new_expectations_list = list(set(actual) | set(expected))
                    self._buildbot_stream.write("  %s = %s\n" %
                        (test, " ".join(new_expectations_list)))
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(regressions):
            descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectationsFile.EXPECTATIONS[key.lower()]
                self._buildbot_stream.write(
                    "Regressions: Unexpected %s : (%d)\n" % (
                    descriptions[result][1], len(tests)))
                tests.sort()
                for test in tests:
                    self._buildbot_stream.write("  %s = %s\n" % (test, key))
                self._buildbot_stream.write("\n")
            self._buildbot_stream.write("\n")

        if len(unexpected_results['tests']) and self._options.verbose:
            self._buildbot_stream.write("%s\n" % ("-" * 78))

    def print_update(self, msg):
        if self.disabled('updates'):
            return
        self._meter.update(msg)

    def write(self, msg, option="misc"):
        if self.disabled(option):
            return
        self._write(msg)

    def _write(self, msg):
        # FIXME: we could probably get away with calling _log.info() all of
        # the time, but there doesn't seem to be a good way to test the output
        # from the logger :(.
        if self._options.verbose:
            _log.info(msg)
        else:
            self._meter.write("%s\n" % msg)

#
# Utility routines used by the Printer class
#


def _add_to_dict_of_lists(dict, key, value):
    dict.setdefault(key, []).append(value)
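

# Illustrative use of _add_to_dict_of_lists() above (the dictionary and the
# test names are invented; this mirrors how print_unexpected_results() builds
# its passes/flaky/regressions buckets):
#
#   regressions = {}
#   _add_to_dict_of_lists(regressions, 'TEXT', 'fast/dom/example-a.html')
#   _add_to_dict_of_lists(regressions, 'TEXT', 'fast/dom/example-b.html')
#   # regressions == {'TEXT': ['fast/dom/example-a.html',
#   #                          'fast/dom/example-b.html']}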