# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

29 """Package that handles non-debug, non-file output for run-webkit-tests."""

import math
import optparse

from webkitpy.tool import grammar
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationParser
from webkitpy.layout_tests.views.metered_stream import MeteredStream


NUM_SLOW_TESTS_TO_LOG = 10


def print_options():
    return [
        optparse.make_option('-q', '--quiet', action='store_true', default=False,
                             help='run quietly (errors, warnings, and progress only)'),
        optparse.make_option('-v', '--verbose', action='store_true', default=False,
                             help='print a summarized result for every test (one line per test)'),
        optparse.make_option('--details', action='store_true', default=False,
                             help='print detailed results for every test'),
        optparse.make_option('--debug-rwt-logging', action='store_true', default=False,
                             help='print timestamps and debug information for run-webkit-tests itself'),
    ]
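# A minimal sketch (not how run-webkit-tests itself wires these up) of how the
# options above could be fed to an optparse parser; the argument values are
# illustrative only:
#
#     parser = optparse.OptionParser(option_list=print_options())
#     options, args = parser.parse_args(['--verbose', '--details'])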


class Printer(object):
    """Class handling all non-debug-logging printing done by run-webkit-tests."""

    def __init__(self, port, options, regular_output, logger=None):
        self.num_started = 0
        self.num_tests = 0
        self._port = port
        self._options = options
        self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
                                    number_of_columns=self._port.host.platform.terminal_width())
        self._running_tests = []
        self._completed_tests = []

    def cleanup(self):
        self._meter.cleanup()

    def __del__(self):
        self.cleanup()

    def print_config(self, results_directory):
        self._print_default("Using port '%s'" % self._port.name())
        self._print_default("Test configuration: %s" % self._port.test_configuration())
        self._print_default("Placing test results in %s" % results_directory)

        # FIXME: should these options be in printing_options?
        if self._options.new_baseline:
            self._print_default("Placing new baselines in %s" % self._port.baseline_path())

        fs = self._port.host.filesystem
        fallback_path = [fs.relpath(x, self._port.layout_tests_dir()).replace("../", "") for x in self._port.baseline_search_path()]
        self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))

        self._print_default("Using %s build" % self._options.configuration)
        if self._options.pixel_tests:
            self._print_default("Pixel tests enabled")
        else:
            self._print_default("Pixel tests disabled")

        self._print_default("Regular timeout: %s, slow test timeout: %s" %
                  (self._options.time_out_ms, self._options.slow_time_out_ms))

        self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line_for_logging()))
        self._print_default('')

    def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
        found_str = 'Found %s; running %d' % (grammar.pluralize(num_all_test_files, "test"), num_to_run)
        if repeat_each * iterations > 1:
            found_str += ' (%s each: --repeat-each=%d --iterations=%d)' % (grammar.pluralize(repeat_each * iterations, "time"), repeat_each, iterations)
        found_str += ', skipping %d' % (num_all_test_files - num_to_run)
        self._print_default(found_str + '.')

    def print_expected(self, run_results, tests_with_result_type_callback):
        self._print_expected_results_of_type(run_results, test_expectations.PASS, "passes", tests_with_result_type_callback)
        self._print_expected_results_of_type(run_results, test_expectations.FAIL, "failures", tests_with_result_type_callback)
        self._print_expected_results_of_type(run_results, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
        self._print_debug('')

    def print_workers_and_shards(self, num_workers, num_shards):
        driver_name = self._port.driver_name()

        if num_workers == 1:
            self._print_default('Running 1 {}.'.format(driver_name))
            self._print_debug('({}).'.format(grammar.pluralize(num_shards, "shard")))
        else:
            self._print_default('Running {} in parallel.'.format(grammar.pluralize(num_workers, driver_name)))
            self._print_debug('({} shards).'.format(num_shards))
        self._print_default('')

    def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback):
        tests = tests_with_result_type_callback(result_type)
        now = run_results.tests_by_timeline[test_expectations.NOW]
        wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]

        # We use a fancy format string in order to print the data out in a
        # nicely-aligned table.
        fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
                  % (self._num_digits(now), self._num_digits(wontfix)))
        self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))

    def _num_digits(self, num):
        ndigits = 1
        if len(num):
            ndigits = int(math.log10(len(num))) + 1
        return ndigits

    def print_results(self, run_time, run_results, summarized_results):
        self._print_timing_statistics(run_time, run_results)
        self._print_one_line_summary(run_results.total - run_results.expected_skips,
                                     run_results.expected - run_results.expected_skips,
                                     run_results.unexpected)

    def _print_timing_statistics(self, total_time, run_results):
        self._print_debug("Test timing:")
        self._print_debug("  %6.2f total testing time" % total_time)
        self._print_debug("")

        self._print_worker_statistics(run_results, int(self._options.child_processes))
        self._print_aggregate_test_statistics(run_results)
        self._print_individual_test_times(run_results)
        self._print_directory_timings(run_results)

    def _print_worker_statistics(self, run_results, num_workers):
        self._print_debug("Thread timing:")
        stats = {}
        cuml_time = 0
        for result in run_results.results_by_name.values():
            stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time': 0})
            stats[result.worker_name]['num_tests'] += 1
            stats[result.worker_name]['total_time'] += result.total_run_time
            cuml_time += result.total_run_time

        for worker_name in stats:
            self._print_debug("    %10s: %5d tests, %6.2f secs" % (worker_name, stats[worker_name]['num_tests'], stats[worker_name]['total_time']))
        self._print_debug("   %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / num_workers))
        self._print_debug("")

    def _print_aggregate_test_statistics(self, run_results):
        times_for_dump_render_tree = [result.test_run_time for result in run_results.results_by_name.values()]
        self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)

    def _print_individual_test_times(self, run_results):
        # Reverse-sort by the time spent in DumpRenderTree.

        individual_test_timings = sorted(run_results.results_by_name.values(), key=lambda result: result.test_run_time, reverse=True)
        num_printed = 0
        slow_tests = []
        timeout_or_crash_tests = []
        unexpected_slow_tests = []
        for test_tuple in individual_test_timings:
            test_name = test_tuple.test_name
            is_timeout_crash_or_slow = False
            if test_name in run_results.slow_tests:
                is_timeout_crash_or_slow = True
                slow_tests.append(test_tuple)

            if test_name in run_results.failures_by_name:
                result = run_results.results_by_name[test_name].type
                if (result == test_expectations.TIMEOUT or
                    result == test_expectations.CRASH):
                    is_timeout_crash_or_slow = True
                    timeout_or_crash_tests.append(test_tuple)

            if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG):
                num_printed = num_printed + 1
                unexpected_slow_tests.append(test_tuple)

        self._print_debug("")
        self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" %
            NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
        self._print_debug("")
        self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
        self._print_debug("")
        self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
        self._print_debug("")

    def _print_test_list_timing(self, title, test_list):
        self._print_debug(title)
        for test_tuple in test_list:
            test_run_time = round(test_tuple.test_run_time, 1)
            self._print_debug("  %s took %s seconds" % (test_tuple.test_name, test_run_time))

    def _print_directory_timings(self, run_results):
        stats = {}
        for result in run_results.results_by_name.values():
            stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0})
            stats[result.shard_name]['num_tests'] += 1
            stats[result.shard_name]['total_time'] += result.total_run_time

        timings = []
        for directory in stats:
            timings.append((directory, round(stats[directory]['total_time'], 1), stats[directory]['num_tests']))
        timings.sort()

        self._print_debug("Time to process slowest subdirectories:")
        min_seconds_to_print = 10
        for timing in timings:
            # timing is (directory, total_time, num_tests); compare the elapsed
            # time, not the directory name, against the threshold.
            if timing[1] > min_seconds_to_print:
                self._print_debug("  %s took %s seconds to run %s tests." % timing)
        self._print_debug("")

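    # Worked example (illustrative values only): for timings [1.0, 2.0, 4.0],
    # the method below reports median 2.0, mean 7.0 / 3 ~= 2.333, 90th and 99th
    # percentiles of 4.0, and a standard deviation of
    # sqrt(((1.0 - 2.333)**2 + (2.0 - 2.333)**2 + (4.0 - 2.333)**2) / 3) ~= 1.247.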
    def _print_statistics_for_test_timings(self, title, timings):
        self._print_debug(title)
        timings.sort()

        num_tests = len(timings)
        if not num_tests:
            return
        percentile90 = timings[int(.9 * num_tests)]
        percentile99 = timings[int(.99 * num_tests)]

        if num_tests % 2 == 1:
            # Middle element of the sorted list.
            median = timings[(num_tests - 1) // 2]
        else:
            lower = timings[num_tests // 2 - 1]
            upper = timings[num_tests // 2]
            median = (float(lower + upper)) / 2

        mean = sum(timings) / num_tests

        # Accumulate the squared deviations for the population standard deviation.
        sum_of_deviations = 0
        for timing in timings:
            sum_of_deviations += math.pow(timing - mean, 2)

        std_deviation = math.sqrt(sum_of_deviations / num_tests)
        self._print_debug("  Median:          %6.3f" % median)
        self._print_debug("  Mean:            %6.3f" % mean)
        self._print_debug("  90th percentile: %6.3f" % percentile90)
        self._print_debug("  99th percentile: %6.3f" % percentile99)
        self._print_debug("  Standard dev:    %6.3f" % std_deviation)
        self._print_debug("")

    def _print_one_line_summary(self, total, expected, unexpected):
        incomplete = total - expected - unexpected
        incomplete_str = ''
        if incomplete:
            self._print_default("")
            incomplete_str = " (%d didn't run)" % incomplete

        if self._options.verbose or self._options.debug_rwt_logging or unexpected:
            self.writeln("")

        summary = ''
        if unexpected == 0:
            if expected == total:
                if expected > 1:
                    summary = "All %d tests ran as expected." % expected
                else:
                    summary = "The test ran as expected."
            else:
                summary = "%s ran as expected%s." % (grammar.pluralize(expected, "test"), incomplete_str)
        else:
            summary = "%s ran as expected, %d didn't%s:" % (grammar.pluralize(expected, "test"), unexpected, incomplete_str)

        self._print_quiet(summary)
        self._print_quiet("")

    def _test_status_line(self, test_name, suffix, truncate=True):
        format_string = '[%d/%d] %s%s'
        status_line = format_string % (self.num_started, self.num_tests, test_name, suffix)
        if truncate and len(status_line) > self._meter.number_of_columns():
            overflow_columns = len(status_line) - self._meter.number_of_columns()
            ellipsis = '...'
            if len(test_name) < overflow_columns + len(ellipsis) + 2:
                # We don't have enough space even if we elide, just show the test filename.
                fs = self._port.host.filesystem
                test_name = fs.split(test_name)[1]
            else:
                new_length = len(test_name) - overflow_columns - len(ellipsis)
                prefix = int(new_length / 2)
                test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
        return format_string % (self.num_started, self.num_tests, test_name, suffix)

    def print_started_test(self, test_name):
        self.num_started += 1
        self._running_tests.append(test_name)
        if len(self._running_tests) > 1:
            suffix = ' (+%d)' % (len(self._running_tests) - 1)
        else:
            suffix = ''
        if self._options.verbose:
            write = self._meter.write_update
        else:
            write = self._meter.write_throttled_update
        write(self._test_status_line(test_name, suffix))

    def print_finished_test(self, result, expected, exp_str, got_str):
        test_name = result.test_name

        result_message = self._result_message(result.type, result.failures, expected, exp_str, self._options.verbose)

        if self._options.details:
            self._print_test_trace(result, exp_str, got_str)
        elif (self._options.verbose and not self._options.debug_rwt_logging) or not expected:
            self.writeln(self._test_status_line(test_name, result_message, truncate=False))
        elif self.num_started == self.num_tests:
            self._meter.write_update('')
        else:
            if test_name == self._running_tests[0]:
                self._completed_tests.insert(0, [test_name, result_message])
            else:
                self._completed_tests.append([test_name, result_message])

            # Use distinct loop variables so that test_name still refers to the
            # test that just finished when it is removed from _running_tests below.
            for completed_name, completed_message in self._completed_tests:
                self._meter.write_throttled_update(self._test_status_line(completed_name, completed_message, truncate=False))
            self._completed_tests = []
        self._running_tests.remove(test_name)

    def _result_message(self, result_type, failures, expected, exp_str, verbose):
        exp_string = ''
        if not expected:
            exp_string = ' (leak detection is pending)' if 'LEAK' in exp_str else ' unexpectedly'

        if result_type == test_expectations.PASS:
            return ' passed%s' % exp_string
        else:
            return ' failed%s (%s)' % (exp_string, ', '.join(failure.message() for failure in failures))

    def _print_test_trace(self, result, exp_str, got_str):
        test_name = result.test_name
        self._print_default(self._test_status_line(test_name, ''))

        for extension in ('.txt', '.png', '.wav', '.webarchive'):
            self._print_baseline(test_name, extension)

        self._print_default('  exp: %s' % exp_str)
        self._print_default('  got: %s' % got_str)
        self._print_default(' took: %-.3f' % result.test_run_time)
        self._print_default('')

    def _print_baseline(self, test_name, extension):
        baseline = self._port.expected_filename(test_name, extension)
        if self._port._filesystem.exists(baseline):
            relpath = self._port.relative_test_filename(baseline)
        else:
            relpath = '<none>'
        self._print_default('  %s: %s' % (extension[1:], relpath))

    def _print_quiet(self, msg):
        self.writeln(msg)

    def _print_default(self, msg):
        if not self._options.quiet:
            self.writeln(msg)

    def _print_debug(self, msg):
        if self._options.debug_rwt_logging:
            self.writeln(msg)

    def write_update(self, msg):
        self._meter.write_update(msg)

    def writeln(self, msg):
        self._meter.writeln(msg)

    def flush(self):
        self._meter.flush()
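

# A minimal sketch of the typical Printer lifecycle as driven by the
# run-webkit-tests runner; the port/options/stream objects and loop structure
# below are placeholders (the real call sites live in the runner, not here):
#
#     printer = Printer(port, options, sys.stderr)
#     printer.print_config(results_directory)
#     printer.print_found(num_all_tests, num_to_run, repeat_each=1, iterations=1)
#     printer.print_workers_and_shards(num_workers, num_shards)
#     for test in tests_to_run:
#         printer.print_started_test(test)
#         ...  # run the test
#         printer.print_finished_test(result, expected, exp_str, got_str)
#     printer.print_results(run_time, run_results, summarized_results)
#     printer.cleanup()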