webkitpy: Implement device type specific expected results (Part 2)
[WebKit-https.git] / Tools / Scripts / webkitpy / layout_tests / models / test_run_results.py
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
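
"""Data structures that collect layout test results and summarize them for output (e.g. full_results.json)."""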

import datetime
import logging
import signal

from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures


_log = logging.getLogger(__name__)

INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128


class TestRunResults(object):
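    """Aggregated results of a single test run: per-test results, failure maps, and expected/unexpected counters."""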
    def __init__(self, expectations, num_tests):
        self.total = num_tests
        self.remaining = self.total
        self.expectations = expectations
        self.expected = 0
        self.unexpected = 0
        self.unexpected_failures = 0
        self.unexpected_crashes = 0
        self.unexpected_timeouts = 0
        self.tests_by_expectation = {}
        self.tests_by_timeline = {}
        self.results_by_name = {}  # Map of test name to the last result for the test.
        self.all_results = []  # All results from a run, including every iteration of every test.
        self.unexpected_results_by_name = {}
        self.failures_by_name = {}
        self.total_failures = 0
        self.expected_skips = 0
        for expectation in test_expectations.TestExpectations.EXPECTATIONS.values():
            self.tests_by_expectation[expectation] = set()
        for timeline in test_expectations.TestExpectations.TIMELINES.values():
            self.tests_by_timeline[timeline] = expectations.model().get_tests_with_timeline(timeline)
        self.slow_tests = set()
        self.interrupted = False
        self.keyboard_interrupted = False

    def add(self, test_result, expected, test_is_slow):
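        """Record a single TestResult, updating the per-test maps and the expected/unexpected counters."""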
        self.tests_by_expectation[test_result.type].add(test_result.test_name)
        self.results_by_name[test_result.test_name] = test_result
        if test_result.is_other_crash:
            return
        if test_result.type != test_expectations.SKIP:
            self.all_results.append(test_result)
        self.remaining -= 1
        if len(test_result.failures):
            self.total_failures += 1
            self.failures_by_name[test_result.test_name] = test_result.failures
        if expected:
            self.expected += 1
            if test_result.type == test_expectations.SKIP:
                self.expected_skips += 1
        else:
            self.unexpected_results_by_name[test_result.test_name] = test_result
            self.unexpected += 1
            if len(test_result.failures):
                self.unexpected_failures += 1
            if test_result.type == test_expectations.CRASH:
                self.unexpected_crashes += 1
            elif test_result.type == test_expectations.TIMEOUT:
                self.unexpected_timeouts += 1
        if test_is_slow:
            self.slow_tests.add(test_result.test_name)

    def change_result_to_failure(self, existing_result, new_result, existing_expected, new_expected):
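        """Replace an existing result with a failing result for the same test, keeping the counters and maps consistent."""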
        assert existing_result.test_name == new_result.test_name
        if existing_result.type == new_result.type:
            return

        self.tests_by_expectation[existing_result.type].remove(existing_result.test_name)
        self.tests_by_expectation[new_result.type].add(new_result.test_name)

        had_failures = len(existing_result.failures) > 0

        existing_result.convert_to_failure(new_result)

        if not had_failures and len(existing_result.failures):
            self.total_failures += 1

        if len(existing_result.failures):
            self.failures_by_name[existing_result.test_name] = existing_result.failures

        if not existing_expected and new_expected:
            # test changed from unexpected to expected
            self.expected += 1
            self.unexpected_results_by_name.pop(existing_result.test_name, None)
            self.unexpected -= 1
            if had_failures:
                self.unexpected_failures -= 1
        elif existing_expected and not new_expected:
            # test changed from expected to unexpected
            self.expected -= 1
            self.unexpected_results_by_name[existing_result.test_name] = existing_result
            self.unexpected += 1
            if len(existing_result.failures):
                self.unexpected_failures += 1

    def merge(self, test_run_results):
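        """Fold the counters, maps and flags from another TestRunResults into this one and return self."""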
        if not test_run_results:
            return self
        # self.expectations should be the same for both
        self.total += test_run_results.total
        self.remaining += test_run_results.remaining
        self.expected += test_run_results.expected
        self.unexpected += test_run_results.unexpected
        self.unexpected_failures += test_run_results.unexpected_failures
        self.unexpected_crashes += test_run_results.unexpected_crashes
        self.unexpected_timeouts += test_run_results.unexpected_timeouts
        self.tests_by_expectation.update(test_run_results.tests_by_expectation)
        self.tests_by_timeline.update(test_run_results.tests_by_timeline)
        self.results_by_name.update(test_run_results.results_by_name)
        self.all_results += test_run_results.all_results
        self.unexpected_results_by_name.update(test_run_results.unexpected_results_by_name)
        self.failures_by_name.update(test_run_results.failures_by_name)
        self.total_failures += test_run_results.total_failures
        self.expected_skips += test_run_results.expected_skips
        self.slow_tests.update(test_run_results.slow_tests)

        self.interrupted |= test_run_results.interrupted
        self.keyboard_interrupted |= test_run_results.keyboard_interrupted
        return self


class RunDetails(object):
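    """Simple holder for the exit code and the result objects produced by a test run."""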
    def __init__(self, exit_code, summarized_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False):
        self.exit_code = exit_code
        self.summarized_results = summarized_results
        self.initial_results = initial_results
        self.retry_results = retry_results
        self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry


def _interpret_test_failures(failures):
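    """Convert a list of TestFailure objects into the extra JSON-friendly fields (is_missing_*, leaks, image_diff_percent) for a test's result dictionary."""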
    test_dict = {}

    failure_types = [type(failure) for failure in failures]
    # FIXME: Get rid of all these is_* values once there is a 1:1 map between
    # TestFailure type and test_expectations.EXPECTATION.
    if test_failures.FailureMissingAudio in failure_types:
        test_dict['is_missing_audio'] = True

    if test_failures.FailureMissingResult in failure_types:
        test_dict['is_missing_text'] = True

    if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
        test_dict['is_missing_image'] = True

    if test_failures.FailureDocumentLeak in failure_types:
        leaks = []
        for failure in failures:
            if isinstance(failure, test_failures.FailureDocumentLeak):
                for url in failure.leaked_document_urls:
                    leaks.append({"document": url})
        test_dict['leaks'] = leaks

    if 'image_diff_percent' not in test_dict:
        for failure in failures:
            if isinstance(failure, test_failures.FailureImageHashMismatch) or isinstance(failure, test_failures.FailureReftestMismatch):
                test_dict['image_diff_percent'] = failure.diff_percent

    return test_dict


# These results must match the ones in print_unexpected_results() in views/buildbot_results.py.
def summarize_results(port_obj, expectations_by_type, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=False, include_time_and_modifiers=False):
    """Returns a dictionary containing a summary of the test runs, with the following fields:
        'version': a version indicator
        'fixable': The number of fixable tests (NOW - PASS)
        'skipped': The number of skipped tests (NOW & SKIPPED)
        'num_regressions': The number of non-flaky failures
        'num_flaky': The number of flaky failures
        'num_missing': The number of tests with missing results
        'num_passes': The number of unexpected passes
        'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
        'date': the current date and time
    """
    results = {}
    results['version'] = 4

    tbe = initial_results.tests_by_expectation
    tbt = initial_results.tests_by_timeline
    results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
    results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])

    num_passes = 0
    num_flaky = 0
    num_missing = 0
    num_regressions = 0
    keywords = {}
    for expectation_string, expectation_enum in test_expectations.TestExpectations.EXPECTATIONS.iteritems():
        keywords[expectation_enum] = expectation_string.upper()

    for modifier_string, modifier_enum in test_expectations.TestExpectations.MODIFIERS.iteritems():
        keywords[modifier_enum] = modifier_string.upper()

    tests = {}
    other_crashes_dict = {}

    for test_name, result in initial_results.results_by_name.iteritems():
        # Note that if a test crashed in the original run, we ignore
        # whether or not it crashed when we retried it (if we retried it),
        # and always consider the result not flaky.
        pixel_tests_enabled = enabled_pixel_tests_in_retry or port_obj._options.pixel_tests or bool(result.reftest_type)

        # We're basically trying to find the first non-skip expectation, and use that expectation object for the remainder of the loop.
        # This works because tests are run on the first device type which won't skip them, regardless of other expectations, and never re-run.
        expected = 'SKIP'
        expectations = expectations_by_type.values()[0]
        for element in expectations_by_type.itervalues():
            test_expectation = element.filtered_expectations_for_test(test_name, pixel_tests_enabled, port_obj._options.world_leaks)
            expected = element.model().expectations_to_string(test_expectation)
            if expected != 'SKIP':
                expectations = element
                break

        result_type = result.type
        actual = [keywords[result_type]]

        if result_type == test_expectations.SKIP:
            continue

        if result.is_other_crash:
            other_crashes_dict[test_name] = {}
            continue

        test_dict = {}
        if result.has_stderr:
            test_dict['has_stderr'] = True

        if result.reftest_type:
            test_dict.update(reftest_type=list(result.reftest_type))

        if expectations.model().has_modifier(test_name, test_expectations.WONTFIX):
            test_dict['wontfix'] = True

        if result_type == test_expectations.PASS:
            num_passes += 1
            # FIXME: include passing tests that have stderr output.
            if expected == 'PASS' and not include_passes:
                continue
        elif result_type == test_expectations.CRASH:
            if test_name in initial_results.unexpected_results_by_name:
                num_regressions += 1
                test_dict['report'] = 'REGRESSION'
        elif result_type == test_expectations.MISSING:
            if test_name in initial_results.unexpected_results_by_name:
                num_missing += 1
                test_dict['report'] = 'MISSING'
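        # Any other unexpected result: use the retry results (if any) to decide between FLAKY and REGRESSION.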
        elif test_name in initial_results.unexpected_results_by_name:
            if retry_results and test_name not in retry_results.unexpected_results_by_name:
                actual.extend(expectations.model().get_expectations_string(test_name).split(" "))
                num_flaky += 1
                test_dict['report'] = 'FLAKY'
            elif retry_results:
                retry_result_type = retry_results.unexpected_results_by_name[test_name].type
                if result_type != retry_result_type:
                    if enabled_pixel_tests_in_retry and result_type == test_expectations.TEXT and (retry_result_type == test_expectations.IMAGE_PLUS_TEXT or retry_result_type == test_expectations.MISSING):
                        if retry_result_type == test_expectations.MISSING:
                            num_missing += 1
                        num_regressions += 1
                        test_dict['report'] = 'REGRESSION'
                    else:
                        num_flaky += 1
                        test_dict['report'] = 'FLAKY'
                    actual.append(keywords[retry_result_type])
                else:
                    num_regressions += 1
                    test_dict['report'] = 'REGRESSION'
            else:
                num_regressions += 1
                test_dict['report'] = 'REGRESSION'

        test_dict['expected'] = expected
        test_dict['actual'] = " ".join(actual)
        if include_time_and_modifiers:
            test_dict['time'] = round(1000 * result.test_run_time)
            # FIXME: Fix get_modifiers to return modifiers in new format.
            test_dict['modifiers'] = ' '.join(expectations.model().get_modifiers(test_name)).replace('BUGWK', 'webkit.org/b/')

        test_dict.update(_interpret_test_failures(result.failures))

        if retry_results:
            retry_result = retry_results.unexpected_results_by_name.get(test_name)
            if retry_result:
                test_dict.update(_interpret_test_failures(retry_result.failures))

        # Store tests hierarchically by directory, e.g.
        # foo/bar/baz.html: test_dict
        # foo/bar/baz1.html: test_dict
        #
        # becomes
        # foo: {
        #     bar: {
        #         baz.html: test_dict,
        #         baz1.html: test_dict
        #     }
        # }
        parts = test_name.split('/')
        current_map = tests
        for i, part in enumerate(parts):
            if i == (len(parts) - 1):
                current_map[part] = test_dict
                break
            if part not in current_map:
                current_map[part] = {}
            current_map = current_map[part]

    results['tests'] = tests
    results['num_passes'] = num_passes
    results['num_flaky'] = num_flaky
    results['num_missing'] = num_missing
    results['num_regressions'] = num_regressions
    results['uses_expectations_file'] = port_obj.uses_test_expectations_file()
    results['interrupted'] = initial_results.interrupted  # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
    results['layout_tests_dir'] = port_obj.layout_tests_dir()
    results['has_pretty_patch'] = port_obj.pretty_patch.pretty_patch_available()
    results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
    results['other_crashes'] = other_crashes_dict
    results['date'] = datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y")

    try:
        # We only use the svn revision for trac links in the results.html file,
        # so don't do this by default since it takes >100ms.
        # FIXME: Do we really need to populate this both here and in the json_results_generator?
        if port_obj.get_option("builder_name"):
            port_obj.host.initialize_scm()
            results['revision'] = port_obj.host.scm().head_svn_revision()
    except Exception as e:
        _log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
        # Handle cases where we're running outside of version control.
        import traceback
        _log.debug('Failed to learn head svn revision:')
        _log.debug(traceback.format_exc())
        results['revision'] = ""

    return results