# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import logging

from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures


_log = logging.getLogger(__name__)


class TestRunResults(object):
    def __init__(self, expectations, num_tests):
        self.total = num_tests
        self.remaining = self.total
        self.expectations = expectations
        self.expected = 0
        self.unexpected = 0
        self.unexpected_failures = 0
        self.unexpected_crashes = 0
        self.unexpected_timeouts = 0
        self.tests_by_expectation = {}
        self.tests_by_timeline = {}
        self.results_by_name = {}  # Map of test name to the last result for the test.
        self.all_results = []  # All results from a run, including every iteration of every test.
        self.unexpected_results_by_name = {}
        self.failures_by_name = {}
        self.total_failures = 0
        self.expected_skips = 0
        for expectation in test_expectations.TestExpectations.EXPECTATIONS.values():
            self.tests_by_expectation[expectation] = set()
        for timeline in test_expectations.TestExpectations.TIMELINES.values():
            self.tests_by_timeline[timeline] = expectations.model().get_tests_with_timeline(timeline)
        self.slow_tests = set()
        self.interrupted = False

    def add(self, test_result, expected, test_is_slow):
        self.tests_by_expectation[test_result.type].add(test_result.test_name)
        self.results_by_name[test_result.test_name] = test_result
        if test_result.type != test_expectations.SKIP:
            self.all_results.append(test_result)
        self.remaining -= 1
        if len(test_result.failures):
            self.total_failures += 1
            self.failures_by_name[test_result.test_name] = test_result.failures
        if expected:
            self.expected += 1
            if test_result.type == test_expectations.SKIP:
                self.expected_skips += 1
        else:
            self.unexpected_results_by_name[test_result.test_name] = test_result
            self.unexpected += 1
            if len(test_result.failures):
                self.unexpected_failures += 1
            if test_result.type == test_expectations.CRASH:
                self.unexpected_crashes += 1
            elif test_result.type == test_expectations.TIMEOUT:
                self.unexpected_timeouts += 1
        if test_is_slow:
            self.slow_tests.add(test_result.test_name)

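# A minimal usage sketch, assuming the layout test runner drives this class roughly the way
# run_webkit_tests does. The port setup, the TestResult constructor arguments, and the test
# name below are illustrative assumptions, not calls made by this module:
#
#     expectations = test_expectations.TestExpectations(port, ['fast/dom/example.html'])
#     run_results = TestRunResults(expectations, num_tests=1)
#     result = test_results.TestResult('fast/dom/example.html', failures=[], test_run_time=0.02)
#     run_results.add(result, expected=True, test_is_slow=False)
#     # run_results.expected == 1, run_results.remaining == 0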

class RunDetails(object):
    def __init__(self, exit_code, summarized_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False):
        self.exit_code = exit_code
        self.summarized_results = summarized_results
        self.initial_results = initial_results
        self.retry_results = retry_results
        self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry
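
# A sketch of how a run's outcome might be packaged by the caller (the exit-code convention
# here is an assumption; this module only defines the container):
#
#     summary = summarize_results(port_obj, expectations, initial_results, retry_results,
#                                 enabled_pixel_tests_in_retry)
#     details = RunDetails(exit_code=initial_results.unexpected, summarized_results=summary,
#                          initial_results=initial_results, retry_results=retry_results)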


def _interpret_test_failures(failures):
    test_dict = {}
    failure_types = [type(failure) for failure in failures]
    # FIXME: get rid of all these is_* values once there is a 1:1 map between
    # TestFailure type and test_expectations.EXPECTATION.
    if test_failures.FailureMissingAudio in failure_types:
        test_dict['is_missing_audio'] = True

    if test_failures.FailureMissingResult in failure_types:
        test_dict['is_missing_text'] = True

    if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
        test_dict['is_missing_image'] = True

    if 'image_diff_percent' not in test_dict:
        for failure in failures:
            if isinstance(failure, test_failures.FailureImageHashMismatch) or isinstance(failure, test_failures.FailureReftestMismatch):
                test_dict['image_diff_percent'] = failure.diff_percent

    return test_dict
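
# For reference, a sketch of the dicts this helper produces (the numbers are illustrative):
# a FailureMissingImage in the failure list yields {'is_missing_image': True}, and a
# FailureImageHashMismatch whose diff_percent is 0.1 yields {'image_diff_percent': 0.1}.
# Keys are only added when the corresponding failure type occurred.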

# These results must match the ones in print_unexpected_results() in views/buildbot_results.py.
def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=False, include_time_and_modifiers=False):
    """Returns a dictionary containing a summary of the test runs, with the following fields:
        'version': a version indicator
        'fixable': The number of fixable tests (NOW - PASS)
        'skipped': The number of skipped tests (NOW & SKIPPED)
        'num_regressions': The number of non-flaky failures
        'num_flaky': The number of flaky failures
        'num_missing': The number of tests with missing results
        'num_passes': The number of unexpected passes
        'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
    """
    results = {}
    results['version'] = 3

    tbe = initial_results.tests_by_expectation
    tbt = initial_results.tests_by_timeline
    results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
    results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])

    num_passes = 0
    num_flaky = 0
    num_missing = 0
    num_regressions = 0
    keywords = {}
    for expectation_string, expectation_enum in test_expectations.TestExpectations.EXPECTATIONS.iteritems():
        keywords[expectation_enum] = expectation_string.upper()

    for modifier_string, modifier_enum in test_expectations.TestExpectations.MODIFIERS.iteritems():
        keywords[modifier_enum] = modifier_string.upper()

    tests = {}

    for test_name, result in initial_results.results_by_name.iteritems():
        # Note that if a test crashed in the original run, we ignore
        # whether or not it crashed when we retried it (if we retried it),
        # and always consider the result not flaky.
        expected = expectations.model().get_expectations_string(test_name)
        result_type = result.type
        actual = [keywords[result_type]]

        if result_type == test_expectations.SKIP:
            continue

        test_dict = {}
        if result.has_stderr:
            test_dict['has_stderr'] = True

        if result.reftest_type:
            test_dict.update(reftest_type=list(result.reftest_type))

        if expectations.model().has_modifier(test_name, test_expectations.WONTFIX):
            test_dict['wontfix'] = True

        if result_type == test_expectations.PASS:
            num_passes += 1
            # FIXME: include passing tests that have stderr output.
            if expected == 'PASS' and not include_passes:
                continue
        elif result_type == test_expectations.CRASH:
            if test_name in initial_results.unexpected_results_by_name:
                num_regressions += 1
                test_dict['report'] = 'REGRESSION'
        elif result_type == test_expectations.MISSING:
            if test_name in initial_results.unexpected_results_by_name:
                num_missing += 1
                test_dict['report'] = 'MISSING'
        elif test_name in initial_results.unexpected_results_by_name:
            if retry_results and test_name not in retry_results.unexpected_results_by_name:
                actual.extend(expectations.model().get_expectations_string(test_name).split(" "))
                num_flaky += 1
                test_dict['report'] = 'FLAKY'
            elif retry_results:
                retry_result_type = retry_results.unexpected_results_by_name[test_name].type
                if result_type != retry_result_type:
                    if enabled_pixel_tests_in_retry and result_type == test_expectations.TEXT and (retry_result_type == test_expectations.IMAGE_PLUS_TEXT or retry_result_type == test_expectations.MISSING):
                        if retry_result_type == test_expectations.MISSING:
                            num_missing += 1
                        num_regressions += 1
                        test_dict['report'] = 'REGRESSION'
                    else:
                        num_flaky += 1
                        test_dict['report'] = 'FLAKY'
                    actual.append(keywords[retry_result_type])
                else:
                    num_regressions += 1
                    test_dict['report'] = 'REGRESSION'
            else:
                num_regressions += 1
                test_dict['report'] = 'REGRESSION'

        test_dict['expected'] = expected
        test_dict['actual'] = " ".join(actual)
        if include_time_and_modifiers:
            test_dict['time'] = round(1000 * result.test_run_time)
            # FIXME: Fix get_modifiers to return modifiers in new format.
            test_dict['modifiers'] = ' '.join(expectations.model().get_modifiers(test_name)).replace('BUGWK', 'webkit.org/b/')

        test_dict.update(_interpret_test_failures(result.failures))

        if retry_results:
            retry_result = retry_results.unexpected_results_by_name.get(test_name)
            if retry_result:
                test_dict.update(_interpret_test_failures(retry_result.failures))

        # Store the test hierarchically by directory, e.g.
        # foo/bar/baz.html: test_dict
        # foo/bar/baz1.html: test_dict
        #
        # becomes
        # foo: {
        #     bar: {
        #         baz.html: test_dict,
        #         baz1.html: test_dict
        #     }
        # }
        parts = test_name.split('/')
        current_map = tests
        for i, part in enumerate(parts):
            if i == (len(parts) - 1):
                current_map[part] = test_dict
                break
            if part not in current_map:
                current_map[part] = {}
            current_map = current_map[part]

    results['tests'] = tests
    results['num_passes'] = num_passes
    results['num_flaky'] = num_flaky
    results['num_missing'] = num_missing
    results['num_regressions'] = num_regressions
    results['uses_expectations_file'] = port_obj.uses_test_expectations_file()
    results['interrupted'] = initial_results.interrupted  # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
    results['layout_tests_dir'] = port_obj.layout_tests_dir()
    results['has_pretty_patch'] = port_obj.pretty_patch.pretty_patch_available()
    results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')

    try:
        # We only use the svn revision for trac links in the results.html file.
        # Don't do this by default since it takes >100ms.
        # FIXME: Do we really need to populate this both here and in the json_results_generator?
        if port_obj.get_option("builder_name"):
            port_obj.host.initialize_scm()
            results['revision'] = port_obj.host.scm().head_svn_revision()
    except Exception, e:
        _log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
        # Handle cases where we're running outside of version control.
        import traceback
        _log.debug('Failed to learn head svn revision:')
        _log.debug(traceback.format_exc())
        results['revision'] = ""

    return results
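
# For orientation, a sketch of what summarize_results() might return for a run with a single
# unexpected text failure on a test expected to pass (every value below is illustrative, not
# captured output, and the test name is hypothetical):
#
#     {
#         'version': 3,
#         'fixable': 0,
#         'skipped': 0,
#         'num_passes': 0, 'num_flaky': 0, 'num_missing': 0, 'num_regressions': 1,
#         'tests': {'fast': {'dom': {'example.html': {
#             'expected': 'PASS', 'actual': 'TEXT', 'report': 'REGRESSION'}}}},
#         'uses_expectations_file': True,
#         'interrupted': False,
#         'layout_tests_dir': '/path/to/LayoutTests',
#         'has_pretty_patch': False,
#         'pixel_tests_enabled': True,
#     }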