# Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py
# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from datetime import datetime
import logging
import unittest

from webkitpy.common.net import bugzilla
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.bot.commitqueuetask import *
from webkitpy.tool.mocktool import MockTool

_log = logging.getLogger(__name__)


class MockCommitQueue(CommitQueueTaskDelegate):
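    """CommitQueueTaskDelegate double used by these tests.

    run_command() consumes error_plan in order: a None entry means the
    command succeeds, and a ScriptError entry is raised to simulate that
    command failing.  command_failed() hands back an incrementing failure
    status id, and report_flaky_tests() records the reported test names so
    tests can assert on them later via get_reported_flaky_tests().
    """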
    def __init__(self, error_plan):
        self._error_plan = error_plan
        self._failure_status_id = 0
        self._flaky_tests = []

    def run_command(self, command):
        _log.info("run_webkit_patch: %s" % command)
        if self._error_plan:
            error = self._error_plan.pop(0)
            if error:
                raise error

    def command_passed(self, success_message, patch):
        _log.info("command_passed: success_message='%s' patch='%s'" % (
            success_message, patch.id()))

    def command_failed(self, failure_message, script_error, patch):
        _log.info("command_failed: failure_message='%s' script_error='%s' patch='%s'" % (
            failure_message, script_error, patch.id()))
        self._failure_status_id += 1
        return self._failure_status_id

    def refetch_patch(self, patch):
        return patch

    def test_results(self):
        return LayoutTestResults(test_results=[], did_exceed_test_failure_limit=False)

    def report_flaky_tests(self, patch, flaky_results, results_archive):
        current_flaky_tests = [result.test_name for result in flaky_results]
        self._flaky_tests += current_flaky_tests
        _log.info("report_flaky_tests: patch='%s' flaky_tests='%s' archive='%s'" % (patch.id(), current_flaky_tests, results_archive.filename))

    def get_reported_flaky_tests(self):
        return self._flaky_tests

    def archive_last_test_results(self, patch):
        _log.info("archive_last_test_results: patch='%s'" % patch.id())
        archive = Mock()
        archive.filename = "mock-archive-%s.zip" % patch.id()
        return archive

    def build_style(self):
        return "both"

    def did_pass_testing_ews(self, patch):
        return False


class FailingTestCommitQueue(MockCommitQueue):
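    """MockCommitQueue whose test_results() follow a per-run failure plan.

    Each "build-and-test" call to run_command() advances a run counter, and
    test_results() then reports the failures planned for that run.
    """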
    def __init__(self, error_plan, test_failure_plan):
        MockCommitQueue.__init__(self, error_plan)
        self._test_run_counter = -1  # Special value to indicate tests have never been run.
        self._test_failure_plan = test_failure_plan

    def run_command(self, command):
        if command[0] == "build-and-test":
            self._test_run_counter += 1
        MockCommitQueue.run_command(self, command)

    def _mock_test_result(self, testname):
        return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])

    def test_results(self):
        # Doesn't make sense to ask for the test_results until the tests have run at least once.
        assert self._test_run_counter >= 0
        failures_for_run = self._test_failure_plan[self._test_run_counter]
        assert isinstance(failures_for_run, list)
        results = LayoutTestResults(test_results=[self._mock_test_result(failure) for failure in failures_for_run], did_exceed_test_failure_limit=(len(failures_for_run) >= 10))
        return results


class PatchAnalysisResult(object):
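    """Symbolic names for the overall outcome of running a CommitQueueTask."""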
    FAIL = "Fail"
    DEFER = "Defer"
    PASS = "Pass"


class MockSimpleTestPlanCommitQueue(MockCommitQueue):
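    """MockCommitQueue scripted with results for each run CommitQueueTask performs.

    "build-and-test" runs with "--no-clean" (tests with the patch applied)
    consume first_test_failures and then second_test_failures; a
    "build-and-test" run without "--no-clean" is the clean (without-patch)
    run and consumes clean_test_failures.  Any run with failures raises a
    ScriptError, and test_results() reports the failures of the most recent run.
    """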
    def __init__(self, first_test_failures, second_test_failures, clean_test_failures):
        MockCommitQueue.__init__(self, [])
        self._did_run_clean_tests = False
        self._patch_test_results = [first_test_failures, second_test_failures]
        self._clean_test_results = [clean_test_failures]
        self._current_test_results = []

    def run_command(self, command):
        MockCommitQueue.run_command(self, command)
        if command[0] == "build-and-test":
            if "--no-clean" in command:
                self._current_test_results = self._patch_test_results.pop(0)
            else:
                self._current_test_results = self._clean_test_results.pop(0)
                self._did_run_clean_tests = True

            if self._current_test_results:
                raise ScriptError("MOCK test failure")

    def _mock_test_result(self, testname):
        return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])

    def test_results(self):
        assert isinstance(self._current_test_results, list)
        return LayoutTestResults(test_results=[self._mock_test_result(failure) for failure in self._current_test_results], did_exceed_test_failure_limit=(len(self._current_test_results) >= 10))

    def did_run_clean_tests(self):
        return self._did_run_clean_tests


# We use GoldenScriptError to make sure that the code under test throws the
# correct (i.e., golden) exception.
class GoldenScriptError(ScriptError):
    pass


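# Enough failures to exceed the 10-failure limit the mocks above use for did_exceed_test_failure_limit.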
_lots_of_failing_tests = ["test-%s.html" % num for num in range(100)]


class CommitQueueTaskTest(unittest.TestCase):
    def _run_and_expect_patch_analysis_result(self, commit_queue, expected_analysis_result, expected_reported_flaky_tests=[], expect_clean_tests_to_run=False, expected_failure_status_id=0):
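        """Run CommitQueueTask over the given mock delegate and check the outcome.

        task.run() returning True maps to PASS, returning False maps to DEFER,
        and a ScriptError escaping maps to FAIL.  Also asserts which flaky
        tests were reported, whether the clean (without-patch) test run
        happened, and, on FAIL, the failure status id and that the patch-run
        test results are available.
        """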
        tool = MockTool(log_executive=True)
        patch = tool.bugs.fetch_attachment(10000)
        task = CommitQueueTask(commit_queue, patch)

        try:
            result = task.run()
            if result:
                analysis_result = PatchAnalysisResult.PASS
            else:
                analysis_result = PatchAnalysisResult.DEFER
        except ScriptError:
            analysis_result = PatchAnalysisResult.FAIL

        self.assertEqual(analysis_result, expected_analysis_result)
        self.assertEqual(frozenset(commit_queue.get_reported_flaky_tests()), frozenset(expected_reported_flaky_tests))
        self.assertEqual(commit_queue.did_run_clean_tests(), expect_clean_tests_to_run)

        # The failure status only means anything if we actually failed.
        if expected_analysis_result == PatchAnalysisResult.FAIL:
            self.assertEqual(task.failure_status_id, expected_failure_status_id)
            self.assertIsInstance(task.results_from_patch_test_run(patch), LayoutTestResults)

    def _run_through_task(self, commit_queue, expected_logs, expected_exception=None, expect_retry=False):
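        """Run CommitQueueTask under OutputCapture and compare its log output.

        expect_retry=True means task.run() is expected to return False (the
        patch should be retried rather than rejected); expected_exception is
        forwarded to OutputCapture.assert_outputs().
        """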
        self.maxDiff = None
        tool = MockTool(log_executive=True)
        patch = tool.bugs.fetch_attachment(10000)
        task = CommitQueueTask(commit_queue, patch)
        success = OutputCapture().assert_outputs(self, task.run, expected_logs=expected_logs, expected_exception=expected_exception)
        if not expected_exception:
            self.assertEqual(success, not expect_retry)
        return task

    def test_success_case(self):
        commit_queue = MockCommitQueue([])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs)

    def test_fast_success_case(self):
        commit_queue = MockCommitQueue([])
        commit_queue.did_pass_testing_ews = lambda patch: True
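        # Since the testing EWS already passed this patch, the expected logs
        # below contain no 'build-and-test' step.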
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs)

    def test_clean_failure(self):
        commit_queue = MockCommitQueue([
            ScriptError("MOCK clean failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_failed: failure_message='Unable to clean working directory' script_error='MOCK clean failure' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs, expect_retry=True)

    def test_update_failure(self):
        commit_queue = MockCommitQueue([
            None,
            ScriptError("MOCK update failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_failed: failure_message='Unable to update working directory' script_error='MOCK update failure' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs, expect_retry=True)

    def test_apply_failure(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            GoldenScriptError("MOCK apply failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_failed: failure_message='Patch does not apply' script_error='MOCK apply failure' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)

    def test_validate_changelog_failure(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            None,
            GoldenScriptError("MOCK validate failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_failed: failure_message='ChangeLog did not pass validation' script_error='MOCK validate failure' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)

    def test_build_failure(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            None,
            None,
            GoldenScriptError("MOCK build failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000'
run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=both']
command_passed: success_message='Able to build without patch' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)

    def test_red_build_failure(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            None,
            None,
            ScriptError("MOCK build failure"),
            ScriptError("MOCK clean build failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000'
run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=both']
command_failed: failure_message='Unable to build without patch' script_error='MOCK clean build failure' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs, expect_retry=True)

    def test_land_failure(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            None,
            None,
            None,
            None,
            GoldenScriptError("MOCK land failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_failed: failure_message='Unable to land patch' script_error='MOCK land failure' patch='10000'
"""
        # FIXME: This should really be expect_retry=True for a better user experience.
        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)

    def test_failed_archive(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1"],
            second_test_failures=[],
            clean_test_failures=[])

        # It's possible for the delegate to fail to archive layout tests,
        # but we shouldn't try to report flaky tests when that happens.
        commit_queue.archive_last_test_results = lambda patch: None

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.PASS)

    def test_double_flaky_test_failure(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1"],
            second_test_failures=["Fail2"],
            clean_test_failures=["Fail1"])

        # Different tests failed on each run with the patch, so both failures
        # are reported as flaky and the patch is deferred for another try.
        # Note also that there is no attempt to run the tests w/o the patch.
        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expected_reported_flaky_tests=["Fail1", "Fail2"])

    def test_test_failure(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1"],
            second_test_failures=["Fail1"],
            clean_test_failures=[])

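        # "Fail1" fails on both runs with the patch but not on the clean run,
        # so the failure is attributed to the patch and the patch is rejected.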
        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.FAIL, expect_clean_tests_to_run=True, expected_failure_status_id=1)

    def test_red_test_failure(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1"],
            second_test_failures=["Fail1"],
            clean_test_failures=["Fail1"])

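        # "Fail1" also fails without the patch, so it is pre-existing tree
        # redness rather than a problem with the patch, and the patch passes.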
        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.PASS, expect_clean_tests_to_run=True)

    def test_first_failure_limit(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=_lots_of_failing_tests,
            second_test_failures=[],
            clean_test_failures=[])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True, expected_failure_status_id=1)

    def test_first_failure_limit_with_some_tree_redness(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=_lots_of_failing_tests,
            second_test_failures=["Fail1", "Fail2", "Fail3"],
            clean_test_failures=["Fail1", "Fail2", "Fail3"])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True, expected_failure_status_id=1)

    def test_second_failure_limit(self):
        # There need to be some failures in the first set of tests, or it won't even make it to the second test.
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1", "Fail2", "Fail3"],
            second_test_failures=_lots_of_failing_tests,
            clean_test_failures=["Fail1", "Fail2", "Fail3"])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True, expected_failure_status_id=2)

    def test_tree_failure_limit_with_patch_that_potentially_fixes_some_redness(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1", "Fail2", "Fail3"],
            second_test_failures=["Fail1", "Fail2", "Fail3"],
            clean_test_failures=_lots_of_failing_tests)

        # Unfortunately there are cases where the clean build will randomly fail enough tests to hit the failure limit.
        # With that in mind, we can't actually know that this patch is good or bad until we see a clean run that doesn't
        # exceed the failure limit.
        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True)

    def test_first_and_second_failure_limit(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=_lots_of_failing_tests,
            second_test_failures=_lots_of_failing_tests,
            clean_test_failures=[])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.FAIL, expect_clean_tests_to_run=True, expected_failure_status_id=1)

    def test_first_and_clean_failure_limit(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=_lots_of_failing_tests,
            second_test_failures=[],
            clean_test_failures=_lots_of_failing_tests)

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True)

    def test_first_second_and_clean_failure_limit(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=_lots_of_failing_tests,
            second_test_failures=_lots_of_failing_tests,
            clean_test_failures=_lots_of_failing_tests)

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True)

    def test_red_tree_patch_rejection(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1", "Fail2"],
            second_test_failures=["Fail1", "Fail2"],
            clean_test_failures=["Fail1"])

        # The failure_status_id should come from the test run with the patch (1), not the run without the patch (2).
        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.FAIL, expect_clean_tests_to_run=True, expected_failure_status_id=1)

    def test_two_flaky_tests(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1"],
            second_test_failures=["Fail2"],
            clean_test_failures=["Fail1", "Fail2"])

        # FIXME: This should pass, but as of right now, it defers.
        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expected_reported_flaky_tests=["Fail1", "Fail2"])

    def test_one_flaky_test(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1"],
            second_test_failures=[],
            clean_test_failures=[])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.PASS, expected_reported_flaky_tests=["Fail1"])

    def test_very_flaky_patch(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1", "Fail2", "Fail3", "Fail4", "Fail5"],
            second_test_failures=["Fail6", "Fail7", "Fail8", "Fail9", "Fail10"],
            clean_test_failures=[])

        # FIXME: This should actually fail, but right now it defers
        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expected_reported_flaky_tests=["Fail1", "Fail2", "Fail3", "Fail4", "Fail5", "Fail6", "Fail7", "Fail8", "Fail9", "Fail10"])

    def test_very_flaky_patch_with_some_tree_redness(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["PreExistingFail1", "PreExistingFail2", "Fail1", "Fail2", "Fail3", "Fail4", "Fail5"],
            second_test_failures=["PreExistingFail1", "PreExistingFail2", "Fail6", "Fail7", "Fail8", "Fail9", "Fail10"],
            clean_test_failures=["PreExistingFail1", "PreExistingFail2"])

        # FIXME: This should actually fail, but right now it defers
        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True, expected_reported_flaky_tests=["Fail1", "Fail2", "Fail3", "Fail4", "Fail5", "Fail6", "Fail7", "Fail8", "Fail9", "Fail10"])

    def test_different_test_failures(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1", "Fail2", "Fail3", "Fail4", "Fail5", "Fail6"],
            second_test_failures=["Fail1", "Fail2", "Fail3", "Fail4", "Fail5"],
            clean_test_failures=[])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.FAIL, expect_clean_tests_to_run=True, expected_reported_flaky_tests=["Fail6"], expected_failure_status_id=1)

    def test_different_test_failures_with_some_tree_redness(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["PreExistingFail1", "PreExistingFail2", "Fail1", "Fail2", "Fail3", "Fail4", "Fail5", "Fail6"],
            second_test_failures=["PreExistingFail1", "PreExistingFail2", "Fail1", "Fail2", "Fail3", "Fail4", "Fail5"],
            clean_test_failures=["PreExistingFail1", "PreExistingFail2"])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.FAIL, expect_clean_tests_to_run=True, expected_reported_flaky_tests=["Fail6"], expected_failure_status_id=1)

    def test_different_test_failures_with_some_tree_redness_and_some_fixes(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["PreExistingFail1", "Fail1", "Fail2", "Fail3", "Fail4", "Fail5", "Fail6"],
            second_test_failures=["PreExistingFail1", "Fail1", "Fail2", "Fail3", "Fail4", "Fail5"],
            clean_test_failures=["PreExistingFail1", "PreExistingFail2"])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.FAIL, expect_clean_tests_to_run=True, expected_reported_flaky_tests=["Fail6"], expected_failure_status_id=1)

    def test_mildly_flaky_patch(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1"],
            second_test_failures=["Fail2"],
            clean_test_failures=[])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=False, expected_reported_flaky_tests=["Fail1", "Fail2"])

    def test_mildly_flaky_patch_with_some_tree_redness(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["PreExistingFail1", "PreExistingFail2", "Fail1"],
            second_test_failures=["PreExistingFail1", "PreExistingFail2", "Fail2"],
            clean_test_failures=["PreExistingFail1", "PreExistingFail2"])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True, expected_reported_flaky_tests=["Fail1", "Fail2"])

    def test_tree_more_red_than_patch(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1", "Fail2", "Fail3"],
            second_test_failures=["Fail1", "Fail2", "Fail3"],
            clean_test_failures=["Fail1", "Fail2", "Fail3", "Fail4"])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.PASS, expect_clean_tests_to_run=True)

    def _expect_validate(self, patch, is_valid):
        class MockDelegate(object):
            def refetch_patch(self, patch):
                return patch

        task = CommitQueueTask(MockDelegate(), patch)
        self.assertEqual(task.validate(), is_valid)

    def _mock_patch(self, attachment_dict={}, bug_dict={'bug_status': 'NEW'}, committer="fake"):
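        """Build a minimal bugzilla.Attachment (with a backing Bug) for validate() tests.

        The defaults produce a patch that CommitQueueTask.validate() accepts;
        test_validate() overrides attachment_dict, bug_dict, or committer to
        make the patch invalid in each of the ways validate() checks.
        """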
        bug = bugzilla.Bug(bug_dict, None)
        patch = bugzilla.Attachment(attachment_dict, bug)
        patch._committer = committer
        return patch

    def test_validate(self):
        self._expect_validate(self._mock_patch(), True)
        self._expect_validate(self._mock_patch({'is_obsolete': True}), False)
        self._expect_validate(self._mock_patch(bug_dict={'bug_status': 'CLOSED'}), False)
        self._expect_validate(self._mock_patch(committer=None), False)
        self._expect_validate(self._mock_patch({'review': '-'}), False)