# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from datetime import datetime
import logging
import unittest

from webkitpy.common.net import bugzilla
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.bot.commitqueuetask import *
from webkitpy.tool.mocktool import MockTool

_log = logging.getLogger(__name__)
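

# MockCommitQueue stands in for the CommitQueueTaskDelegate used by CommitQueueTask.
# Each entry in error_plan is consumed by one run_command() call, in order: a None
# entry lets that step succeed, while a ScriptError entry is raised to simulate the
# step failing.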
class MockCommitQueue(CommitQueueTaskDelegate):
    def __init__(self, error_plan):
        self._error_plan = error_plan
        self._failure_status_id = 0
        self._flaky_tests = []

    def run_command(self, command):
        _log.info("run_webkit_patch: %s" % command)
        if self._error_plan:
            error = self._error_plan.pop(0)
            if error:
                raise error

    def command_passed(self, success_message, patch):
        _log.info("command_passed: success_message='%s' patch='%s'" % (
            success_message, patch.id()))

    def command_failed(self, failure_message, script_error, patch):
        _log.info("command_failed: failure_message='%s' script_error='%s' patch='%s'" % (
            failure_message, script_error, patch.id()))
        self._failure_status_id += 1
        return self._failure_status_id

    def refetch_patch(self, patch):
        return patch

    def test_results(self):
        return LayoutTestResults(test_results=[], did_exceed_test_failure_limit=False)

    def report_flaky_tests(self, patch, flaky_results, results_archive):
        current_flaky_tests = [result.test_name for result in flaky_results]
        self._flaky_tests += current_flaky_tests
        _log.info("report_flaky_tests: patch='%s' flaky_tests='%s' archive='%s'" % (patch.id(), current_flaky_tests, results_archive.filename))

    def get_reported_flaky_tests(self):
        return self._flaky_tests

    def archive_last_test_results(self, patch):
        _log.info("archive_last_test_results: patch='%s'" % patch.id())
        archive = Mock()
        archive.filename = "mock-archive-%s.zip" % patch.id()
        return archive

    def build_style(self):
        return "release"  # Matches the --build-style=release flag in the expected logs below.

    def did_pass_testing_ews(self, patch):
        return False
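

# FailingTestCommitQueue additionally simulates layout test results: each entry in
# test_failure_plan is the list of test names that fail on the corresponding
# "build-and-test" run.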
class FailingTestCommitQueue(MockCommitQueue):
    def __init__(self, error_plan, test_failure_plan):
        MockCommitQueue.__init__(self, error_plan)
        self._test_run_counter = -1  # Special value to indicate tests have never been run.
        self._test_failure_plan = test_failure_plan

    def run_command(self, command):
        if command[0] == "build-and-test":
            self._test_run_counter += 1
        MockCommitQueue.run_command(self, command)

    def _mock_test_result(self, testname):
        return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])

    def test_results(self):
        # Doesn't make sense to ask for the test_results until the tests have run at least once.
        assert(self._test_run_counter >= 0)
        failures_for_run = self._test_failure_plan[self._test_run_counter]
        assert(isinstance(failures_for_run, list))
        results = LayoutTestResults(test_results=map(self._mock_test_result, failures_for_run), did_exceed_test_failure_limit=(len(failures_for_run) >= 10))
        return results


class PatchAnalysisResult(object):
    PASS = "Pass"
    DEFER = "Defer"
    FAIL = "Fail"
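

# MockSimpleTestPlanCommitQueue models the three test invocations CommitQueueTask can
# make: two runs with the patch applied (first_test_failures, second_test_failures) and
# one run against a clean tree (clean_test_failures). A non-empty failure list makes
# the corresponding "build-and-test" command raise a ScriptError.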
class MockSimpleTestPlanCommitQueue(MockCommitQueue):
    def __init__(self, first_test_failures, second_test_failures, clean_test_failures):
        MockCommitQueue.__init__(self, [])
        self._did_run_clean_tests = False
        self._patch_test_results = [first_test_failures, second_test_failures]
        self._clean_test_results = [clean_test_failures]
        self._current_test_results = []

    def run_command(self, command):
        MockCommitQueue.run_command(self, command)
        if command[0] == "build-and-test":
            if "--no-clean" in command:
                self._current_test_results = self._patch_test_results.pop(0)
            else:
                self._current_test_results = self._clean_test_results.pop(0)
                self._did_run_clean_tests = True

            if self._current_test_results:
                raise ScriptError("MOCK test failure")

    def _mock_test_result(self, testname):
        return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])

    def test_results(self):
        assert(isinstance(self._current_test_results, list))
        return LayoutTestResults(test_results=map(self._mock_test_result, self._current_test_results), did_exceed_test_failure_limit=(len(self._current_test_results) >= 10))

    def did_run_clean_tests(self):
        return self._did_run_clean_tests


# We use GoldenScriptError to make sure that the code under test throws the
# correct (i.e., golden) exception.
class GoldenScriptError(ScriptError):
    pass
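

# 100 failing tests is comfortably above the did_exceed_test_failure_limit threshold
# (>= 10 failures) used by the mocks above, so this list simulates hitting the failure limit.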
_lots_of_failing_tests = map(lambda num: "test-%s.html" % num, range(0, 100))
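

# CommitQueueTaskTest drives CommitQueueTask against the mock delegates above.
# _run_through_task checks the exact sequence of logged webkit-patch commands, while
# _run_and_expect_patch_analysis_result checks the overall PASS/DEFER/FAIL outcome,
# the flaky tests reported, and whether a clean-tree test run was attempted.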
class CommitQueueTaskTest(unittest.TestCase):
    def _run_and_expect_patch_analysis_result(self, commit_queue, expected_analysis_result, expected_reported_flaky_tests=[], expect_clean_tests_to_run=False, expected_failure_status_id=0):
        tool = MockTool(log_executive=True)
        patch = tool.bugs.fetch_attachment(10000)
        task = CommitQueueTask(commit_queue, patch)

        try:
            result = task.run()
            if result:
                analysis_result = PatchAnalysisResult.PASS
            else:
                analysis_result = PatchAnalysisResult.DEFER
        except ScriptError:
            analysis_result = PatchAnalysisResult.FAIL

        self.assertEqual(analysis_result, expected_analysis_result)
        self.assertEqual(frozenset(commit_queue.get_reported_flaky_tests()), frozenset(expected_reported_flaky_tests))
        self.assertEqual(commit_queue.did_run_clean_tests(), expect_clean_tests_to_run)

        # The failure status only means anything if we actually failed.
        if expected_analysis_result == PatchAnalysisResult.FAIL:
            self.assertEqual(task.failure_status_id, expected_failure_status_id)
            self.assertIsInstance(task.results_from_patch_test_run(patch), LayoutTestResults)

    def _run_through_task(self, commit_queue, expected_logs, expected_exception=None, expect_retry=False):
        tool = MockTool(log_executive=True)
        patch = tool.bugs.fetch_attachment(10000)
        task = CommitQueueTask(commit_queue, patch)
        success = OutputCapture().assert_outputs(self, task.run, expected_logs=expected_logs, expected_exception=expected_exception)
        if not expected_exception:
            self.assertEqual(success, not expect_retry)

    def test_success_case(self):
        commit_queue = MockCommitQueue([])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=release']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive', '--build-style=release']
command_passed: success_message='Passed tests' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs)

    def test_fast_success_case(self):
        commit_queue = MockCommitQueue([])
        commit_queue.did_pass_testing_ews = lambda patch: True
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=release']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs)

    def test_clean_failure(self):
        commit_queue = MockCommitQueue([
            ScriptError("MOCK clean failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_failed: failure_message='Unable to clean working directory' script_error='MOCK clean failure' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs, expect_retry=True)

    def test_update_failure(self):
        commit_queue = MockCommitQueue([
            None,
            ScriptError("MOCK update failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_failed: failure_message='Unable to update working directory' script_error='MOCK update failure' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs, expect_retry=True)

    def test_apply_failure(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            GoldenScriptError("MOCK apply failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_failed: failure_message='Patch does not apply' script_error='MOCK apply failure' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)

    def test_validate_changelog_failure(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            None,
            GoldenScriptError("MOCK validate failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_failed: failure_message='ChangeLog did not pass validation' script_error='MOCK validate failure' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)

    def test_build_failure(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            None,
            None,
            GoldenScriptError("MOCK build failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=release']
command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000'
run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=release']
command_passed: success_message='Able to build without patch' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)

    def test_red_build_failure(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            None,
            None,
            ScriptError("MOCK build failure"),
            ScriptError("MOCK clean build failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=release']
command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000'
run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=release']
command_failed: failure_message='Unable to build without patch' script_error='MOCK clean build failure' patch='10000'
"""
        self._run_through_task(commit_queue, expected_logs, expect_retry=True)

    def test_land_failure(self):
        commit_queue = MockCommitQueue([
            None,
            None,
            None,
            None,
            None,
            None,
            GoldenScriptError("MOCK land failure"),
        ])
        expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=release']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive', '--build-style=release']
command_passed: success_message='Passed tests' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_failed: failure_message='Unable to land patch' script_error='MOCK land failure' patch='10000'
"""
        # FIXME: This should really be expect_retry=True for a better user experience.
        self._run_through_task(commit_queue, expected_logs, GoldenScriptError)

    def test_failed_archive(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1"],
            second_test_failures=[],
            clean_test_failures=[])

        # It's possible for the delegate to fail to archive layout tests,
        # but we shouldn't try to report flaky tests when that happens.
        commit_queue.archive_last_test_results = lambda patch: None

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.PASS)

    def test_double_flaky_test_failure(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1"],
            second_test_failures=["Fail2"],
            clean_test_failures=["Fail1"])

        # The (subtle) point of this test is that both Fail1 and Fail2 get reported
        # as flaky, since each failed in only one of the two runs with the patch.
        # Note also that there is no attempt to run the tests w/o the patch.
        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expected_reported_flaky_tests=["Fail1", "Fail2"])

    def test_test_failure(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1"],
            second_test_failures=["Fail1"],
            clean_test_failures=[])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.FAIL, expect_clean_tests_to_run=True, expected_failure_status_id=1)
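
    # When the same test fails both with and without the patch, the failure is treated
    # as pre-existing tree redness rather than the patch's fault, so the patch passes.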
    def test_red_test_failure(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1"],
            second_test_failures=["Fail1"],
            clean_test_failures=["Fail1"])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.PASS, expect_clean_tests_to_run=True)

    def test_first_failure_limit(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=_lots_of_failing_tests,
            second_test_failures=[],
            clean_test_failures=[])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True, expected_failure_status_id=1)

    def test_first_failure_limit_with_some_tree_redness(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=_lots_of_failing_tests,
            second_test_failures=["Fail1", "Fail2", "Fail3"],
            clean_test_failures=["Fail1", "Fail2", "Fail3"])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True, expected_failure_status_id=1)

    def test_second_failure_limit(self):
        # There need to be some failures in the first set of tests, or it won't even make it to the second test.
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1", "Fail2", "Fail3"],
            second_test_failures=_lots_of_failing_tests,
            clean_test_failures=["Fail1", "Fail2", "Fail3"])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True, expected_failure_status_id=2)

    def test_tree_failure_limit_with_patch_that_potentially_fixes_some_redness(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1", "Fail2", "Fail3"],
            second_test_failures=["Fail1", "Fail2", "Fail3"],
            clean_test_failures=_lots_of_failing_tests)

        # Unfortunately, there are cases where the clean build will randomly fail enough tests
        # to hit the failure limit. With that in mind, we can't know whether this patch is good
        # or bad until we see a clean run that doesn't exceed the failure limit.
        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True)

    def test_first_and_second_failure_limit(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=_lots_of_failing_tests,
            second_test_failures=_lots_of_failing_tests,
            clean_test_failures=[])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.FAIL, expect_clean_tests_to_run=True, expected_failure_status_id=1)

    def test_first_and_clean_failure_limit(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=_lots_of_failing_tests,
            second_test_failures=[],
            clean_test_failures=_lots_of_failing_tests)

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True)

    def test_first_second_and_clean_failure_limit(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=_lots_of_failing_tests,
            second_test_failures=_lots_of_failing_tests,
            clean_test_failures=_lots_of_failing_tests)

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True)

    def test_red_tree_patch_rejection(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1", "Fail2"],
            second_test_failures=["Fail1", "Fail2"],
            clean_test_failures=["Fail1"])

        # failure_status_id should be that of the test run with the patch (1), not the run without the patch (2).
        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.FAIL, expect_clean_tests_to_run=True, expected_failure_status_id=1)

    def test_two_flaky_tests(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1"],
            second_test_failures=["Fail2"],
            clean_test_failures=["Fail1", "Fail2"])

        # FIXME: This should pass, but as of right now, it defers.
        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expected_reported_flaky_tests=["Fail1", "Fail2"])

    def test_one_flaky_test(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1"],
            second_test_failures=[],
            clean_test_failures=[])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.PASS, expected_reported_flaky_tests=["Fail1"])

    def test_very_flaky_patch(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1", "Fail2", "Fail3", "Fail4", "Fail5"],
            second_test_failures=["Fail6", "Fail7", "Fail8", "Fail9", "Fail10"],
            clean_test_failures=[])

        # FIXME: This should actually fail, but right now it defers.
        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expected_reported_flaky_tests=["Fail1", "Fail2", "Fail3", "Fail4", "Fail5", "Fail6", "Fail7", "Fail8", "Fail9", "Fail10"])

    def test_very_flaky_patch_with_some_tree_redness(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["PreExistingFail1", "PreExistingFail2", "Fail1", "Fail2", "Fail3", "Fail4", "Fail5"],
            second_test_failures=["PreExistingFail1", "PreExistingFail2", "Fail6", "Fail7", "Fail8", "Fail9", "Fail10"],
            clean_test_failures=["PreExistingFail1", "PreExistingFail2"])

        # FIXME: This should actually fail, but right now it defers.
        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True, expected_reported_flaky_tests=["Fail1", "Fail2", "Fail3", "Fail4", "Fail5", "Fail6", "Fail7", "Fail8", "Fail9", "Fail10"])
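
    # Failures seen only in the first run with the patch (Fail6) are reported as flaky,
    # while failures consistent across both runs with the patch cause rejection.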
    def test_different_test_failures(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1", "Fail2", "Fail3", "Fail4", "Fail5", "Fail6"],
            second_test_failures=["Fail1", "Fail2", "Fail3", "Fail4", "Fail5"],
            clean_test_failures=[])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.FAIL, expect_clean_tests_to_run=True, expected_reported_flaky_tests=["Fail6"], expected_failure_status_id=1)

    def test_different_test_failures_with_some_tree_redness(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["PreExistingFail1", "PreExistingFail2", "Fail1", "Fail2", "Fail3", "Fail4", "Fail5", "Fail6"],
            second_test_failures=["PreExistingFail1", "PreExistingFail2", "Fail1", "Fail2", "Fail3", "Fail4", "Fail5"],
            clean_test_failures=["PreExistingFail1", "PreExistingFail2"])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.FAIL, expect_clean_tests_to_run=True, expected_reported_flaky_tests=["Fail6"], expected_failure_status_id=1)

    def test_different_test_failures_with_some_tree_redness_and_some_fixes(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["PreExistingFail1", "Fail1", "Fail2", "Fail3", "Fail4", "Fail5", "Fail6"],
            second_test_failures=["PreExistingFail1", "Fail1", "Fail2", "Fail3", "Fail4", "Fail5"],
            clean_test_failures=["PreExistingFail1", "PreExistingFail2"])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.FAIL, expect_clean_tests_to_run=True, expected_reported_flaky_tests=["Fail6"], expected_failure_status_id=1)

    def test_mildly_flaky_patch(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1"],
            second_test_failures=["Fail2"],
            clean_test_failures=[])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=False, expected_reported_flaky_tests=["Fail1", "Fail2"])

    def test_mildly_flaky_patch_with_some_tree_redness(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["PreExistingFail1", "PreExistingFail2", "Fail1"],
            second_test_failures=["PreExistingFail1", "PreExistingFail2", "Fail2"],
            clean_test_failures=["PreExistingFail1", "PreExistingFail2"])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.DEFER, expect_clean_tests_to_run=True, expected_reported_flaky_tests=["Fail1", "Fail2"])

    def test_tree_more_red_than_patch(self):
        commit_queue = MockSimpleTestPlanCommitQueue(
            first_test_failures=["Fail1", "Fail2", "Fail3"],
            second_test_failures=["Fail1", "Fail2", "Fail3"],
            clean_test_failures=["Fail1", "Fail2", "Fail3", "Fail4"])

        self._run_and_expect_patch_analysis_result(commit_queue, PatchAnalysisResult.PASS, expect_clean_tests_to_run=True)

    def _expect_validate(self, patch, is_valid):
        class MockDelegate(object):
            def refetch_patch(self, patch):
                return patch

        task = CommitQueueTask(MockDelegate(), patch)
        self.assertEqual(task.validate(), is_valid)

    def _mock_patch(self, attachment_dict={}, bug_dict={'bug_status': 'NEW'}, committer="fake"):
        bug = bugzilla.Bug(bug_dict, None)
        patch = bugzilla.Attachment(attachment_dict, bug)
        patch._committer = committer
        return patch

    def test_validate(self):
        self._expect_validate(self._mock_patch(), True)
        self._expect_validate(self._mock_patch({'is_obsolete': True}), False)
        self._expect_validate(self._mock_patch(bug_dict={'bug_status': 'CLOSED'}), False)
        self._expect_validate(self._mock_patch(committer=None), False)
        self._expect_validate(self._mock_patch({'review': '-'}), False)