Test regressions are not detected when image result is missing
author ap@apple.com <ap@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Sat, 25 Oct 2014 23:29:51 +0000 (23:29 +0000)
committer ap@apple.com <ap@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Sat, 25 Oct 2014 23:29:51 +0000 (23:29 +0000)
https://bugs.webkit.org/show_bug.cgi?id=138070

Reviewed by Simon Fraser.

* Scripts/webkitpy/layout_tests/models/test_run_results.py:
* Scripts/webkitpy/layout_tests/views/buildbot_results.py:
Count these as regressions, not as flaky tests.

* Scripts/webkitpy/port/test.py:
* Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
Test it.

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@175204 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Tools/ChangeLog
Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
Tools/Scripts/webkitpy/port/test.py
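
In plain terms: a test that fails with TEXT on the first, pixel-tests-off run and then, on the pixel-enabled retry, fails with IMAGE+TEXT or produces no image result at all (MISSING) is the same deterministic text regression. Before this patch only the IMAGE+TEXT retry outcome was recognized; a MISSING retry was misfiled as flaky. A minimal standalone sketch of the fixed rule, with illustrative names rather than the real webkitpy API:

    # Stand-ins for the test_expectations result-type constants (illustrative).
    TEXT, IMAGE_PLUS_TEXT, MISSING = 'TEXT', 'IMAGE+TEXT', 'MISSING'

    def classify_retry(first_result, retry_result, enabled_pixel_tests_in_retry):
        """Classify a test whose retry result differs from its first result."""
        if (enabled_pixel_tests_in_retry and first_result == TEXT
                and retry_result in (IMAGE_PLUS_TEXT, MISSING)):
            # The text failure reproduced; only the image side changed, and
            # only because pixel tests were switched on for the retry.
            return 'REGRESSION'
        return 'FLAKY'

    assert classify_retry(TEXT, MISSING, True) == 'REGRESSION'         # the fixed case
    assert classify_retry(TEXT, IMAGE_PLUS_TEXT, True) == 'REGRESSION'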

index 07b1370a40ca526fdf9b4d64b2acb98108c41a5b..8898102ed62b6569acd107feb13fcf5d20cdcab7 100644 (file)
@@ -1,3 +1,18 @@
+2014-10-25  Alexey Proskuryakov  <ap@apple.com>
+
+        Test regressions are not detected when image result is missing
+        https://bugs.webkit.org/show_bug.cgi?id=138070
+
+        Reviewed by Simon Fraser.
+
+        * Scripts/webkitpy/layout_tests/models/test_run_results.py:
+        * Scripts/webkitpy/layout_tests/views/buildbot_results.py:
+        Count these as regressions, not as flaky tests.
+
+        * Scripts/webkitpy/port/test.py:
+        * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
+        Test it.
+
 2014-10-24  Timothy Horton  <timothy_horton@apple.com>
 
         Add Conrad Shultz to the contributors list.
index f811f3873efc5b4ecf4c2d1ef1fc0b6e260b4557..575b1f5a9b5be6b6a0b69bf4a2e77e92bacdb388 100644 (file)
@@ -192,7 +192,9 @@ def summarize_results(port_obj, expectations, initial_results, retry_results, en
             elif retry_results:
                 retry_result_type = retry_results.unexpected_results_by_name[test_name].type
                 if result_type != retry_result_type:
-                    if enabled_pixel_tests_in_retry and result_type == test_expectations.TEXT and retry_result_type == test_expectations.IMAGE_PLUS_TEXT:
+                    if enabled_pixel_tests_in_retry and result_type == test_expectations.TEXT and (retry_result_type == test_expectations.IMAGE_PLUS_TEXT or retry_result_type == test_expectations.MISSING):
+                        if retry_result_type == test_expectations.MISSING:
+                            num_missing += 1
                         num_regressions += 1
                         test_dict['report'] = 'REGRESSION'
                     else:
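
Note that this hunk deliberately counts a missing-on-retry image twice over: once in num_regressions and once in num_missing, so the absent image result still surfaces in the run summary. The new integration test below pins the resulting full_results.json counters down to (values copied from its assertions):

    # Summary counters asserted by the new integration test.
    expected_counters = {'num_regressions': 1, 'num_flaky': 0, 'num_missing': 1}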
index 862cc6e113919797cf4ffcb00839c2222a3407cc..edd646f6694863058507d4e9c9e32ba1645e477d 100644 (file)
@@ -671,6 +671,22 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
         self.assertFalse(json["pixel_tests_enabled"])
         self.assertEqual(details.enabled_pixel_tests_in_retry, True)
 
+    def test_failed_text_with_missing_pixel_results_on_retry(self):
+        # Test what happens when pixel results are missing on retry.
+        host = MockHost()
+        details, err, _ = logging_run(['--no-show-results',
+            '--no-new-test-results', '--no-pixel-tests',
+            'failures/unexpected/text-image-missing.html'],
+            tests_included=True, host=host)
+        file_list = host.filesystem.written_files.keys()
+        self.assertEqual(details.exit_code, 1)
+        expected_token = '"unexpected":{"text-image-missing.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT MISSING","is_missing_image":true}}'
+        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+        self.assertTrue(json_string.find(expected_token) != -1)
+        self.assertTrue(json_string.find('"num_regressions":1') != -1)
+        self.assertTrue(json_string.find('"num_flaky":0') != -1)
+        self.assertTrue(json_string.find('"num_missing":1') != -1)
+
     def test_retrying_uses_retries_directory(self):
         host = MockHost()
         details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
index 66a3daeac81cb0baed4d8e1bd29e3d8e8b323e60..9b0a971cb0b1d5122787d889b596c0d0b108d942 100644 (file)
@@ -115,7 +115,7 @@ class BuildBotPrinter(object):
                     add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
                 else:
                     add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
-            elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']:
+            elif enabled_pixel_tests_in_retry and (actual == ['TEXT', 'IMAGE+TEXT'] or actual == ['TEXT', 'MISSING']):
                 add_to_dict_of_lists(regressions, actual[0], test)
             elif len(actual) > 1:
                 # We group flaky tests by the first actual result we got.
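
This is the printer-side counterpart of the same rule: here "actual" is the list of results across runs, so ['TEXT', 'MISSING'] means the first run failed with TEXT and the pixel-enabled retry produced no image result. A standalone sketch of the grouping, with illustrative names rather than the BuildBotPrinter internals:

    from collections import defaultdict

    def group_unexpected(results, enabled_pixel_tests_in_retry):
        """Split {test_name: [results across runs]} into regressions and flaky."""
        regressions, flaky = defaultdict(list), defaultdict(list)
        for test, actual in results.items():
            if enabled_pixel_tests_in_retry and actual in (['TEXT', 'IMAGE+TEXT'],
                                                           ['TEXT', 'MISSING']):
                regressions[actual[0]].append(test)  # one deterministic regression
            elif len(actual) > 1:
                flaky[actual[0]].append(test)  # grouped by first observed result
        return regressions, flaky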
index 362a0e877900b2f9dcc78111d4e77227bcb8acf9..42fe6898ad3e014f6ee964c4e98eca6d7eac7aba 100644 (file)
@@ -100,11 +100,11 @@ class TestList(object):
 #
 # These numbers may need to be updated whenever we add or delete tests.
 #
-TOTAL_TESTS = 106
+TOTAL_TESTS = 107
 TOTAL_SKIPS = 28
 TOTAL_RETRIES = 14
 
-UNEXPECTED_PASSES = 6
+UNEXPECTED_PASSES = 7
 UNEXPECTED_FAILURES = 17
 
 def unit_test_list():
@@ -167,6 +167,9 @@ layer at (0,0) size 800x34
               actual_text='text-image-checksum_fail-txt',
               actual_image='text-image-checksum_fail-pngtEXtchecksum\x00checksum_fail',
               actual_checksum='text-image-checksum_fail-checksum')
+    tests.add('failures/unexpected/text-image-missing.html',
+              actual_text='text-image-checksum_fail-txt',
+              expected_image=None)
     tests.add('failures/unexpected/checksum-with-matching-image.html',
               actual_checksum='text-image-checksum_fail-checksum')
     tests.add('failures/unexpected/skip_pass.html')
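
The new fixture pairs with text-image-checksum.html above: that one has an image baseline whose checksum mismatches (IMAGE+TEXT on retry), while expected_image=None here removes the baseline entirely, so the retry reports MISSING. To run just the affected suite in a WebKit checkout, the usual entry point is test-webkitpy, which accepts test names; something like the following (the exact invocation depends on the checkout):

    Tools/Scripts/test-webkitpy webkitpy.layout_tests.run_webkit_tests_integrationtest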