Run-webkit-tests should not fail if all tests found to run are skipped.
Author:     jlewis3@apple.com <jlewis3@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
AuthorDate: Wed, 13 May 2020 18:37:46 +0000 (18:37 +0000)
Commit:     jlewis3@apple.com <jlewis3@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
CommitDate: Wed, 13 May 2020 18:37:46 +0000 (18:37 +0000)
https://bugs.webkit.org/show_bug.cgi?id=210880

Reviewed by Jonathan Bedard.

* Scripts/webkitpy/layout_tests/controllers/manager.py:
(Manager.run): Added a check for whether every test found was skipped. If so, we return a
successful exit code, since the run performed as expected (the new flow is sketched below).
* Scripts/webkitpy/layout_tests/models/test_run_results.py:
(RunDetails.__init__): Added an additional attribute used to record whether every
test in the run was skipped.
* Scripts/webkitpy/layout_tests/run_webkit_tests.py:
(main): Added an all-tests-skipped check; if it is set, we return early with the exit code.
* Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py: Added a new test and removed
the old ones that no longer tested the correct functionality.
(RunTest.test_all_tests_skipped):
(RunTest.test_no_tests_found): Deleted.
(RunTest.test_no_tests_found_2): Deleted.
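
For context, here is a minimal, self-contained sketch of the new control flow. The class
and function names mirror the patch below, but the real methods take more arguments and
do far more; the fake test counts are illustrative only.

    class RunDetails(object):
        def __init__(self, exit_code, skipped_all_tests=False):
            self.exit_code = exit_code
            self.skipped_all_tests = skipped_all_tests

    def manager_run(total_tests, tests_to_skip):
        # New check in Manager.run: if every discovered test is skipped, the
        # run performed as expected, so report success instead of failing
        # with "No tests to run."
        if tests_to_skip == total_tests:
            print("All tests skipped.")
            return RunDetails(exit_code=0, skipped_all_tests=True)
        # ... run the remaining tests ...
        return RunDetails(exit_code=0)

    def main():
        run_details = manager_run(total_tests=5, tests_to_skip=5)
        # New early return in run_webkit_tests.main(): skip result printing
        # when everything was skipped, and exit with the (successful) code.
        if run_details.exit_code != -1 and run_details.skipped_all_tests:
            return run_details.exit_code
        # ... normal result reporting would follow here ...
        return run_details.exit_code

    assert main() == 0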

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@261629 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Tools/ChangeLog
Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py

index ebfddc4..dd19985 100644 (file)
@@ -1,3 +1,24 @@
+2020-05-12  Matt Lewis  <jlewis3@apple.com>
+
+        Run-webkit-tests should not fail if all tests found to run are skipped.
+        https://bugs.webkit.org/show_bug.cgi?id=210880
+
+        Reviewed by Jonathan Bedard.
+
+        * Scripts/webkitpy/layout_tests/controllers/manager.py:
+        (Manager.run): Added a check for whether every test found was skipped. If so, we return a
+        successful exit code, since the run performed as expected.
+        * Scripts/webkitpy/layout_tests/models/test_run_results.py:
+        (RunDetails.__init__): Added an additional attribute used to record whether every
+        test in the run was skipped.
+        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
+        (main): Added an all-tests-skipped check; if it is set, we return early with the exit code.
+        * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py: Added a new test and removed
+        the old ones that no longer tested the correct functionality.
+        (RunTest.test_all_tests_skipped):
+        (RunTest.test_no_tests_found): Deleted.
+        (RunTest.test_no_tests_found_2): Deleted.
+
 2020-05-13  Kate Cheney  <katherine_cheney@apple.com>
 
         Add test for non-app-bound iframe under app-bound domain
index 9e9c9bf..f58b1c8 100644 (file)
@@ -218,7 +218,12 @@ class Manager(object):
         self._printer.print_found(len(aggregate_test_names), len(aggregate_tests), self._options.repeat_each, self._options.iterations)
         start_time = time.time()
 
-        # Check to make sure we're not skipping every test.
+        # Check whether every test found to run is skipped.
+        if tests_to_skip == total_tests:
+            _log.error("All tests skipped.")
+            return test_run_results.RunDetails(exit_code=0, skipped_all_tests=True)
+
+        # Check to make sure there are still tests to run after skipping.
         if not sum([len(tests) for tests in itervalues(tests_to_run_by_device)]):
             _log.critical('No tests to run.')
             return test_run_results.RunDetails(exit_code=-1)
index eacda85..2c23c04 100644 (file)
@@ -157,12 +157,13 @@ class TestRunResults(object):
 
 
 class RunDetails(object):
-    def __init__(self, exit_code, summarized_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False):
+    def __init__(self, exit_code, summarized_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False, skipped_all_tests=False):
         self.exit_code = exit_code
         self.summarized_results = summarized_results
         self.initial_results = initial_results
         self.retry_results = retry_results
         self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry
+        self.skipped_all_tests = skipped_all_tests
 
 
 def _interpret_test_failures(failures):
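
A brief note on the design choice above: because skipped_all_tests defaults to False,
call sites that predate the patch are unaffected. A hedged, stand-alone sketch (a
stand-in class with the patched signature; only the two fields used here are stored):

    class RunDetails(object):
        def __init__(self, exit_code, summarized_results=None, initial_results=None,
                     retry_results=None, enabled_pixel_tests_in_retry=False,
                     skipped_all_tests=False):
            self.exit_code = exit_code
            self.skipped_all_tests = skipped_all_tests

    old_style = RunDetails(exit_code=-1)   # pre-patch call shape still works
    assert old_style.skipped_all_tests is False
    new_style = RunDetails(exit_code=0, skipped_all_tests=True)
    assert new_style.skipped_all_tests is True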
index e0e1927..0e1a455 100755 (executable)
@@ -89,6 +89,8 @@ def main(argv, stdout, stderr):
         options.additional_env_var.append('JSC_maxPerThreadStackUsage=' + str(stackSizeInBytes))
         options.additional_env_var.append('__XPC_JSC_maxPerThreadStackUsage=' + str(stackSizeInBytes))
         run_details = run(port, options, args, stderr)
+        if run_details.exit_code != -1 and run_details.skipped_all_tests:
+            return run_details.exit_code
         if run_details.exit_code != -1 and not run_details.initial_results.keyboard_interrupted:
             bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging)
             bot_printer.print_results(run_details)
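
One consequence worth noting: at the process level, an all-skipped run now exits 0,
the same as a fully passing run, so the "All tests skipped." log line is the only way
to tell the two apart from outside. A hypothetical wrapper, not part of the patch
(the script path and arguments are illustrative):

    import subprocess

    proc = subprocess.run(
        ['Tools/Scripts/run-webkit-tests', 'some/fully-skipped/dir'],  # illustrative
        capture_output=True, text=True)
    # The message is logged to stderr, which is also where the integration
    # test below looks for it.
    all_skipped = proc.returncode == 0 and 'All tests skipped.' in proc.stderr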
index 30802bd..abbd18e 100644 (file)
@@ -280,15 +280,10 @@ class RunTest(unittest.TestCase, StreamTestingMixin):
             _, regular_output, _ = logging_run(['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True, shared_port=False)
             self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.getvalue().splitlines()]))
 
-    def test_no_tests_found(self):
-        details, err, _ = logging_run(['resources'], tests_included=True)
-        self.assertEqual(details.exit_code, -1)
-        self.assertContains(err, 'No tests to run.\n')
-
-    def test_no_tests_found_2(self):
+    def test_all_tests_skipped(self):
         details, err, _ = logging_run(['foo'], tests_included=True)
-        self.assertEqual(details.exit_code, -1)
-        self.assertContains(err, 'No tests to run.\n')
+        self.assertEqual(details.exit_code, 0)
+        self.assertContains(err, 'All tests skipped.\n')
 
     def test_natural_order(self):
         tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']