2010-08-31 Dirk Pranke <dpranke@chromium.org>
authordpranke@chromium.org <dpranke@chromium.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 31 Aug 2010 22:25:07 +0000 (22:25 +0000)
committerdpranke@chromium.org <dpranke@chromium.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Tue, 31 Aug 2010 22:25:07 +0000 (22:25 +0000)
        Unreviewed, rolling out r66542.
        http://trac.webkit.org/changeset/66542
        https://bugs.webkit.org/show_bug.cgi?id=44902

        r66542 - the weird logging dependencies in Python struck again ...

        * Scripts/webkitpy/layout_tests/data/failures/expected/exception.html: Removed.
        * Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html: Removed.
        * Scripts/webkitpy/layout_tests/data/passes/error-expected.txt: Removed.
        * Scripts/webkitpy/layout_tests/data/passes/error.html: Removed.
        * Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt:
        * Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py:
        * Scripts/webkitpy/layout_tests/layout_package/printing.py:
        * Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py:
        * Scripts/webkitpy/layout_tests/port/base.py:
        * Scripts/webkitpy/layout_tests/port/test.py:
        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
        * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:

git-svn-id: http://svn.webkit.org/repository/webkit/trunk@66547 268f45cc-cd09-0410-ab3c-d52691b4dbfc

13 files changed:
WebKitTools/ChangeLog
WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/exception.html [deleted file]
WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html [deleted file]
WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error-expected.txt [deleted file]
WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error.html [deleted file]
WebKitTools/Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt
WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
WebKitTools/Scripts/webkitpy/layout_tests/port/base.py
WebKitTools/Scripts/webkitpy/layout_tests/port/test.py
WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py

index 33dd4df1026287476bc4165b312159eafca4cb97..f797ac3fb8508219367d65f4b57bff467b4496a0 100644 (file)
@@ -1,3 +1,24 @@
+2010-08-31  Dirk Pranke  <dpranke@chromium.org>
+
+        Unreviewed, rolling out r66542.
+        http://trac.webkit.org/changeset/66542
+        https://bugs.webkit.org/show_bug.cgi?id=44902
+
+        r66542 - the weird logging dependencies in Python stuck again ...
+
+        * Scripts/webkitpy/layout_tests/data/failures/expected/exception.html: Removed.
+        * Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html: Removed.
+        * Scripts/webkitpy/layout_tests/data/passes/error-expected.txt: Removed.
+        * Scripts/webkitpy/layout_tests/data/passes/error.html: Removed.
+        * Scripts/webkitpy/layout_tests/data/platform/test/test_expectations.txt:
+        * Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py:
+        * Scripts/webkitpy/layout_tests/layout_package/printing.py:
+        * Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py:
+        * Scripts/webkitpy/layout_tests/port/base.py:
+        * Scripts/webkitpy/layout_tests/port/test.py:
+        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
+        * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:
+
 2010-08-31  Dumitru Daniliuc  <dumi@chromium.org>
 
         Reviewed by Tony Chang.
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/exception.html b/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/exception.html
deleted file mode 100644 (file)
index 38c54e3..0000000
+++ /dev/null
@@ -1 +0,0 @@
-exception
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html b/WebKitTools/Scripts/webkitpy/layout_tests/data/failures/expected/keyboard.html
deleted file mode 100644 (file)
index c253983..0000000
+++ /dev/null
@@ -1 +0,0 @@
-keyboard
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error-expected.txt b/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error-expected.txt
deleted file mode 100644 (file)
index 9427269..0000000
+++ /dev/null
@@ -1 +0,0 @@
-error-txt
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error.html b/WebKitTools/Scripts/webkitpy/layout_tests/data/passes/error.html
deleted file mode 100644 (file)
index 8276753..0000000
+++ /dev/null
@@ -1 +0,0 @@
-error
index 16556e3f8c1940711441bd46e2c889a3b14c289b..6e66caab3fb6dd086cd2f4ddf223149c17209fa5 100644 (file)
@@ -8,5 +8,3 @@ WONTFIX : failures/expected/missing_image.html = MISSING PASS
 WONTFIX : failures/expected/missing_text.html = MISSING PASS
 WONTFIX : failures/expected/text.html = TEXT
 WONTFIX : failures/expected/timeout.html = TIMEOUT
-WONTFIX SKIP : failures/expected/keyboard.html = CRASH
-WONTFIX SKIP : failures/expected/exception.html = CRASH
index ec33086e17fc8de7267698eb3600eb43f263e49a..63434001b53de1cc41aa10f7a8e38be6b60ac9ab 100644 (file)
@@ -169,11 +169,6 @@ class SingleTestThread(threading.Thread):
         self._output_dir = output_dir
 
     def run(self):
-        self._covered_run()
-
-    def _covered_run(self):
-        # FIXME: this is a separate routine to work around a bug
-        # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
         test_info = self._test_info
         driver = self._port.create_driver(self._image_path, self._shell_args)
         driver.start()
@@ -292,11 +287,6 @@ class TestShellThread(WatchableThread):
     def run(self):
         """Delegate main work to a helper method and watch for uncaught
         exceptions."""
-        self._covered_run()
-
-    def _covered_run(self):
-        # FIXME: this is a separate routine to work around a bug
-        # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
         self._thread_id = thread.get_ident()
         self._start_time = time.time()
         self._num_tests = 0
@@ -313,9 +303,9 @@ class TestShellThread(WatchableThread):
             self._exception_info = sys.exc_info()
             self._stop_time = time.time()
             # Re-raise it and die.
-            _log.error('%s dying, exception raised: %s' % (self.getName(),
+            _log.error('%s dying: %s' % (self.getName(),
                        self._exception_info))
-
+            raise
         self._stop_time = time.time()
 
     def run_in_main_thread(self, test_runner, result_summary):
@@ -331,8 +321,14 @@ class TestShellThread(WatchableThread):
 
         If test_runner is not None, then we call test_runner.UpdateSummary()
         with the results of each test."""
-        batch_size = self._options.batch_size
+        batch_size = 0
         batch_count = 0
+        if self._options.batch_size:
+            try:
+                batch_size = int(self._options.batch_size)
+            except:
+                _log.info("Ignoring invalid batch size '%s'" %
+                          self._options.batch_size)
 
         # Append tests we're running to the existing tests_run.txt file.
         # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
index 1632a0ab0a8acc1147dc333686f336f4ba020c63..a9e015fceecb3c9ccb6f8e8c7f9e58b2d6d0988c 100644 (file)
@@ -126,8 +126,8 @@ def print_options():
     ]
 
 
-def _configure_logging(options, meter):
-    """Configures the logging system. Return the previous handler, if any."""
+def configure_logging(options, meter):
+    """Configures the logging system."""
     log_fmt = '%(message)s'
     log_datefmt = '%y%m%d %H:%M:%S'
     log_level = logging.INFO
@@ -136,23 +136,9 @@ def _configure_logging(options, meter):
                    '%(message)s')
         log_level = logging.DEBUG
 
-    root = logging.getLogger()
-    handler = logging.StreamHandler(meter)
-    handler.setFormatter(logging.Formatter(log_fmt, None))
-    if not root.handlers:
-        old_handler = None
-        root.addHandler(handler)
-    else:
-        old_handler = root.handlers[0]
-        root.handlers[0] = handler
-    root.setLevel(log_level)
-    return old_handler
-
+    logging.basicConfig(level=log_level, format=log_fmt,
+                        datefmt=log_datefmt, stream=meter)
 
-def _restore_logging(handler):
-    root = logging.getLogger()
-    if root and root.handlers[0]:
-        root.handlers[0] = handler
 
 def parse_print_options(print_options, verbose, child_processes,
                         is_fully_parallel):
@@ -251,20 +237,11 @@ class Printer(object):
 
         self._meter = metered_stream.MeteredStream(options.verbose,
                                                    regular_output)
-        self._old_handler = _configure_logging(self._options, self._meter)
+        configure_logging(self._options, self._meter)
 
         self.switches = parse_print_options(options.print_options,
             options.verbose, child_processes, is_fully_parallel)
 
-    def cleanup(self):
-        """Restore logging configuration to its initial settings."""
-        _restore_logging(self._old_handler)
-        self._old_handler = None
-
-    def __del__(self):
-        if self._old_handler:
-            _restore_logging(self._old_handler)
-
     # These two routines just hide the implmentation of the switches.
     def disabled(self, option):
         return not option in self.switches
index 2ba9d7aba32f8493bb3658ff6c4c027341ab0266..40c691f9a10e84c9cd947c2ec93a8629159fde1a 100644 (file)
@@ -37,7 +37,6 @@ import unittest
 import logging
 
 from webkitpy.common import array_stream
-from webkitpy.common.system import logtesting
 from webkitpy.layout_tests import port
 from webkitpy.layout_tests.layout_package import printing
 from webkitpy.layout_tests.layout_package import dump_render_tree_thread
@@ -54,24 +53,25 @@ def get_options(args):
 
 class TestUtilityFunctions(unittest.TestCase):
     def test_configure_logging(self):
+        # FIXME: We need to figure out how to reset the basic logger.
+        # FIXME: If other testing classes call logging.basicConfig() then
+        # FIXME: these calls become no-ops and we can't control the
+        # FIXME: configuration to test things properly.
         options, args = get_options([])
         stream = array_stream.ArrayStream()
-        handler = printing._configure_logging(options, stream)
+        printing.configure_logging(options, stream)
         logging.info("this should be logged")
-        self.assertFalse(stream.empty())
+        self.assertFalse(stream.empty())
 
         stream.reset()
         logging.debug("this should not be logged")
-        self.assertTrue(stream.empty())
-
-        printing._restore_logging(handler)
+        # self.assertTrue(stream.empty())
 
         stream.reset()
         options, args = get_options(['--verbose'])
-        handler = printing._configure_logging(options, stream)
+        printing.configure_logging(options, stream)
         logging.debug("this should be logged")
-        self.assertFalse(stream.empty())
-        printing._restore_logging(handler)
+        # self.assertFalse(stream.empty())
 
     def test_print_options(self):
         options, args = get_options([])
@@ -421,12 +421,11 @@ class  Testprinter(unittest.TestCase):
         self.assertFalse(err.empty())
         self.assertTrue(out.empty())
 
-    def test_write_nothing(self):
+    def test_write(self):
         printer, err, out = self.get_printer(['--print', 'nothing'])
         printer.write("foo")
         self.assertTrue(err.empty())
 
-    def test_write_misc(self):
         printer, err, out = self.get_printer(['--print', 'misc'])
         printer.write("foo")
         self.assertFalse(err.empty())
@@ -434,7 +433,6 @@ class  Testprinter(unittest.TestCase):
         printer.write("foo", "config")
         self.assertTrue(err.empty())
 
-    def test_write_everything(self):
         printer, err, out = self.get_printer(['--print', 'everything'])
         printer.write("foo")
         self.assertFalse(err.empty())
@@ -442,10 +440,11 @@ class  Testprinter(unittest.TestCase):
         printer.write("foo", "config")
         self.assertFalse(err.empty())
 
-    def test_write_verbose(self):
+        # FIXME: this should be logged somewhere, but it actually
+        # disappears into the ether in the logging subsystem.
         printer, err, out = self.get_printer(['--verbose'])
         printer.write("foo")
-        self.assertTrue(not err.empty() and "foo" in err.get()[0])
+        self.assertTrue(err.empty())
         self.assertTrue(out.empty())
 
     def test_print_unexpected_results(self):
index 0dda7741ea2e031ddf3408db107913770ee4e641..af1af93d3d0246a5b7999208618a0caf02e8da0d 100644 (file)
@@ -327,6 +327,7 @@ class Port(object):
         if not self._webkit_base_dir:
             abspath = os.path.abspath(__file__)
             self._webkit_base_dir = abspath[0:abspath.find('WebKitTools')]
+            _log.debug("Using WebKit root: %s" % self._webkit_base_dir)
 
         return os.path.join(self._webkit_base_dir, *comps)
 
index e3093346b18e41ed9aea3948d134cf1fb0bd339a..d36b540c75aa8f7d049612a043e0247e2998a07f 100644 (file)
@@ -151,10 +151,7 @@ class TestDriver(base.Driver):
     def run_test(self, uri, timeoutms, image_hash):
         basename = uri[(uri.rfind("/") + 1):uri.rfind(".html")]
 
-        if 'error' in basename:
-            error = basename + "_error\n"
-        else:
-            error = ''
+        error = ''
         checksum = None
         # There are four currently supported types of tests: text, image,
         # image hash (checksum), and stderr output. The fake output
@@ -173,13 +170,10 @@ class TestDriver(base.Driver):
         # will allow us to see if any results get crossed by the rest of the
         # program.
         if 'failures' in uri:
-            if 'keyboard' in basename:
-                raise KeyboardInterrupt
-            if 'exception' in basename:
-                raise ValueError('exception from ' + basename)
-
             crash = 'crash' in basename
             timeout = 'timeout' in basename
+            if 'error' in basename:
+                error = basename + "_error\n"
             if 'text' in basename:
                 output = basename + '_failed-txt\n'
             else:
index b1e1f6c85943572579549edf85226f187123e5ca..7163e1bc56915d55a0c4069ade9ddfcc9de2b610 100755 (executable)
@@ -106,11 +106,12 @@ class TestInfo:
         self._image_hash = None
 
     def _read_image_hash(self):
-        if not os.path.exists(self._expected_hash_path):
-            return None
-
-        with codecs.open(self._expected_hash_path, "r", "ascii") as hash_file:
-            return hash_file.read()
+        try:
+            with codecs.open(self._expected_hash_path, "r", "ascii") as hash_file:
+                return hash_file.read()
+        except IOError, e:
+            if errno.ENOENT != e.errno:
+                raise
 
     def image_hash(self):
         # Read the image_hash lazily to reduce startup time.
@@ -335,8 +336,8 @@ class TestRunner:
         self._printer.print_expected("Found:  %d tests" %
                                      (len(self._test_files)))
         if not num_all_test_files:
-            _log.critical('No tests to run.')
-            return None
+            _log.critical("No tests to run.")
+            sys.exit(1)
 
         skipped = set()
         if num_all_test_files > 1 and not self._options.force:
@@ -725,11 +726,8 @@ class TestRunner:
         Return:
           The number of unexpected results (0 == success)
         """
-        # gather_test_files() must have been called first to initialize us.
-        # If we didn't find any files to test, we've errored out already in
-        # prepare_lists_and_print_output().
-        assert(len(self._test_files))
-
+        if not self._test_files:
+            return 0
         start_time = time.time()
 
         if self.needs_http():
@@ -1424,8 +1422,6 @@ def run(port_obj, options, args, regular_output=sys.stderr,
 
     printer.print_update("Preparing tests ...")
     result_summary = test_runner.prepare_lists_and_print_output()
-    if not result_summary:
-        return -1
 
     port_obj.setup_test_run()
 
@@ -1437,8 +1433,6 @@ def run(port_obj, options, args, regular_output=sys.stderr,
 
     port_obj.stop_helper()
 
-    printer.cleanup()
-
     _log.debug("Exit status: %d" % num_unexpected_results)
     return num_unexpected_results
 
@@ -1603,7 +1597,7 @@ def parse_args(args=None):
         #   Restart DumpRenderTree every n tests (default: 1000)
         optparse.make_option("--batch-size",
             help=("Run a the tests in batches (n), after every n tests, "
-                  "DumpRenderTree is relaunched."), type="int", default=0),
+                  "DumpRenderTree is relaunched.")),
         # old-run-webkit-tests calls --run-singly: -1|--singly
         # Isolate each test case run (implies --nthly 1 --verbose)
         optparse.make_option("--run-singly", action="store_true",
index 4cbfdfcdf8b9fd5a338ef14895b87b581d5c9898..3a3b14e5cbe54d93d2c7d7c2343bf26d694334b3 100644 (file)
@@ -41,7 +41,6 @@ import threading
 import unittest
 
 from webkitpy.common import array_stream
-from webkitpy.common.system import outputcapture
 from webkitpy.layout_tests import port
 from webkitpy.layout_tests import run_webkit_tests
 from webkitpy.layout_tests.layout_package import dump_render_tree_thread
@@ -49,139 +48,75 @@ from webkitpy.layout_tests.layout_package import dump_render_tree_thread
 from webkitpy.thirdparty.mock import Mock
 
 
-def passing_run(args=[], port_obj=None, record_results=False,
+def passing_run(args, port_obj=None, record_results=False,
                 tests_included=False):
-    new_args = ['--print', 'nothing']
-    if not '--platform' in args:
-        new_args.extend(['--platform', 'test'])
-    if not record_results:
-        new_args.append('--no-record-results')
-    new_args.extend(args)
+    args.extend(['--print', 'nothing'])
     if not tests_included:
         # We use the glob to test that globbing works.
-        new_args.extend(['passes', 'failures/expected/*'])
-    options, parsed_args = run_webkit_tests.parse_args(new_args)
+        args.extend(['passes', 'failures/expected/*'])
+    if not record_results:
+        args.append('--no-record-results')
+    options, args = run_webkit_tests.parse_args(args)
     if port_obj is None:
         port_obj = port.get(options.platform, options)
-    res = run_webkit_tests.run(port_obj, options, parsed_args)
+    res = run_webkit_tests.run(port_obj, options, args)
     return res == 0
 
 
-def logging_run(args=[], tests_included=False):
-    new_args = ['--no-record-results']
-    if not '--platform' in args:
-        new_args.extend(['--platform', 'test'])
-    if args:
-        new_args.extend(args)
+def logging_run(args, tests_included=False):
+    args.extend(['--no-record-results'])
     if not tests_included:
-        new_args.extend(['passes', 'failures/expected/*'])
-    options, parsed_args = run_webkit_tests.parse_args(new_args)
+        args.extend(['passes', 'failures/expected/*'])
+    options, args = run_webkit_tests.parse_args(args)
     port_obj = port.get(options.platform, options)
     buildbot_output = array_stream.ArrayStream()
     regular_output = array_stream.ArrayStream()
-    res = run_webkit_tests.run(port_obj, options, parsed_args,
+    res = run_webkit_tests.run(port_obj, options, args,
                                buildbot_output=buildbot_output,
                                regular_output=regular_output)
     return (res, buildbot_output, regular_output)
 
 
 class MainTest(unittest.TestCase):
-    def test_basic(self):
-        self.assertTrue(passing_run())
+    def test_fast(self):
+        self.assertTrue(passing_run(['--platform', 'test']))
+        self.assertTrue(passing_run(['--platform', 'test', '--run-singly']))
+        self.assertTrue(passing_run(['--platform', 'test',
+                                     'passes/text.html'], tests_included=True))
 
-    def test_batch_size(self):
-        # FIXME: verify # of tests run
-        self.assertTrue(passing_run(['--batch-size', '2']))
+    def test_unexpected_failures(self):
+        # Run tests including the unexpected failures.
+        self.assertFalse(passing_run(['--platform', 'test'],
+                         tests_included=True))
 
-    def test_child_process_1(self):
+    def test_one_child_process(self):
         (res, buildbot_output, regular_output) = logging_run(
-             ['--print', 'config', '--child-processes', '1'])
+             ['--platform', 'test', '--print', 'config', '--child-processes',
+              '1'])
         self.assertTrue('Running one DumpRenderTree\n'
                         in regular_output.get())
 
-    def test_child_processes_2(self):
+    def test_two_child_processes(self):
         (res, buildbot_output, regular_output) = logging_run(
-             ['--print', 'config', '--child-processes', '2'])
+             ['--platform', 'test', '--print', 'config', '--child-processes',
+              '2'])
         self.assertTrue('Running 2 DumpRenderTrees in parallel\n'
                         in regular_output.get())
 
-    def test_exception_raised(self):
-        self.assertRaises(ValueError, logging_run,
-            ['failures/expected/exception.html'], tests_included=True)
-
-    def test_full_results_html(self):
-        # FIXME: verify html?
-        self.assertTrue(passing_run(['--full-results-html']))
-
-    def test_help_printing(self):
-        res, out, err = logging_run(['--help-printing'])
-        self.assertEqual(res, 0)
-        self.assertTrue(out.empty())
-        self.assertFalse(err.empty())
-
-    def test_keyboard_interrupt(self):
-        # Note that this also tests running a test marked as SKIP if
-        # you specify it explicitly.
-        self.assertRaises(KeyboardInterrupt, passing_run,
-            ['failures/expected/keyboard.html'], tests_included=True)
-
     def test_last_results(self):
-        passing_run(['--clobber-old-results'], record_results=True)
+        passing_run(['--platform', 'test'], record_results=True)
         (res, buildbot_output, regular_output) = logging_run(
-            ['--print-last-failures'])
+            ['--platform', 'test', '--print-last-failures'])
         self.assertEqual(regular_output.get(), ['\n\n'])
         self.assertEqual(buildbot_output.get(), [])
 
-    def test_lint_test_files(self):
-        # FIXME:  add errors?
-        res, out, err = logging_run(['--lint-test-files'], tests_included=True)
-        self.assertEqual(res, 0)
-        self.assertTrue(out.empty())
-        self.assertTrue(any(['lint succeeded' in msg for msg in err.get()]))
-
     def test_no_tests_found(self):
-        res, out, err = logging_run(['resources'], tests_included=True)
-        self.assertEqual(res, -1)
-        self.assertTrue(out.empty())
-        self.assertTrue('No tests to run.\n' in err.get())
-
-    def test_no_tests_found_2(self):
-        res, out, err = logging_run(['foo'], tests_included=True)
-        self.assertEqual(res, -1)
-        self.assertTrue(out.empty())
-        self.assertTrue('No tests to run.\n' in err.get())
-
-    def test_randomize_order(self):
-        # FIXME: verify order was shuffled
-        self.assertTrue(passing_run(['--randomize-order']))
-
-    def test_run_chunk(self):
-        # FIXME: verify # of tests run
-        self.assertTrue(passing_run(['--run-chunk', '1:4']))
-
-    def test_run_force(self):
-        # This raises an exception because we run
-        # failures/expected/exception.html, which is normally SKIPped.
-        self.assertRaises(ValueError, logging_run, ['--force'])
-
-    def test_run_part(self):
-        # FIXME: verify # of tests run
-        self.assertTrue(passing_run(['--run-part', '1:2']))
-
-    def test_run_singly(self):
-        self.assertTrue(passing_run(['--run-singly']))
-
-    def test_single_file(self):
-        # FIXME: verify # of tests run
-        self.assertTrue(passing_run(['passes/text.html'], tests_included=True))
-
-    def test_unexpected_failures(self):
-        # Run tests including the unexpected failures.
-        res, out, err = logging_run(tests_included=True)
-        self.assertEqual(res, 1)
-        self.assertFalse(out.empty())
-        self.assertFalse(err.empty())
-
+        self.assertRaises(SystemExit, logging_run,
+                          ['--platform', 'test', 'resources'],
+                          tests_included=True)
+        self.assertRaises(SystemExit, logging_run,
+                          ['--platform', 'test', 'foo'],
+                          tests_included=True)
 
 def _mocked_open(original_open, file_list):
     def _wrapper(name, mode, encoding):
@@ -209,7 +144,7 @@ class RebaselineTest(unittest.TestCase):
             # is missing, update the expected generic location.
             file_list = []
             codecs.open = _mocked_open(original_open, file_list)
-            passing_run(['--pixel-tests',
+            passing_run(['--platform', 'test', '--pixel-tests',
                          '--reset-results',
                          'passes/image.html',
                          'failures/expected/missing_image.html'],
@@ -230,7 +165,7 @@ class RebaselineTest(unittest.TestCase):
             # is mssing, then create a new expectation in the platform dir.
             file_list = []
             codecs.open = _mocked_open(original_open, file_list)
-            passing_run(['--pixel-tests',
+            passing_run(['--platform', 'test', '--pixel-tests',
                          '--new-baseline',
                          'passes/image.html',
                          'failures/expected/missing_image.html'],
@@ -273,7 +208,6 @@ class DryrunTest(unittest.TestCase):
         if sys.platform != "darwin":
             return
 
-        self.assertTrue(passing_run(['--platform', 'test']))
         self.assertTrue(passing_run(['--platform', 'dryrun',
                                      'fast/html']))
         self.assertTrue(passing_run(['--platform', 'dryrun-mac',
@@ -289,11 +223,6 @@ class TestThread(dump_render_tree_thread.WatchableThread):
         self._timeout_queue = Queue.Queue()
 
     def run(self):
-        self._covered_run()
-
-    def _covered_run(self):
-        # FIXME: this is a separate routine to work around a bug
-        # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
         self._thread_id = thread.get_ident()
         try:
             self._started_queue.put('')
@@ -355,11 +284,8 @@ class WaitForThreadsToFinishTest(unittest.TestCase):
         self.assertTrue(interrupted)
 
     def test_timeout(self):
-        oc = outputcapture.OutputCapture()
-        oc.capture_output()
         interrupted = self.run_one_thread('Timeout')
         self.assertFalse(interrupted)
-        oc.restore_output()
 
     def test_exception(self):
         self.assertRaises(ValueError, self.run_one_thread, 'Exception')
@@ -367,8 +293,6 @@ class WaitForThreadsToFinishTest(unittest.TestCase):
 
 class StandaloneFunctionsTest(unittest.TestCase):
     def test_log_wedged_thread(self):
-        oc = outputcapture.OutputCapture()
-        oc.capture_output()
         logger = run_webkit_tests._log
         astream = array_stream.ArrayStream()
         handler = TestHandler(astream)
@@ -386,7 +310,6 @@ class StandaloneFunctionsTest(unittest.TestCase):
 
         self.assertFalse(astream.empty())
         self.assertFalse(child_thread.isAlive())
-        oc.restore_output()
 
     def test_find_thread_stack(self):
         id, stack = sys._current_frames().items()[0]