Chromium Windows Perf bots time out due to lack of output
author    rniwa@webkit.org <rniwa@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 27 Apr 2012 19:21:16 +0000 (19:21 +0000)
committer rniwa@webkit.org <rniwa@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 27 Apr 2012 19:21:16 +0000 (19:21 +0000)
https://bugs.webkit.org/show_bug.cgi?id=84940

Reviewed by Dirk Pranke.

Dirk and I investigated the issue on the bot, but we couldn't figure out what was going wrong.
Since run-perf-tests doesn't need any of the fancy features the printer provides, just use Python's
built-in logging module instead. Printing to stdout and stderr seems to work, so hopefully
this will fix the issue on the bot.
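
For reference, here is a minimal sketch of the logging pattern this change switches to (the
helper name and values are illustrative only; the real code paths are PerfTest.output_statistics
and the basicConfig call in run-perf-tests shown in the diff below). Each module gets a
module-level logger and the entry script configures the root logger once; the updated unit tests
then check the logged text via webkitpy's OutputCapture instead of a MockPrinter.

    import logging
    import sys

    _log = logging.getLogger(__name__)

    def output_result(test_name, avg, unit):
        # Result lines intended for the buildbot are logged at INFO level
        # instead of being written to a separate buildbot_output stream.
        _log.info('RESULT %s= %s %s' % (test_name, avg, unit))

    if '__main__' == __name__:
        # basicConfig installs a stream handler (stderr by default); the bare
        # "%(message)s" format keeps the lines identical to the old output.
        logging.basicConfig(level=logging.INFO, format="%(message)s")
        output_result('some-test', 1100.0, 'ms')
        sys.exit(0)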

* Scripts/run-perf-tests:
* Scripts/webkitpy/performance_tests/perftest.py:
(PerfTest.run):
(PerfTest.run_failed):
(PerfTest.parse_output):
(PerfTest.output_statistics):
(ChromiumStylePerfTest.parse_output):
(PageLoadingPerfTest.run):
* Scripts/webkitpy/performance_tests/perftest_unittest.py:
(MainTest.test_parse_output):
(MainTest.test_parse_output_with_failing_line):
(TestPageLoadingPerfTest):
(TestPageLoadingPerfTest.test_run):
(TestPageLoadingPerfTest.test_run_with_bad_output):
* Scripts/webkitpy/performance_tests/perftestsrunner.py:
(PerfTestsRunner.__init__):
(PerfTestsRunner._parse_args):
(PerfTestsRunner.run):
(PerfTestsRunner._upload_json):
(PerfTestsRunner._print_status):
(PerfTestsRunner._run_tests_set):
(PerfTestsRunner._run_single_test):
* Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
(create_runner):
(test_run_test_set):
(test_run_test_set_kills_drt_per_run):
(test_run_test_pause_before_testing):
(test_run_test_set_for_parser_tests):
(test_run_test_set_with_json_output):
(test_run_test_set_with_json_source):
(test_run_test_set_with_multiple_repositories):
(test_upload_json):
(test_parse_args):

git-svn-id: http://svn.webkit.org/repository/webkit/trunk@115466 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Tools/ChangeLog
Tools/Scripts/run-perf-tests
Tools/Scripts/webkitpy/performance_tests/perftest.py
Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py

Tools/ChangeLog
index ad259b9..fbd8f9d 100644 (file)
@@ -1,3 +1,49 @@
+2012-04-27  Ryosuke Niwa  <rniwa@webkit.org>
+
+        Chromium Windows Perf bots time out due to lack of output
+        https://bugs.webkit.org/show_bug.cgi?id=84940
+
+        Reviewed by Dirk Pranke.
+
+        Dirk and I investigated the issue on the bot, but we couldn't figure out what was going wrong.
+        Since run-perf-tests doesn't need any of the fancy features the printer provides, just use Python's
+        built-in logging module instead. Printing to stdout and stderr seems to work, so hopefully
+        this will fix the issue on the bot.
+
+        * Scripts/run-perf-tests:
+        * Scripts/webkitpy/performance_tests/perftest.py:
+        (PerfTest.run):
+        (PerfTest.run_failed):
+        (PerfTest.parse_output):
+        (PerfTest.output_statistics):
+        (ChromiumStylePerfTest.parse_output):
+        (PageLoadingPerfTest.run):
+        * Scripts/webkitpy/performance_tests/perftest_unittest.py:
+        (MainTest.test_parse_output):
+        (MainTest.test_parse_output_with_failing_line):
+        (TestPageLoadingPerfTest):
+        (TestPageLoadingPerfTest.test_run):
+        (TestPageLoadingPerfTest.test_run_with_bad_output):
+        * Scripts/webkitpy/performance_tests/perftestsrunner.py:
+        (PerfTestsRunner.__init__):
+        (PerfTestsRunner._parse_args):
+        (PerfTestsRunner.run):
+        (PerfTestsRunner._upload_json):
+        (PerfTestsRunner._print_status):
+        (PerfTestsRunner._run_tests_set):
+        (PerfTestsRunner._run_single_test):
+        * Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
+        (create_runner):
+        (test_run_test_set):
+        (test_run_test_set_kills_drt_per_run):
+        (test_run_test_pause_before_testing):
+        (test_run_test_set_for_parser_tests):
+        (test_run_test_set_with_json_output):
+        (test_run_test_set_with_json_source):
+        (test_run_test_set_with_multiple_repositories):
+        (test_upload_json):
+        (test_parse_args):
+
 2012-04-27  Dirk Pranke  <dpranke@chromium.org>
 
         [chromium] use "drt-style" output, not "test-shell-style" output, on mac and linux DRT
Tools/Scripts/run-perf-tests
index 6ac02f4..95e04a0 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-# Copyright (C) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2012 Google Inc. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
@@ -34,8 +34,6 @@ import sys
 
 from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
 
-_log = logging.getLogger(__name__)
-
 if '__main__' == __name__:
     logging.basicConfig(level=logging.INFO, format="%(message)s")
     sys.exit(PerfTestsRunner().run())
Tools/Scripts/webkitpy/performance_tests/perftest.py
index 28601ef..509dd1d 100644 (file)
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 
+import logging
 import math
 import re
 
 from webkitpy.layout_tests.port.driver import DriverInput
 
 
+_log = logging.getLogger(__name__)
+
+
 class PerfTest(object):
     def __init__(self, test_name, path_or_url):
         self._test_name = test_name
@@ -45,24 +49,24 @@ class PerfTest(object):
     def path_or_url(self):
         return self._path_or_url
 
-    def run(self, driver, timeout_ms, printer, buildbot_output):
+    def run(self, driver, timeout_ms):
         output = driver.run_test(DriverInput(self.path_or_url(), timeout_ms, None, False))
-        if self.run_failed(output, printer):
+        if self.run_failed(output):
             return None
-        return self.parse_output(output, printer, buildbot_output)
+        return self.parse_output(output)
 
-    def run_failed(self, output, printer):
+    def run_failed(self, output):
         if output.text == None or output.error:
             pass
         elif output.timeout:
-            printer.write('timeout: %s' % self.test_name())
+            _log.error('timeout: %s' % self.test_name())
         elif output.crash:
-            printer.write('crash: %s' % self.test_name())
+            _log.error('crash: %s' % self.test_name())
         else:
             return False
 
         if output.error:
-            printer.write('error: %s\n%s' % (self.test_name(), output.error))
+            _log.error('error: %s\n%s' % (self.test_name(), output.error))
 
         return True
 
@@ -86,7 +90,7 @@ class PerfTest(object):
                 return True
         return False
 
-    def parse_output(self, output, printer, buildbot_output):
+    def parse_output(self, output):
         got_a_result = False
         test_failed = False
         results = {}
@@ -103,7 +107,7 @@ class PerfTest(object):
 
             if not self._should_ignore_line_in_parser_test_result(line):
                 test_failed = True
-                printer.write("%s" % line)
+                _log.error(line)
 
         if test_failed or set(self._statistics_keys) != set(results.keys()):
             return None
@@ -111,14 +115,14 @@ class PerfTest(object):
         results['unit'] = unit
 
         test_name = re.sub(r'\.\w+$', '', self._test_name)
-        self.output_statistics(test_name, results, buildbot_output)
+        self.output_statistics(test_name, results)
 
         return {test_name: results}
 
-    def output_statistics(self, test_name, results, buildbot_output):
+    def output_statistics(self, test_name, results):
         unit = results['unit']
-        buildbot_output.write('RESULT %s= %s %s\n' % (test_name.replace('/', ': '), results['avg'], unit))
-        buildbot_output.write(', '.join(['%s= %s %s' % (key, results[key], unit) for key in self._statistics_keys[1:]]) + '\n')
+        _log.info('RESULT %s= %s %s' % (test_name.replace('/', ': '), results['avg'], unit))
+        _log.info(', '.join(['%s= %s %s' % (key, results[key], unit) for key in self._statistics_keys[1:]]))
 
 
 class ChromiumStylePerfTest(PerfTest):
@@ -127,7 +131,7 @@ class ChromiumStylePerfTest(PerfTest):
     def __init__(self, test_name, path_or_url):
         super(ChromiumStylePerfTest, self).__init__(test_name, path_or_url)
 
-    def parse_output(self, output, printer, buildbot_output):
+    def parse_output(self, output):
         test_failed = False
         got_a_result = False
         results = {}
@@ -136,10 +140,10 @@ class ChromiumStylePerfTest(PerfTest):
             if resultLine:
                 # FIXME: Store the unit
                 results[self.test_name() + ':' + resultLine.group('name').replace(' ', '')] = float(resultLine.group('value'))
-                buildbot_output.write("%s\n" % line)
+                _log.info(line)
             elif not len(line) == 0:
                 test_failed = True
-                printer.write("%s" % line)
+                _log.error(line)
         return results if results and not test_failed else None
 
 
@@ -147,12 +151,12 @@ class PageLoadingPerfTest(PerfTest):
     def __init__(self, test_name, path_or_url):
         super(PageLoadingPerfTest, self).__init__(test_name, path_or_url)
 
-    def run(self, driver, timeout_ms, printer, buildbot_output):
+    def run(self, driver, timeout_ms):
         test_times = []
 
         for i in range(0, 20):
             output = driver.run_test(DriverInput(self.path_or_url(), timeout_ms, None, False))
-            if self.run_failed(output, printer):
+            if self.run_failed(output):
                 return None
             if i == 0:
                 continue
@@ -177,7 +181,7 @@ class PageLoadingPerfTest(PerfTest):
             'median': test_times[middle] if len(test_times) % 2 else (test_times[middle - 1] + test_times[middle]) / 2,
             'stdev': math.sqrt(squareSum),
             'unit': 'ms'}
-        self.output_statistics(self.test_name(), results, buildbot_output)
+        self.output_statistics(self.test_name(), results)
         return {self.test_name(): results}
 
 
Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
index ced3e1b..21efd2c 100755 (executable)
@@ -31,6 +31,7 @@ import StringIO
 import math
 import unittest
 
+from webkitpy.common.system.outputcapture import OutputCapture
 from webkitpy.layout_tests.port.driver import DriverOutput
 from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
 from webkitpy.performance_tests.perftest import PageLoadingPerfTest
@@ -38,18 +39,8 @@ from webkitpy.performance_tests.perftest import PerfTest
 from webkitpy.performance_tests.perftest import PerfTestFactory
 
 
-class MockPrinter(object):
-    def __init__(self):
-        self.written_lines = []
-
-    def write(self, line):
-        self.written_lines.append(line)
-
-
 class MainTest(unittest.TestCase):
     def test_parse_output(self):
-        printer = MockPrinter()
-        buildbot_output = StringIO.StringIO()
         output = DriverOutput('\n'.join([
             'Running 20 times',
             'Ignoring warm-up run (1115)',
@@ -59,14 +50,19 @@ class MainTest(unittest.TestCase):
             'stdev 11',
             'min 1080',
             'max 1120']), image=None, image_hash=None, audio=None)
-        test = PerfTest('some-test', '/path/some-dir/some-test')
-        self.assertEqual(test.parse_output(output, printer, buildbot_output),
-            {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms'}})
-        self.assertEqual(printer.written_lines, [])
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            test = PerfTest('some-test', '/path/some-dir/some-test')
+            self.assertEqual(test.parse_output(output),
+                {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms'}})
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n')
 
     def test_parse_output_with_failing_line(self):
-        printer = MockPrinter()
-        buildbot_output = StringIO.StringIO()
         output = DriverOutput('\n'.join([
             'Running 20 times',
             'Ignoring warm-up run (1115)',
@@ -78,15 +74,19 @@ class MainTest(unittest.TestCase):
             'stdev 11',
             'min 1080',
             'max 1120']), image=None, image_hash=None, audio=None)
-        test = PerfTest('some-test', '/path/some-dir/some-test')
-        self.assertEqual(test.parse_output(output, printer, buildbot_output), None)
-        self.assertEqual(printer.written_lines, ['some-unrecognizable-line'])
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            test = PerfTest('some-test', '/path/some-dir/some-test')
+            self.assertEqual(test.parse_output(output), None)
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'some-unrecognizable-line\n')
 
 
 class TestPageLoadingPerfTest(unittest.TestCase):
-    def assertWritten(self, stream, contents):
-        self.assertEquals(stream.buflist, contents)
-
     class MockDriver(object):
         def __init__(self, values):
             self._values = values
@@ -101,22 +101,31 @@ class TestPageLoadingPerfTest(unittest.TestCase):
                 return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=self._values[self._index - 1])
 
     def test_run(self):
-        printer = MockPrinter()
-        buildbot_output = StringIO.StringIO()
         test = PageLoadingPerfTest('some-test', '/path/some-dir/some-test')
         driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
-        self.assertEqual(test.run(driver, None, printer, buildbot_output),
-            {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': math.sqrt(570 * 1000 * 1000), 'min': 2000, 'unit': 'ms'}})
-        self.assertEqual(printer.written_lines, [])
-        self.assertWritten(buildbot_output, ['RESULT some-test= 11000.0 ms\n', 'median= 11000 ms, stdev= 23874.6727726 ms, min= 2000 ms, max= 20000 ms\n'])
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            self.assertEqual(test.run(driver, None),
+                {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': math.sqrt(570 * 1000 * 1000), 'min': 2000, 'unit': 'ms'}})
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 23874.6727726 ms, min= 2000 ms, max= 20000 ms\n')
 
     def test_run_with_bad_output(self):
-        printer = MockPrinter()
-        buildbot_output = StringIO.StringIO()
-        test = PageLoadingPerfTest('some-test', '/path/some-dir/some-test')
-        driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 'some error', 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
-        self.assertEqual(test.run(driver, None, printer, buildbot_output), None)
-        self.assertEqual(printer.written_lines, ['error: some-test\nsome error'])
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            test = PageLoadingPerfTest('some-test', '/path/some-dir/some-test')
+            driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 'some error', 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
+            self.assertEqual(test.run(driver, None), None)
+        finally:
+            actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+        self.assertEqual(actual_stdout, '')
+        self.assertEqual(actual_stderr, '')
+        self.assertEqual(actual_logs, 'error: some-test\nsome error\n')
 
 
 class TestPerfTestFactory(unittest.TestCase):
Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
index f47c4a3..b4c2949 100644 (file)
@@ -52,8 +52,7 @@ class PerfTestsRunner(object):
     _EXIT_CODE_BAD_JSON = -2
     _EXIT_CODE_FAILED_UPLOADING = -3
 
-    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
-        self._buildbot_output = buildbot_output
+    def __init__(self, args=None, port=None):
         self._options, self._args = PerfTestsRunner._parse_args(args)
         if port:
             self._port = port
@@ -62,7 +61,6 @@ class PerfTestsRunner(object):
             self._host = Host()
             self._port = self._host.port_factory.get(self._options.platform, self._options)
         self._host._initialize_scm()
-        self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output)
         self._webkit_base_dir_len = len(self._port.webkit_base())
         self._base_path = self._port.perf_tests_dir()
         self._results = {}
@@ -70,8 +68,6 @@ class PerfTestsRunner(object):
 
     @staticmethod
     def _parse_args(args=None):
-        print_options = printing.print_options()
-
         perf_option_list = [
             optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                 help='Set the configuration to Debug'),
@@ -102,9 +98,7 @@ class PerfTestsRunner(object):
             optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                 help="Use WebKitTestRunner rather than DumpRenderTree."),
             ]
-
-        option_list = (perf_option_list + print_options)
-        return optparse.OptionParser(option_list=option_list).parse_args(args)
+        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)
 
     def _collect_tests(self):
         """Return the list of tests found."""
@@ -133,11 +127,6 @@ class PerfTestsRunner(object):
         return tests
 
     def run(self):
-        if self._options.help_printing:
-            self._printer.help_printing()
-            self._printer.cleanup()
-            return 0
-
         if not self._port.check_build(needs_http=False):
             _log.error("Build not up to date for %s" % self._port._path_to_driver())
             return self._EXIT_CODE_BAD_BUILD
@@ -145,11 +134,8 @@ class PerfTestsRunner(object):
         # We wrap any parts of the run that are slow or likely to raise exceptions
         # in a try/finally to ensure that we clean up the logging configuration.
         unexpected = -1
-        try:
-            tests = self._collect_tests()
-            unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
-        finally:
-            self._printer.cleanup()
+        tests = self._collect_tests()
+        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
 
         options = self._options
         if self._options.output_json_path:
@@ -209,7 +195,7 @@ class PerfTestsRunner(object):
                 _log.error(line)
             return False
 
-        self._printer.write("JSON file uploaded.")
+        _log.info("JSON file uploaded.")
         return True
 
     def _print_status(self, tests, expected, unexpected):
@@ -219,7 +205,7 @@ class PerfTestsRunner(object):
             status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
         if unexpected:
             status += " (%d didn't run)" % unexpected
-        self._printer.write(status)
+        _log.info(status)
 
     def _run_tests_set(self, tests, port):
         result_count = len(tests)
@@ -236,13 +222,13 @@ class PerfTestsRunner(object):
                     driver.stop()
                     return unexpected
 
-            self._printer.write('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
+            _log.info('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
             if self._run_single_test(test, driver):
                 expected = expected + 1
             else:
                 unexpected = unexpected + 1
 
-            self._printer.write('')
+            _log.info('')
 
             driver.stop()
 
@@ -251,12 +237,12 @@ class PerfTestsRunner(object):
     def _run_single_test(self, test, driver):
         start_time = time.time()
 
-        new_results = test.run(driver, self._options.time_out_ms, self._printer, self._buildbot_output)
+        new_results = test.run(driver, self._options.time_out_ms)
         if new_results:
             self._results.update(new_results)
         else:
-            self._printer.write('FAILED')
+            _log.error('FAILED')
 
-        self._printer.write("Finished: %f s" % (time.time() - start_time))
+        _log.debug("Finished: %f s" % (time.time() - start_time))
 
         return new_results != None
Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
index 4db97fb..be925c9 100755 (executable)
@@ -111,15 +111,12 @@ max 1120
         def stop(self):
             """do nothing"""
 
-    def create_runner(self, buildbot_output=None, args=[], regular_output=None, driver_class=TestDriver):
-        buildbot_output = buildbot_output or StringIO.StringIO()
-        regular_output = regular_output or StringIO.StringIO()
-
+    def create_runner(self, args=[], driver_class=TestDriver):
         options, parsed_args = PerfTestsRunner._parse_args(args)
         test_port = TestPort(host=MockHost(), options=options)
         test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()
 
-        runner = PerfTestsRunner(regular_output, buildbot_output, args=args, port=test_port)
+        runner = PerfTestsRunner(args=args, port=test_port)
         runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
         runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
         runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
@@ -161,13 +158,17 @@ max 1120
         return tests
 
     def test_run_test_set(self):
-        buildbot_output = StringIO.StringIO()
-        runner = self.create_runner(buildbot_output)
+        runner = self.create_runner()
         tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
             'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
-        unexpected_result_count = runner._run_tests_set(tests, runner._port)
+        output = OutputCapture()
+        output.capture_output()
+        try:
+            unexpected_result_count = runner._run_tests_set(tests, runner._port)
+        finally:
+            stdout, stderr, log = output.restore_output()
         self.assertEqual(unexpected_result_count, len(tests) - 1)
-        self.assertWritten(buildbot_output, ['RESULT group_name: test_name= 42 ms\n'])
+        self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)
 
     def test_run_test_set_kills_drt_per_run(self):
 
@@ -177,12 +178,12 @@ max 1120
             def stop(self):
                 TestDriverWithStopCount.stop_count += 1
 
-        buildbot_output = StringIO.StringIO()
-        runner = self.create_runner(buildbot_output, driver_class=TestDriverWithStopCount)
+        runner = self.create_runner(driver_class=TestDriverWithStopCount)
+
         tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
             'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
-
         unexpected_result_count = runner._run_tests_set(tests, runner._port)
+
         self.assertEqual(TestDriverWithStopCount.stop_count, 6)
 
     def test_run_test_pause_before_testing(self):
@@ -192,42 +193,58 @@ max 1120
             def start(self):
                 TestDriverWithStartCount.start_count += 1
 
-        buildbot_output = StringIO.StringIO()
-        regular_output = StringIO.StringIO()
-        runner = self.create_runner(buildbot_output, args=["--pause-before-testing"], regular_output=regular_output, driver_class=TestDriverWithStartCount)
+        runner = self.create_runner(args=["--pause-before-testing"], driver_class=TestDriverWithStartCount)
         tests = self._tests_for_runner(runner, ['inspector/pass.html'])
 
+        output = OutputCapture()
+        output.capture_output()
         try:
-            output = OutputCapture()
-            output.capture_output()
             unexpected_result_count = runner._run_tests_set(tests, runner._port)
             self.assertEqual(TestDriverWithStartCount.start_count, 1)
         finally:
-            _, stderr, _ = output.restore_output()
-            self.assertEqual(stderr, "Ready to run test?\n")
-            self.assertTrue("Running inspector/pass.html (1 of 1)" in regular_output.getvalue())
+            stdout, stderr, log = output.restore_output()
+        self.assertEqual(stderr, "Ready to run test?\n")
+        self.assertEqual(log, "Running inspector/pass.html (1 of 1)\nRESULT group_name: test_name= 42 ms\n\n")
 
     def test_run_test_set_for_parser_tests(self):
-        buildbot_output = StringIO.StringIO()
-        runner = self.create_runner(buildbot_output)
+        runner = self.create_runner()
         tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
-        unexpected_result_count = runner._run_tests_set(tests, runner._port)
+        output = OutputCapture()
+        output.capture_output()
+        try:
+            unexpected_result_count = runner._run_tests_set(tests, runner._port)
+        finally:
+            stdout, stderr, log = output.restore_output()
         self.assertEqual(unexpected_result_count, 0)
-        self.assertWritten(buildbot_output, ['RESULT Bindings: event-target-wrapper= 1489.05 ms\n',
-                                             'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms\n',
-                                             'RESULT Parser: some-parser= 1100.0 ms\n',
-                                             'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n'])
+        self.assertEqual(log, '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
+        'RESULT Bindings: event-target-wrapper= 1489.05 ms',
+        'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
+        '',
+        'Running Parser/some-parser.html (2 of 2)',
+        'RESULT Parser: some-parser= 1100.0 ms',
+        'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
+        '', '']))
 
     def test_run_test_set_with_json_output(self):
-        buildbot_output = StringIO.StringIO()
-        runner = self.create_runner(buildbot_output, args=['--output-json-path=/mock-checkout/output.json'])
+        runner = self.create_runner(args=['--output-json-path=/mock-checkout/output.json'])
         runner._host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
         runner._host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
         runner._timestamp = 123456789
-        self.assertEqual(runner.run(), 0)
-        self.assertWritten(buildbot_output, ['RESULT Bindings: event-target-wrapper= 1489.05 ms\n',
-                                             'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms\n',
-                                             'RESULT group_name: test_name= 42 ms\n'])
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            self.assertEqual(runner.run(), 0)
+        finally:
+            stdout, stderr, logs = output_capture.restore_output()
+
+        self.assertEqual(logs,
+            '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
+                       'RESULT Bindings: event-target-wrapper= 1489.05 ms',
+                       'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
+                       '',
+                       'Running inspector/pass.html (2 of 2)',
+                       'RESULT group_name: test_name= 42 ms',
+                       '', '']))
 
         self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
             "timestamp": 123456789, "results":
@@ -236,17 +253,25 @@ max 1120
             "webkit-revision": 5678})
 
     def test_run_test_set_with_json_source(self):
-        buildbot_output = StringIO.StringIO()
-        runner = self.create_runner(buildbot_output, args=['--output-json-path=/mock-checkout/output.json',
-            '--source-json-path=/mock-checkout/source.json'])
+        runner = self.create_runner(args=['--output-json-path=/mock-checkout/output.json', '--source-json-path=/mock-checkout/source.json'])
         runner._host.filesystem.files['/mock-checkout/source.json'] = '{"key": "value"}'
         runner._host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
         runner._host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
         runner._timestamp = 123456789
-        self.assertEqual(runner.run(), 0)
-        self.assertWritten(buildbot_output, ['RESULT Bindings: event-target-wrapper= 1489.05 ms\n',
-                                             'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms\n',
-                                             'RESULT group_name: test_name= 42 ms\n'])
+        output_capture = OutputCapture()
+        output_capture.capture_output()
+        try:
+            self.assertEqual(runner.run(), 0)
+        finally:
+            stdout, stderr, logs = output_capture.restore_output()
+
+        self.assertEqual(logs, '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
+            'RESULT Bindings: event-target-wrapper= 1489.05 ms',
+            'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
+            '',
+            'Running inspector/pass.html (2 of 2)',
+            'RESULT group_name: test_name= 42 ms',
+            '', '']))
 
         self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
             "timestamp": 123456789, "results":
@@ -256,13 +281,11 @@ max 1120
             "key": "value"})
 
     def test_run_test_set_with_multiple_repositories(self):
-        buildbot_output = StringIO.StringIO()
-        runner = self.create_runner(buildbot_output, args=['--output-json-path=/mock-checkout/output.json'])
+        runner = self.create_runner(args=['--output-json-path=/mock-checkout/output.json'])
         runner._host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
         runner._timestamp = 123456789
         runner._port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
         self.assertEqual(runner.run(), 0)
-
         self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
             "timestamp": 123456789, "results": {"inspector/pass.html:group_name:test_name": 42.0}, "webkit-revision": 5678, "some-revision": 5678})
 
@@ -297,8 +320,7 @@ max 1120
         self.assertEqual(runner.run(), -3)
 
     def test_upload_json(self):
-        regular_output = StringIO.StringIO()
-        runner = self.create_runner(regular_output=regular_output)
+        runner = self.create_runner()
         runner._host.filesystem.files['/mock-checkout/some.json'] = 'some content'
 
         called = []
@@ -390,7 +412,6 @@ max 1120
     def test_parse_args(self):
         runner = self.create_runner()
         options, args = PerfTestsRunner._parse_args([
-                '--verbose',
                 '--build-directory=folder42',
                 '--platform=platform42',
                 '--builder-name', 'webkit-mac-1',
@@ -399,17 +420,14 @@ max 1120
                 '--output-json-path=a/output.json',
                 '--source-json-path=a/source.json',
                 '--test-results-server=somehost',
-                '--debug', 'an_arg'])
+                '--debug'])
         self.assertEqual(options.build, True)
-        self.assertEqual(options.verbose, True)
-        self.assertEqual(options.help_printing, None)
         self.assertEqual(options.build_directory, 'folder42')
         self.assertEqual(options.platform, 'platform42')
         self.assertEqual(options.builder_name, 'webkit-mac-1')
         self.assertEqual(options.build_number, '56')
         self.assertEqual(options.time_out_ms, '42')
         self.assertEqual(options.configuration, 'Debug')
-        self.assertEqual(options.print_options, None)
         self.assertEqual(options.output_json_path, 'a/output.json')
         self.assertEqual(options.source_json_path, 'a/source.json')
         self.assertEqual(options.test_results_server, 'somehost')