nrwt: clean up printing.py
author    dpranke@chromium.org <dpranke@chromium.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Mon, 6 Aug 2012 20:46:30 +0000 (20:46 +0000)
committer dpranke@chromium.org <dpranke@chromium.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Mon, 6 Aug 2012 20:46:30 +0000 (20:46 +0000)
https://bugs.webkit.org/show_bug.cgi?id=93026

Reviewed by Dirk Pranke.

This patch cleans up the implementation of printing.py now
that we no longer rely on all of the original complexity.

There should be no changes in functionality and everything
should be covered by the existing tests.
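
For reference, here is a minimal sketch of the output routing this
patch moves to. It is condensed from the new helpers in the
printing.py diff below; the constructor signature, the plain streams
(the real class wraps regular output in a MeteredStream and also
takes a port argument), and the __main__ demo are simplifications
for illustration only.

    class Printer(object):
        def __init__(self, options, regular_stream, buildbot_stream):
            # options is expected to provide .quiet and .debug_rwt_logging.
            self._options = options
            self._stream = regular_stream
            self._buildbot_stream = buildbot_stream

        def _print_quiet(self, msg):
            # Always printed, even with --quiet.
            self.writeln(msg)

        def _print_default(self, msg):
            # Suppressed by --quiet.
            if not self._options.quiet:
                self.writeln(msg)

        def _print_debug(self, msg):
            # Only printed with --debug-rwt-logging (or --verbose).
            if self._options.debug_rwt_logging:
                self.writeln(msg)

        def _print_for_bot(self, msg):
            # Buildbot-parsed output goes to its own stream.
            self._buildbot_stream.write(msg + "\n")

        def writeln(self, msg):
            self._stream.write(msg + "\n")

    if __name__ == '__main__':
        import optparse, sys
        options = optparse.Values({'quiet': False, 'debug_rwt_logging': False})
        printer = Printer(options, sys.stderr, sys.stdout)
        printer._print_default("Using port 'test-mac-leopard'")
        printer._print_debug("Test timing:")  # dropped unless debug logging is on

The four helpers above replace the old PRINT_QUIET / PRINT_DEFAULT /
PRINT_EVERYTHING switch lists and the enabled()/disabled() checks;
each call site now states its verbosity level directly.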

* Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py:
(LayoutTestRunner.run_tests):
* Scripts/webkitpy/layout_tests/views/printing.py:
(Printer.__init__):
(Printer.__del__):
(Printer.print_config):
(Printer.print_found):
(Printer.print_expected):
(Printer.print_workers_and_shards):
(Printer):
(Printer._print_expected_results_of_type):
(Printer.print_results):
(Printer._print_timing_statistics):
(Printer._print_aggregate_test_statistics):
(Printer._print_individual_test_times):
(Printer._print_test_list_timing):
(Printer._print_directory_timings):
(Printer._print_statistics_for_test_timings):
(Printer._print_result_summary):
(Printer._print_result_summary_entry):
(Printer._print_one_line_summary):
(Printer.print_finished_test):
(Printer._print_test_result):
(Printer._print_test_trace):
(Printer._print_baseline):
(Printer._print_unexpected_test_result):
(Printer._print_progress):
(Printer._print_unexpected_results):
(Printer._print_unexpected_results.add_result):
(Printer._print_quiet):
(Printer._print_default):
(Printer._print_debug):
(Printer._print_for_bot):
(Printer.write_update):
(Printer.writeln):
* Scripts/webkitpy/layout_tests/views/printing_unittest.py:
(Testprinter.get_printer):
(Testprinter.test_print_config):
(Testprinter.test_print_one_line_summary):
(Testprinter.test_print_unexpected_results):
(test_details):

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@124800 268f45cc-cd09-0410-ab3c-d52691b4dbfc

Tools/ChangeLog
Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
Tools/Scripts/webkitpy/layout_tests/views/printing.py
Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py

diff --git a/Tools/ChangeLog b/Tools/ChangeLog
index 71b4cae..050e8af 100644
@@ -1,5 +1,60 @@
 2012-08-06  Dirk Pranke  <dpranke@chromium.org>
 
+        nrwt: clean up printing.py
+        https://bugs.webkit.org/show_bug.cgi?id=93026
+
+        Reviewed by Dirk Pranke.
+
+        This patch cleans up the implementation of printing.py now
+        that we're not using all the original complexity.
+
+        There should be no changes in functionality and everything
+        should be covered by the existing tests.
+
+        * Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py:
+        (LayoutTestRunner.run_tests):
+        * Scripts/webkitpy/layout_tests/views/printing.py:
+        (Printer.__init__):
+        (Printer.__del__):
+        (Printer.print_config):
+        (Printer.print_found):
+        (Printer.print_expected):
+        (Printer.print_workers_and_shards):
+        (Printer):
+        (Printer._print_expected_results_of_type):
+        (Printer.print_results):
+        (Printer._print_timing_statistics):
+        (Printer._print_aggregate_test_statistics):
+        (Printer._print_individual_test_times):
+        (Printer._print_test_list_timing):
+        (Printer._print_directory_timings):
+        (Printer._print_statistics_for_test_timings):
+        (Printer._print_result_summary):
+        (Printer._print_result_summary_entry):
+        (Printer._print_one_line_summary):
+        (Printer.print_finished_test):
+        (Printer._print_test_result):
+        (Printer._print_test_trace):
+        (Printer._print_baseline):
+        (Printer._print_unexpected_test_result):
+        (Printer._print_progress):
+        (Printer._print_unexpected_results):
+        (Printer._print_unexpected_results.add_result):
+        (Printer._print_quiet):
+        (Printer._print_default):
+        (Printer._print_debug):
+        (Printer._print_for_bot):
+        (Printer.write_update):
+        (Printer.writeln):
+        * Scripts/webkitpy/layout_tests/views/printing_unittest.py:
+        (Testprinter.get_printer):
+        (Testprinter.test_print_config):
+        (Testprinter.test_print_one_line_summary):
+        (Testprinter.test_print_unexpected_results):
+        (test_details):
+
+2012-08-06  Dirk Pranke  <dpranke@chromium.org>
+
         change bots to pass --debug-rwt-logging instead of --verbose to new-run-webkit-tests
         https://bugs.webkit.org/show_bug.cgi?id=93043
 
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
index a5def9b..a199033 100644
@@ -140,7 +140,7 @@ class LayoutTestRunner(object):
                 pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
         except KeyboardInterrupt:
             self._printer.flush()
-            self._printer.write('Interrupted, exiting ...')
+            self._printer.writeln('Interrupted, exiting ...')
             keyboard_interrupted = True
         except TestRunInterruptedException, e:
             _log.warning(e.reason)
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index 109baf6..0df5b0d 100755
@@ -364,7 +364,6 @@ def parse_args(args=None):
         optparse.make_option("-n", "--dry-run", action="store_true",
             default=False,
             help="Do everything but actually run the tests or upload results."),
-        # old-run-webkit-tests has --valgrind instead of wrapper.
         optparse.make_option("--wrapper",
             help="wrapper command to insert before invocations of "
                  "DumpRenderTree; option is split on whitespace before "
@@ -379,7 +378,6 @@ def parse_args(args=None):
             help="Run all tests, even those marked SKIP in the test list (same as --skipped=ignore)"),
         optparse.make_option("--time-out-ms",
             help="Set the timeout for each test"),
-        # old-run-webkit-tests calls --randomize-order --random:
         optparse.make_option("--randomize-order", action="store_true",
             default=False, help=("Run tests in random order (useful "
                                 "for tracking down corruption)")),
@@ -388,13 +386,10 @@ def parse_args(args=None):
                  "of the layout tests")),
         optparse.make_option("--run-part", help=("Run a specified part (n:m), "
                   "the nth of m parts, of the layout tests")),
-        # old-run-webkit-tests calls --batch-size: --nthly n
-        #   Restart DumpRenderTree every n tests (default: 1000)
         optparse.make_option("--batch-size",
             help=("Run a the tests in batches (n), after every n tests, "
                   "DumpRenderTree is relaunched."), type="int", default=None),
-        # old-run-webkit-tests calls --run-singly: -1|--singly
-        # Isolate each test case run (implies --nthly 1 --verbose)
+        # old-run-webkit-tests has --run-singly imply --verbose.
         optparse.make_option("--run-singly", action="store_true",
             default=False, help="run a separate DumpRenderTree for each test"),
         optparse.make_option("--child-processes",
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
index fc699bc..162a079 100755
@@ -749,10 +749,10 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
 
     def test_retrying_and_flaky_tests(self):
         host = MockHost()
-        res, out, err, _ = logging_run(['failures/flaky'], tests_included=True, host=host)
+        res, out, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
         self.assertEquals(res, 0)
         self.assertTrue('Retrying' in err.getvalue())
-        self.assertTrue('Unexpected flakiness' in out.getvalue())
+        self.assertTrue('unexpected flakiness' in out.getvalue())
         self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
         self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/tests_run0.txt'))
         self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))
@@ -764,8 +764,8 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
         self.assertEquals(res, 1)
         self.assertTrue('Clobbering old results' in err.getvalue())
         self.assertTrue('flaky/text.html' in err.getvalue())
-        self.assertTrue('Unexpected text diff' in out.getvalue())
-        self.assertFalse('Unexpected flakiness' in out.getvalue())
+        self.assertTrue('unexpected text diff' in out.getvalue())
+        self.assertFalse('unexpected flakiness' in out.getvalue())
         self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
         self.assertFalse(host.filesystem.exists('retries'))
 
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/printing.py b/Tools/Scripts/webkitpy/layout_tests/views/printing.py
index b21812d..593c5e2 100644
@@ -38,11 +38,9 @@ from webkitpy.layout_tests.models import test_expectations
 from webkitpy.layout_tests.models.test_expectations import TestExpectations
 from webkitpy.layout_tests.views.metered_stream import MeteredStream
 
+
 NUM_SLOW_TESTS_TO_LOG = 10
 
-PRINT_QUIET = "one-line-progress,one-line-summary,unexpected,unexpected-results,updates"
-PRINT_DEFAULT = "config,misc,one-line-progress,one-line-summary,unexpected,unexpected-results,updates"
-PRINT_EVERYTHING = "actual,config,expected,misc,one-line-progress,one-line-summary,slowest,timing,unexpected,unexpected-results,updates"
 
 def print_options():
     return [
@@ -51,7 +49,7 @@ def print_options():
         optparse.make_option('-v', '--verbose', action='store_true', default=False, dest='debug_rwt_logging',
                              help='same as --debug-rwt-logging (for now)'),
         optparse.make_option('--details', action='store_true', default=False,
-                             help='print detailed information about each test'),
+                             help='print detailed results for every test'),
         optparse.make_option('--debug-rwt-logging', action='store_true', default=False,
                              help='print timestamps and debug information for run-webkit-tests itself'),
     ]
@@ -70,30 +68,9 @@ class Printer(object):
     By default the buildbot-parsed code gets logged to stdout, and regular
     output gets logged to stderr."""
     def __init__(self, port, options, regular_output, buildbot_output, logger=None):
-        """
-        Args
-          port               interface to port-specific routines
-          options            OptionParser object with command line settings
-          regular_output     stream to which output intended only for humans
-                             should be written
-          buildbot_output    stream to which output intended to be read by
-                             the buildbots (and humans) should be written
-          logger             optional logger to integrate into the stream.
-        """
         self._port = port
         self._options = options
         self._buildbot_stream = buildbot_output
-
-        if options.debug_rwt_logging:
-            self.switches = PRINT_EVERYTHING.split(',')
-        elif options.quiet:
-            self.switches = PRINT_QUIET.split(',')
-        else:
-            self.switches = PRINT_DEFAULT.split(',')
-
-        if options.details:
-            self.switches.append('trace-everything')
-
         self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger)
 
     def cleanup(self):
@@ -102,72 +79,54 @@ class Printer(object):
     def __del__(self):
         self.cleanup()
 
-    # These two routines just hide the implementation of the switches.
-    def disabled(self, option):
-        return not option in self.switches
-
-    def enabled(self, option):
-        return option in self.switches
-
     def print_config(self):
-        """Prints the configuration for the test run."""
-        self._print_config("Using port '%s'" % self._port.name())
-        self._print_config("Test configuration: %s" % self._port.test_configuration())
-        self._print_config("Placing test results in %s" % self._options.results_directory)
+        self._print_default("Using port '%s'" % self._port.name())
+        self._print_default("Test configuration: %s" % self._port.test_configuration())
+        self._print_default("Placing test results in %s" % self._options.results_directory)
 
         # FIXME: should these options be in printing_options?
         if self._options.new_baseline:
-            self._print_config("Placing new baselines in %s" % self._port.baseline_path())
+            self._print_default("Placing new baselines in %s" % self._port.baseline_path())
 
         fs = self._port.host.filesystem
         fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
-        self._print_config("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
+        self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
 
-        self._print_config("Using %s build" % self._options.configuration)
+        self._print_default("Using %s build" % self._options.configuration)
         if self._options.pixel_tests:
-            self._print_config("Pixel tests enabled")
+            self._print_default("Pixel tests enabled")
         else:
-            self._print_config("Pixel tests disabled")
+            self._print_default("Pixel tests disabled")
 
-        self._print_config("Regular timeout: %s, slow test timeout: %s" %
-                           (self._options.time_out_ms, self._options.slow_time_out_ms))
+        self._print_default("Regular timeout: %s, slow test timeout: %s" %
+                  (self._options.time_out_ms, self._options.slow_time_out_ms))
 
-        self._print_config('Command line: ' + ' '.join(self._port.driver_cmd_line()))
-        self._print_config('')
+        self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line()))
+        self._print_default('')
 
     def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
         found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run)
         if repeat_each * iterations > 1:
             found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
         found_str += ', skipping %d' % (num_all_test_files - num_to_run)
-        self._print_expected(found_str + '.')
+        self._print_default(found_str + '.')
 
     def print_expected(self, result_summary, tests_with_result_type_callback):
         self._print_expected_results_of_type(result_summary, test_expectations.PASS, "passes", tests_with_result_type_callback)
         self._print_expected_results_of_type(result_summary, test_expectations.FAIL, "failures", tests_with_result_type_callback)
         self._print_expected_results_of_type(result_summary, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
-        self._print_expected('')
+        self._print_debug('')
 
     def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
         driver_name = self._port.driver_name()
         if num_workers == 1:
-            self._print_config("Running 1 %s over %s." %
-                (driver_name, grammar.pluralize('shard', num_shards)))
+            self._print_default("Running 1 %s over %s." % (driver_name, grammar.pluralize('shard', num_shards)))
         else:
-            self._print_config("Running %d %ss in parallel over %d shards (%d locked)." %
+            self._print_default("Running %d %ss in parallel over %d shards (%d locked)." %
                 (num_workers, driver_name, num_shards, num_locked_shards))
-        self._print_config('')
-
-    def _print_expected_results_of_type(self, result_summary,
-                                        result_type, result_type_str, tests_with_result_type_callback):
-        """Print the number of the tests in a given result class.
-
-        Args:
-          result_summary - the object containing all the results to report on
-          result_type - the particular result type to report in the summary.
-          result_type_str - a string description of the result_type.
-          expectations - populated TestExpectations object for stats
-        """
+        self._print_default('')
+
+    def _print_expected_results_of_type(self, result_summary, result_type, result_type_str, tests_with_result_type_callback):
         tests = tests_with_result_type_callback(result_type)
         now = result_summary.tests_by_timeline[test_expectations.NOW]
         wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
@@ -176,12 +135,9 @@ class Printer(object):
         # nicely-aligned table.
         fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
                   % (self._num_digits(now), self._num_digits(wontfix)))
-        self._print_expected(fmtstr %
-            (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
+        self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
 
     def _num_digits(self, num):
-        """Returns the number of digits needed to represent the length of a
-        sequence."""
         ndigits = 1
         if len(num):
             ndigits = int(math.log10(len(num))) + 1
@@ -190,61 +146,36 @@ class Printer(object):
     def print_results(self, run_time, thread_timings, test_timings, individual_test_timings, result_summary, unexpected_results):
         self._print_timing_statistics(run_time, thread_timings, test_timings, individual_test_timings, result_summary)
         self._print_result_summary(result_summary)
-
-        self.print_one_line_summary(result_summary.total - result_summary.expected_skips, result_summary.expected - result_summary.expected_skips, result_summary.unexpected)
-
-        self.print_unexpected_results(unexpected_results)
+        self._print_one_line_summary(result_summary.total - result_summary.expected_skips,
+                                     result_summary.expected - result_summary.expected_skips,
+                                     result_summary.unexpected)
+        self._print_unexpected_results(unexpected_results)
 
     def _print_timing_statistics(self, total_time, thread_timings,
                                  directory_test_timings, individual_test_timings,
                                  result_summary):
-        """Record timing-specific information for the test run.
-
-        Args:
-          total_time: total elapsed time (in seconds) for the test run
-          thread_timings: wall clock time each thread ran for
-          directory_test_timings: timing by directory
-          individual_test_timings: timing by file
-          result_summary: summary object for the test run
-        """
-        self.print_timing("Test timing:")
-        self.print_timing("  %6.2f total testing time" % total_time)
-        self.print_timing("")
-        self.print_timing("Thread timing:")
+        self._print_debug("Test timing:")
+        self._print_debug("  %6.2f total testing time" % total_time)
+        self._print_debug("")
+        self._print_debug("Thread timing:")
         cuml_time = 0
         for t in thread_timings:
-            self.print_timing("    %10s: %5d tests, %6.2f secs" %
-                  (t['name'], t['num_tests'], t['total_time']))
+            self._print_debug("    %10s: %5d tests, %6.2f secs" % (t['name'], t['num_tests'], t['total_time']))
             cuml_time += t['total_time']
-        self.print_timing("   %6.2f cumulative, %6.2f optimal" %
-              (cuml_time, cuml_time / int(self._options.child_processes)))
-        self.print_timing("")
+        self._print_debug("   %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / int(self._options.child_processes)))
+        self._print_debug("")
 
         self._print_aggregate_test_statistics(individual_test_timings)
-        self._print_individual_test_times(individual_test_timings,
-                                          result_summary)
+        self._print_individual_test_times(individual_test_timings, result_summary)
         self._print_directory_timings(directory_test_timings)
 
     def _print_aggregate_test_statistics(self, individual_test_timings):
-        """Prints aggregate statistics (e.g. median, mean, etc.) for all tests.
-        Args:
-          individual_test_timings: List of TestResults for all tests.
-        """
         times_for_dump_render_tree = [test_stats.test_run_time for test_stats in individual_test_timings]
-        self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):",
-                                                times_for_dump_render_tree)
-
-    def _print_individual_test_times(self, individual_test_timings,
-                                     result_summary):
-        """Prints the run times for slow, timeout and crash tests.
-        Args:
-          individual_test_timings: List of TestStats for all tests.
-          result_summary: summary object for test run
-        """
-        # Reverse-sort by the time spent in DumpRenderTree.
-        individual_test_timings.sort(lambda a, b:
-            cmp(b.test_run_time, a.test_run_time))
+        self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)
 
+    def _print_individual_test_times(self, individual_test_timings, result_summary):
+        # Reverse-sort by the time spent in DumpRenderTree.
+        individual_test_timings.sort(lambda a, b: cmp(b.test_run_time, a.test_run_time))
         num_printed = 0
         slow_tests = []
         timeout_or_crash_tests = []
@@ -267,63 +198,37 @@ class Printer(object):
                 num_printed = num_printed + 1
                 unexpected_slow_tests.append(test_tuple)
 
-        self.print_timing("")
-        self._print_test_list_timing("%s slowest tests that are not "
-            "marked as SLOW and did not timeout/crash:" % NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
-        self.print_timing("")
+        self._print_debug("")
+        self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" %
+            NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
+        self._print_debug("")
         self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
-        self.print_timing("")
-        self._print_test_list_timing("Tests that timed out or crashed:",
-                                     timeout_or_crash_tests)
-        self.print_timing("")
+        self._print_debug("")
+        self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
+        self._print_debug("")
 
     def _print_test_list_timing(self, title, test_list):
-        """Print timing info for each test.
-
-        Args:
-          title: section heading
-          test_list: tests that fall in this section
-        """
-        if self.disabled('slowest'):
-            return
-
-        self.print_timing(title)
+        self._print_debug(title)
         for test_tuple in test_list:
             test_run_time = round(test_tuple.test_run_time, 1)
-            self.print_timing("  %s took %s seconds" % (test_tuple.test_name, test_run_time))
+            self._print_debug("  %s took %s seconds" % (test_tuple.test_name, test_run_time))
 
     def _print_directory_timings(self, directory_test_timings):
-        """Print timing info by directory for any directories that
-        take > 10 seconds to run.
-
-        Args:
-          directory_test_timing: time info for each directory
-        """
         timings = []
         for directory in directory_test_timings:
             num_tests, time_for_directory = directory_test_timings[directory]
-            timings.append((round(time_for_directory, 1), directory,
-                            num_tests))
+            timings.append((round(time_for_directory, 1), directory, num_tests))
         timings.sort()
 
-        self.print_timing("Time to process slowest subdirectories:")
+        self._print_debug("Time to process slowest subdirectories:")
         min_seconds_to_print = 10
         for timing in timings:
             if timing[0] > min_seconds_to_print:
-                self.print_timing(
-                    "  %s took %s seconds to run %s tests." % (timing[1],
-                    timing[0], timing[2]))
-        self.print_timing("")
+                self._print_debug("  %s took %s seconds to run %s tests." % (timing[1], timing[0], timing[2]))
+        self._print_debug("")
 
     def _print_statistics_for_test_timings(self, title, timings):
-        """Prints the median, mean and standard deviation of the values in
-        timings.
-
-        Args:
-          title: Title for these timings.
-          timings: A list of floats representing times.
-        """
-        self.print_timing(title)
+        self._print_debug(title)
         timings.sort()
 
         num_tests = len(timings)
@@ -345,19 +250,17 @@ class Printer(object):
             sum_of_deviations = math.pow(timing - mean, 2)
 
         std_deviation = math.sqrt(sum_of_deviations / num_tests)
-        self.print_timing("  Median:          %6.3f" % median)
-        self.print_timing("  Mean:            %6.3f" % mean)
-        self.print_timing("  90th percentile: %6.3f" % percentile90)
-        self.print_timing("  99th percentile: %6.3f" % percentile99)
-        self.print_timing("  Standard dev:    %6.3f" % std_deviation)
-        self.print_timing("")
+        self._print_debug("  Median:          %6.3f" % median)
+        self._print_debug("  Mean:            %6.3f" % mean)
+        self._print_debug("  90th percentile: %6.3f" % percentile90)
+        self._print_debug("  99th percentile: %6.3f" % percentile99)
+        self._print_debug("  Standard dev:    %6.3f" % std_deviation)
+        self._print_debug("")
 
     def _print_result_summary(self, result_summary):
-        """Print a short summary about how many tests passed.
+        if not self._options.debug_rwt_logging:
+            return
 
-        Args:
-          result_summary: information to log
-        """
         failed = result_summary.total_failures
         total = result_summary.total - result_summary.expected_skips
         passed = total - failed - result_summary.remaining
@@ -365,34 +268,22 @@ class Printer(object):
         if total > 0:
             pct_passed = float(passed) * 100 / total
 
-        self.print_actual("")
-        self.print_actual("=> Results: %d/%d tests passed (%.1f%%)" %
-                     (passed, total, pct_passed))
-        self.print_actual("")
-        self._print_result_summary_entry(result_summary,
-            test_expectations.NOW, "Tests to be fixed")
+        self._print_for_bot("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, pct_passed))
+        self._print_for_bot("")
+        self._print_result_summary_entry(result_summary, test_expectations.NOW, "Tests to be fixed")
 
-        self.print_actual("")
+        self._print_for_bot("")
         # FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
-        self._print_result_summary_entry(result_summary,
-            test_expectations.WONTFIX,
+        self._print_result_summary_entry(result_summary, test_expectations.WONTFIX,
             "Tests that will only be fixed if they crash (WONTFIX)")
-        self.print_actual("")
+        self._print_for_bot("")
 
-    def _print_result_summary_entry(self, result_summary, timeline,
-                                    heading):
-        """Print a summary block of results for a particular timeline of test.
-
-        Args:
-          result_summary: summary to print results for
-          timeline: the timeline to print results for (NOW, WONTFIX, etc.)
-          heading: a textual description of the timeline
-        """
+    def _print_result_summary_entry(self, result_summary, timeline, heading):
         total = len(result_summary.tests_by_timeline[timeline])
         not_passing = (total -
            len(result_summary.tests_by_expectation[test_expectations.PASS] &
                result_summary.tests_by_timeline[timeline]))
-        self.print_actual("=> %s (%d):" % (heading, not_passing))
+        self._print_for_bot("=> %s (%d):" % (heading, not_passing))
 
         for result in TestExpectations.EXPECTATION_ORDER:
             if result in (test_expectations.PASS, test_expectations.SKIP):
@@ -402,102 +293,60 @@ class Printer(object):
             desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
             if not_passing and len(results):
                 pct = len(results) * 100.0 / not_passing
-                self.print_actual("  %5d %-24s (%4.1f%%)" %
-                    (len(results), desc[len(results) != 1], pct))
-
-
-    def print_actual(self, msg):
-        if self.disabled('actual'):
-            return
-        self._buildbot_stream.write("%s\n" % msg)
-
-    def _print_config(self, msg):
-        self.write(msg, 'config')
-
-    def _print_expected(self, msg):
-        self.write(msg, 'expected')
-
-    def print_timing(self, msg):
-        self.write(msg, 'timing')
-
-    def print_one_line_summary(self, total, expected, unexpected):
-        """Print a one-line summary of the test run to stdout.
-
-        Args:
-          total: total number of tests run
-          expected: number of expected results
-          unexpected: number of unexpected results
-        """
-        if self.disabled('one-line-summary'):
-            return
+                self._print_for_bot("  %5d %-24s (%4.1f%%)" % (len(results), desc[len(results) != 1], pct))
 
+    def _print_one_line_summary(self, total, expected, unexpected):
         incomplete = total - expected - unexpected
         incomplete_str = ''
         if incomplete:
-            self._write("")
+            self._print_default("")
             incomplete_str = " (%d didn't run)" % incomplete
 
+        if self._options.debug_rwt_logging or unexpected:
+            self.writeln("")
+
+        summary = ''
         if unexpected == 0:
             if expected == total:
                 if expected > 1:
-                    self._write("All %d tests ran as expected." % expected)
+                    summary = "All %d tests ran as expected." % expected
                 else:
-                    self._write("The test ran as expected.")
+                    summary = "The test ran as expected."
             else:
-                self._write("%s ran as expected%s." % (grammar.pluralize('test', expected), incomplete_str))
+                summary = "%s ran as expected%s." % (grammar.pluralize('test', expected), incomplete_str)
         else:
-            self._write("%s ran as expected, %d didn't%s:" % (grammar.pluralize('test', expected), unexpected, incomplete_str))
-        self._write("")
+            summary = "%s ran as expected, %d didn't%s:" % (grammar.pluralize('test', expected), unexpected, incomplete_str)
+
+        self._print_quiet(summary)
+        self._print_quiet("")
 
     def print_finished_test(self, result, expected, exp_str, got_str, result_summary, retrying, test_files_list):
-        self.print_test_result(result, expected, exp_str, got_str)
-        self.print_progress(result_summary, retrying, test_files_list)
-
-    def print_test_result(self, result, expected, exp_str, got_str):
-        """Print the result of the test as determined by --print.
-
-        This routine is used to print the details of each test as it completes.
-
-        Args:
-            result   - The actual TestResult object
-            expected - Whether the result we got was an expected result
-            exp_str  - What we expected to get (used for tracing)
-            got_str  - What we actually got (used for tracing)
-
-        Note that we need all of these arguments even though they seem
-        somewhat redundant, in order to keep this routine from having to
-        known anything about the set of expectations.
-        """
-        if (self.enabled('trace-everything') or
-            self.enabled('trace-unexpected') and not expected):
+        self._print_test_result(result, expected, exp_str, got_str)
+        self._print_progress(result_summary, retrying, test_files_list)
+
+    def _print_test_result(self, result, expected, exp_str, got_str):
+        if self._options.details:
             self._print_test_trace(result, exp_str, got_str)
-        elif not expected and self.enabled('unexpected'):
+        elif not expected:
             self._print_unexpected_test_result(result)
 
     def _print_test_trace(self, result, exp_str, got_str):
-        """Print detailed results of a test (triggered by --print trace-*).
-        For each test, print:
-           - location of the expected baselines
-           - expected results
-           - actual result
-           - timing info
-        """
         test_name = result.test_name
-        self._write('trace: %s' % test_name)
+        self._print_default('trace: %s' % test_name)
 
         base = self._port.lookup_virtual_test_base(test_name)
         if base:
             args = ' '.join(self._port.lookup_virtual_test_args(test_name))
-            self._write(' base: %s' % base)
-            self._write(' args: %s' % args)
+            self._print_default(' base: %s' % base)
+            self._print_default(' args: %s' % args)
 
         for extension in ('.txt', '.png', '.wav', '.webarchive'):
             self._print_baseline(test_name, extension)
 
-        self._write('  exp: %s' % exp_str)
-        self._write('  got: %s' % got_str)
-        self._write(' took: %-.3f' % result.test_run_time)
-        self._write('')
+        self._print_default('  exp: %s' % exp_str)
+        self._print_default('  got: %s' % got_str)
+        self._print_default(' took: %-.3f' % result.test_run_time)
+        self._print_default('')
 
     def _print_baseline(self, test_name, extension):
         baseline = self._port.expected_filename(test_name, extension)
@@ -505,18 +354,14 @@ class Printer(object):
             relpath = self._port.relative_test_filename(baseline)
         else:
             relpath = '<none>'
-        self._write('  %s: %s' % (extension[1:], relpath))
+        self._print_default('  %s: %s' % (extension[1:], relpath))
 
     def _print_unexpected_test_result(self, result):
-        """Prints one unexpected test result line."""
         desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result.type][0]
-        self.write("  %s -> unexpected %s" % (result.test_name, desc), "unexpected")
+        self._print_quiet("  %s -> unexpected %s" % (result.test_name, desc))
 
-    def print_progress(self, result_summary, retrying, test_list):
+    def _print_progress(self, result_summary, retrying, test_list):
         """Print progress through the tests as determined by --print."""
-        if self.disabled('one-line-progress'):
-            return
-
         if result_summary.remaining == 0:
             self._meter.write_update('')
             return
@@ -531,11 +376,8 @@ class Printer(object):
             (action, percent_complete, result_summary.expected,
              result_summary.unexpected, result_summary.remaining))
 
-    def print_unexpected_results(self, unexpected_results):
-        """Prints a list of the unexpected results to the buildbot stream."""
-        if self.disabled('unexpected-results'):
-            return
-
+    def _print_unexpected_results(self, unexpected_results):
+        # Prints to the buildbot stream
         passes = {}
         flaky = {}
         regressions = {}
@@ -548,17 +390,11 @@ class Printer(object):
             expected = results['expected'].split(" ")
             if actual == ['PASS']:
                 if 'CRASH' in expected:
-                    add_to_dict_of_lists(passes,
-                                         'Expected to crash, but passed',
-                                         test)
+                    add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
                 elif 'TIMEOUT' in expected:
-                    add_to_dict_of_lists(passes,
-                                         'Expected to timeout, but passed',
-                                          test)
+                    add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
                 else:
-                    add_to_dict_of_lists(passes,
-                                         'Expected to fail, but passed',
-                                         test)
+                    add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
             elif len(actual) > 1:
                 # We group flaky tests by the first actual result we got.
                 add_to_dict_of_lists(flaky, actual[0], test)
@@ -568,23 +404,21 @@ class Printer(object):
         resultsjsonparser.for_each_test(unexpected_results['tests'], add_result)
 
         if len(passes) or len(flaky) or len(regressions):
-            self._buildbot_stream.write("\n")
-
+            self._print_for_bot("")
         if len(passes):
             for key, tests in passes.iteritems():
-                self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests)))
+                self._print_for_bot("%s: (%d)" % (key, len(tests)))
                 tests.sort()
                 for test in tests:
-                    self._buildbot_stream.write("  %s\n" % test)
-                self._buildbot_stream.write("\n")
-            self._buildbot_stream.write("\n")
+                    self._print_for_bot("  %s" % test)
+                self._print_for_bot("")
+            self._print_for_bot("")
 
         if len(flaky):
             descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
             for key, tests in flaky.iteritems():
                 result = TestExpectations.EXPECTATIONS[key.lower()]
-                self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n"
-                    % (descriptions[result][1], len(tests)))
+                self._print_for_bot("unexpected flakiness: %s (%d)" % (descriptions[result][1], len(tests)))
                 tests.sort()
 
                 for test in tests:
@@ -593,41 +427,41 @@ class Printer(object):
                     expected = result['expected'].split(" ")
                     result = TestExpectations.EXPECTATIONS[key.lower()]
                     new_expectations_list = list(set(actual) | set(expected))
-                    self._buildbot_stream.write("  %s = %s\n" %
-                        (test, " ".join(new_expectations_list)))
-                self._buildbot_stream.write("\n")
-            self._buildbot_stream.write("\n")
+                    self._print_for_bot("  %s = %s" % (test, " ".join(new_expectations_list)))
+                self._print_for_bot("")
+            self._print_for_bot("")
 
         if len(regressions):
             descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
             for key, tests in regressions.iteritems():
                 result = TestExpectations.EXPECTATIONS[key.lower()]
-                self._buildbot_stream.write(
-                    "Regressions: Unexpected %s : (%d)\n" % (
-                    descriptions[result][1], len(tests)))
+                self._print_for_bot("Regressions: unexpected %s : (%d)" % (descriptions[result][1], len(tests)))
                 tests.sort()
                 for test in tests:
-                    self._buildbot_stream.write("  %s = %s\n" % (test, key))
-                self._buildbot_stream.write("\n")
-            self._buildbot_stream.write("\n")
+                    self._print_for_bot("  %s = %s" % (test, key))
+                self._print_for_bot("")
 
         if len(unexpected_results['tests']) and self._options.debug_rwt_logging:
-            self._buildbot_stream.write("%s\n" % ("-" * 78))
+            self._print_for_bot("%s" % ("-" * 78))
 
-    def write_update(self, msg):
-        if self.disabled('updates'):
-            return
-        self._meter.write_update(msg)
+    def _print_quiet(self, msg):
+        self.writeln(msg)
 
-    def write(self, msg, option="misc"):
-        if self.disabled(option):
-            return
-        self._write(msg)
+    def _print_default(self, msg):
+        if not self._options.quiet:
+            self.writeln(msg)
+
+    def _print_debug(self, msg):
+        if self._options.debug_rwt_logging:
+            self.writeln(msg)
+
+    def _print_for_bot(self, msg):
+        self._buildbot_stream.write(msg + "\n")
 
-    def writeln(self, *args, **kwargs):
-        self._meter.writeln(*args, **kwargs)
+    def write_update(self, msg):
+        self._meter.write_update(msg)
 
-    def _write(self, msg):
+    def writeln(self, msg):
         self._meter.writeln(msg)
 
     def flush(self):
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py b/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
index fa80aca..564826c 100644
@@ -72,7 +72,7 @@ class  Testprinter(unittest.TestCase):
         stream.buflist = []
         stream.buf = ''
 
-    def get_printer(self, args=None, tty=False):
+    def get_printer(self, args=None):
         args = args or []
         printing_options = printing.print_options()
         option_parser = optparse.OptionParser(option_list=printing_options)
@@ -82,7 +82,6 @@ class  Testprinter(unittest.TestCase):
         nproc = 2
 
         regular_output = StringIO.StringIO()
-        regular_output.isatty = lambda: tty
         buildbot_output = StringIO.StringIO()
         printer = printing.Printer(self._port, options, regular_output, buildbot_output)
         return printer, regular_output, buildbot_output
@@ -122,25 +121,30 @@ class  Testprinter(unittest.TestCase):
         printer.print_config()
         self.assertTrue('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic' in err.getvalue())
 
+        self.reset(err)
+        printer._options.quiet = True
+        printer.print_config()
+        self.assertFalse('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic' in err.getvalue())
+
     def test_print_one_line_summary(self):
         printer, err, out = self.get_printer()
-        printer.print_one_line_summary(1, 1, 0)
+        printer._print_one_line_summary(1, 1, 0)
         self.assertWritten(err, ["The test ran as expected.\n", "\n"])
 
         printer, err, out = self.get_printer()
-        printer.print_one_line_summary(1, 1, 0)
+        printer._print_one_line_summary(1, 1, 0)
         self.assertWritten(err, ["The test ran as expected.\n", "\n"])
 
         printer, err, out = self.get_printer()
-        printer.print_one_line_summary(2, 1, 1)
-        self.assertWritten(err, ["1 test ran as expected, 1 didn't:\n", "\n"])
+        printer._print_one_line_summary(2, 1, 1)
+        self.assertWritten(err, ["\n", "1 test ran as expected, 1 didn't:\n", "\n"])
 
         printer, err, out = self.get_printer()
-        printer.print_one_line_summary(3, 2, 1)
-        self.assertWritten(err, ["2 tests ran as expected, 1 didn't:\n", "\n"])
+        printer._print_one_line_summary(3, 2, 1)
+        self.assertWritten(err, ["\n", "2 tests ran as expected, 1 didn't:\n", "\n"])
 
         printer, err, out = self.get_printer()
-        printer.print_one_line_summary(3, 2, 0)
+        printer._print_one_line_summary(3, 2, 0)
         self.assertWritten(err, ['\n', "2 tests ran as expected (1 didn't run).\n", '\n'])
 
     def test_print_unexpected_results(self):
@@ -199,27 +203,27 @@ class  Testprinter(unittest.TestCase):
 
         # test everything running as expected
         ur = get_unexpected_results(expected=True, passing=False, flaky=False)
-        printer.print_unexpected_results(ur)
+        printer._print_unexpected_results(ur)
         self.assertEmpty(err)
         self.assertEmpty(out)
 
         # test failures
         printer, err, out = self.get_printer()
         ur = get_unexpected_results(expected=False, passing=False, flaky=False)
-        printer.print_unexpected_results(ur)
+        printer._print_unexpected_results(ur)
         self.assertEmpty(err)
         self.assertNotEmpty(out)
 
         # test unexpected flaky
         printer, err, out = self.get_printer()
         ur = get_unexpected_results(expected=False, passing=False, flaky=True)
-        printer.print_unexpected_results(ur)
+        printer._print_unexpected_results(ur)
         self.assertEmpty(err)
         self.assertNotEmpty(out)
 
         printer, err, out = self.get_printer()
         ur = get_unexpected_results(expected=False, passing=False, flaky=False)
-        printer.print_unexpected_results(ur)
+        printer._print_unexpected_results(ur)
         self.assertEmpty(err)
         self.assertNotEmpty(out)
 
@@ -229,13 +233,13 @@ BUGX : failures/expected/timeout.html = TIMEOUT
 """
         printer, err, out = self.get_printer()
         ur = get_unexpected_results(expected=False, passing=False, flaky=False)
-        printer.print_unexpected_results(ur)
+        printer._print_unexpected_results(ur)
         self.assertEmpty(err)
         self.assertNotEmpty(out)
 
         printer, err, out = self.get_printer()
         ur = get_unexpected_results(expected=False, passing=True, flaky=False)
-        printer.print_unexpected_results(ur)
+        printer._print_unexpected_results(ur)
         self.assertEmpty(err)
         self.assertNotEmpty(out)
 
@@ -247,29 +251,7 @@ BUGX : failures/expected/timeout.html = TIMEOUT
     def test_details(self):
         printer, err, _ = self.get_printer(['--details'])
         result = self.get_result('passes/image.html')
-        printer.print_test_result(result, expected=False, exp_str='', got_str='')
-        self.assertNotEmpty(err)
-
-    def test_default(self):
-        printer, err, _ = self.get_printer()
-        printer.print_timing("foo")
-        self.assertEmpty(err)
-        printer._print_config("foo")
-        self.assertNotEmpty(err)
-
-    def test_quiet(self):
-        printer, err, _ = self.get_printer(['--quiet'])
-        printer._print_config("foo")
-        printer.print_timing("foo")
-        self.assertEmpty(err)
-
-    def test_verbose(self):
-        printer, err, _ = self.get_printer(['--verbose'])
-        printer.print_timing("foo")
-        self.assertNotEmpty(err)
-
-        printer, err, _ = self.get_printer(['--verbose'])
-        printer._print_config("foo")
+        printer._print_test_result(result, expected=False, exp_str='', got_str='')
         self.assertNotEmpty(err)