2010-04-09 Adam Barth <abarth@webkit.org>
author    abarth@webkit.org <abarth@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 9 Apr 2010 23:19:39 +0000 (23:19 +0000)
committer abarth@webkit.org <abarth@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Fri, 9 Apr 2010 23:19:39 +0000 (23:19 +0000)
        Reviewed by Eric Seidel.

        new-run-webkit-tests should talk about DumpRenderTree not test_shell
        https://bugs.webkit.org/show_bug.cgi?id=37371

        test_shell is some strange Chromium thing.
        DumpRenderTree (tm) is the real deal.

        * Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py: Added.
        * Scripts/webkitpy/layout_tests/layout_package/test_expectations.py:
        * Scripts/webkitpy/layout_tests/layout_package/test_failures.py:
        * Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py: Removed.
        * Scripts/webkitpy/layout_tests/port/chromium.py:
        * Scripts/webkitpy/layout_tests/port/server_process.py:
        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:

git-svn-id: https://svn.webkit.org/repository/webkit/trunk@57381 268f45cc-cd09-0410-ab3c-d52691b4dbfc

WebKitTools/ChangeLog
WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py [moved from WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py with 90% similarity]
WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py
WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py
WebKitTools/Scripts/webkitpy/layout_tests/port/server_process.py
WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py

WebKitTools/ChangeLog
index bac283a..87f9e42 100644 (file)
@@ -1,3 +1,21 @@
+2010-04-09  Adam Barth  <abarth@webkit.org>
+
+        Reviewed by Eric Seidel.
+
+        new-run-webkit-tests should talk about DumpRenderTree not test_shell
+        https://bugs.webkit.org/show_bug.cgi?id=37371
+
+        test_shell is some strange Chromium thing.
+        DumpRenderTree (tm) is the real deal.
+
+        * Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py: Added.
+        * Scripts/webkitpy/layout_tests/layout_package/test_expectations.py:
+        * Scripts/webkitpy/layout_tests/layout_package/test_failures.py:
+        * Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py: Removed.
+        * Scripts/webkitpy/layout_tests/port/chromium.py:
+        * Scripts/webkitpy/layout_tests/port/server_process.py:
+        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
+
 2010-04-09  Zoltan Horvath  <zoltan@webkit.org>
 
         Reviewed by Alexey Proskuryakov.
WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py [moved from WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py with 90% similarity]
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-"""A Thread object for running the test shell and processing URLs from a
+"""A Thread object for running DumpRenderTree and processing URLs from a
 shared queue.
 
-Each thread runs a separate instance of the test_shell binary and validates
+Each thread runs a separate instance of the DumpRenderTree binary and validates
 the output.  When there are no more URLs to process in the shared queue, the
 thread exits.
 """
@@ -48,18 +48,18 @@ import time
 import test_failures
 
 _log = logging.getLogger("webkitpy.layout_tests.layout_package."
-                         "test_shell_thread")
+                         "dump_render_tree_thread")
 
 
 def process_output(port, test_info, test_types, test_args, configuration,
                    output_dir, crash, timeout, test_run_time, actual_checksum,
                    output, error):
-    """Receives the output from a test_shell process, subjects it to a number
+    """Receives the output from a DumpRenderTree process, subjects it to a number
     of tests, and returns a list of failure types the test produced.
 
     Args:
       port: port-specific hooks
-      proc: an active test_shell process
+      proc: an active DumpRenderTree process
       test_info: Object containing the test filename, uri and timeout
       test_types: list of test types to subject the output to
       test_args: arguments to be passed to each test
@@ -171,7 +171,7 @@ class TestShellThread(threading.Thread):
 
     def __init__(self, port, filename_list_queue, result_queue,
                  test_types, test_args, image_path, shell_args, options):
-        """Initialize all the local state for this test shell thread.
+        """Initialize all the local state for this DumpRenderTree thread.
 
         Args:
           port: interface to port-specific hooks
@@ -182,7 +182,7 @@ class TestShellThread(threading.Thread):
           test_types: A list of TestType objects to run the test output
               against.
           test_args: A TestArguments object to pass to each TestType.
-          shell_args: Any extra arguments to be passed to test_shell.exe.
+          shell_args: Any extra arguments to be passed to DumpRenderTree.
           options: A property dictionary as produced by optparse. The
               command-line options should match those expected by
               run_webkit_tests; they are typically passed via the
@@ -304,7 +304,7 @@ class TestShellThread(threading.Thread):
                     self._current_dir, self._filename_list = \
                         self._filename_list_queue.get_nowait()
                 except Queue.Empty:
-                    self._kill_test_shell()
+                    self._kill_dump_render_tree()
                     tests_run_file.close()
                     return
 
@@ -324,9 +324,9 @@ class TestShellThread(threading.Thread):
             filename = test_info.filename
             tests_run_file.write(filename + "\n")
             if failures:
-                # Check and kill test shell if we need too.
-                if len([1 for f in failures if f.should_kill_test_shell()]):
-                    self._kill_test_shell()
+                # Check and kill DumpRenderTree if we need to.
+                if len([1 for f in failures if f.should_kill_dump_render_tree()]):
+                    self._kill_dump_render_tree()
                     # Reset the batch count since the shell just bounced.
                     batch_count = 0
                 # Print the error message(s).
@@ -341,7 +341,7 @@ class TestShellThread(threading.Thread):
 
             if batch_size > 0 and batch_count > batch_size:
                 # Bounce the shell and reset count.
-                self._kill_test_shell()
+                self._kill_dump_render_tree()
                 batch_count = 0
 
             if test_runner:
@@ -370,20 +370,20 @@ class TestShellThread(threading.Thread):
 
         worker.start()
 
-        # When we're running one test per test_shell process, we can enforce
-        # a hard timeout. the test_shell watchdog uses 2.5x the timeout
+        # When we're running one test per DumpRenderTree process, we can enforce
+        # a hard timeout. The DumpRenderTree watchdog uses 2.5x the timeout.
         # We want to be larger than that.
         worker.join(int(test_info.timeout) * 3.0 / 1000.0)
         if worker.isAlive():
             # If join() returned with the thread still running, the
-            # test_shell.exe is completely hung and there's nothing
+            # DumpRenderTree is completely hung and there's nothing
             # more we can do with it.  We have to kill all the
-            # test_shells to free it up. If we're running more than
-            # one test_shell thread, we'll end up killing the other
-            # test_shells too, introducing spurious crashes. We accept that
+            # DumpRenderTrees to free it up. If we're running more than
+            # one DumpRenderTree thread, we'll end up killing the other
+            # DumpRenderTrees too, introducing spurious crashes. We accept that
             # tradeoff in order to avoid losing the rest of this thread's
             # results.
-            _log.error('Test thread hung: killing all test_shells')
+            _log.error('Test thread hung: killing all DumpRenderTrees')
             worker._driver.stop()
 
         try:
@@ -398,7 +398,7 @@ class TestShellThread(threading.Thread):
         return failures
 
     def _run_test(self, test_info):
-        """Run a single test file using a shared test_shell process.
+        """Run a single test file using a shared DumpRenderTree process.
 
         Args:
           test_info: Object containing the test filename, uri and timeout
@@ -406,7 +406,7 @@ class TestShellThread(threading.Thread):
         Return:
           A list of TestFailure objects describing the error.
         """
-        self._ensure_test_shell_is_running()
+        self._ensure_dump_render_tree_is_running()
         # The pixel_hash is used to avoid doing an image dump if the
         # checksums match, so it should be set to a blank value if we
         # are generating a new baseline.  (Otherwise, an image from a
@@ -428,17 +428,17 @@ class TestShellThread(threading.Thread):
         self._test_stats.append(stats)
         return stats.failures
 
-    def _ensure_test_shell_is_running(self):
-        """Start the shared test shell, if it's not running.  Not for use when
-        running tests singly, since those each start a separate test shell in
+    def _ensure_dump_render_tree_is_running(self):
+        """Start the shared DumpRenderTree, if it's not running.  Not for use when
+        running tests singly, since those each start a separate DumpRenderTree in
         their own thread.
         """
         if (not self._driver or self._driver.poll() is not None):
             self._driver = self._port.start_driver(
                 self._image_path, self._shell_args)
 
-    def _kill_test_shell(self):
-        """Kill the test shell process if it's running."""
+    def _kill_dump_render_tree(self):
+        """Kill the DumpRenderTree process if it's running."""
         if self._driver:
             self._driver.stop()
             self._driver = None
WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
index 17268ab..4619cfd 100644 (file)
@@ -271,8 +271,8 @@ class TestExpectationsFile:
                                 IMAGE: ('image mismatch', 'image mismatch'),
                                 IMAGE_PLUS_TEXT: ('image and text mismatch',
                                                   'image and text mismatch'),
-                                CRASH: ('test shell crash',
-                                        'test shell crashes'),
+                                CRASH: ('DumpRenderTree crash',
+                                        'DumpRenderTree crashes'),
                                 TIMEOUT: ('test timed out', 'tests timed out'),
                                 MISSING: ('no expected result found',
                                           'no expected results found')}
WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py
index 56d7b5a..022973a 100644 (file)
@@ -79,8 +79,8 @@ class TestFailure(object):
         """Returns an HTML string to be included on the results.html page."""
         raise NotImplemented
 
-    def should_kill_test_shell(self):
-        """Returns True if we should kill the test shell before the next
+    def should_kill_dump_render_tree(self):
+        """Returns True if we should kill DumpRenderTree before the next
         test."""
         return False
 
@@ -145,7 +145,7 @@ class FailureWithType(TestFailure):
 
 
 class FailureTimeout(TestFailure):
-    """Test timed out.  We also want to restart the test shell if this
+    """Test timed out.  We also want to restart DumpRenderTree if this
     happens."""
 
     @staticmethod
@@ -155,7 +155,7 @@ class FailureTimeout(TestFailure):
     def result_html_output(self, filename):
         return "<strong>%s</strong>" % self.message()
 
-    def should_kill_test_shell(self):
+    def should_kill_dump_render_tree(self):
         return True
 
 
@@ -172,7 +172,7 @@ class FailureCrash(TestFailure):
         return "<strong>%s</strong> <a href=%s>stack</a>" % (self.message(),
                                                              stack)
 
-    def should_kill_test_shell(self):
+    def should_kill_dump_render_tree(self):
         return True
 
 
WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py
index be95849..4435760 100644 (file)
@@ -76,8 +76,8 @@ class ChromiumPort(base.Port):
 
     def check_build(self, needs_http):
         result = True
-        test_shell_binary_path = self._path_to_driver()
-        result = check_file_exists(test_shell_binary_path,
+        dump_render_tree_binary_path = self._path_to_driver()
+        result = check_file_exists(dump_render_tree_binary_path,
                                    'test driver')
         if result:
             result = (self._check_driver_build_up_to_date(self._options.configuration)
@@ -97,8 +97,8 @@ class ChromiumPort(base.Port):
         return result
 
     def check_sys_deps(self, needs_http):
-        test_shell_binary_path = self._path_to_driver()
-        proc = subprocess.Popen([test_shell_binary_path,
+        dump_render_tree_binary_path = self._path_to_driver()
+        proc = subprocess.Popen([dump_render_tree_binary_path,
                                 '--check-layout-test-sys-deps'])
         if proc.wait():
             _log.error('System dependencies check failed.')
@@ -134,8 +134,8 @@ class ChromiumPort(base.Port):
 
     def setup_test_run(self):
         # Delete the disk cache if any to ensure a clean test run.
-        test_shell_binary_path = self._path_to_driver()
-        cachedir = os.path.split(test_shell_binary_path)[0]
+        dump_render_tree_binary_path = self._path_to_driver()
+        cachedir = os.path.split(dump_render_tree_binary_path)[0]
         cachedir = os.path.join(cachedir, "cache")
         if os.path.exists(cachedir):
             shutil.rmtree(cachedir)
@@ -213,7 +213,7 @@ class ChromiumPort(base.Port):
                 if (debug_mtime > release_mtime and configuration == 'Release' or
                     release_mtime > debug_mtime and configuration == 'Debug'):
                     _log.warning('You are not running the most '
-                                 'recent test_shell binary. You need to '
+                                 'recent DumpRenderTree binary. You need to '
                                  'pass --debug or not to select between '
                                  'Debug and Release.')
                     _log.warning('')
@@ -256,7 +256,7 @@ class ChromiumDriver(base.Driver):
             cmd += options
 
         # We need to pass close_fds=True to work around Python bug #2320
-        # (otherwise we can hang when we kill test_shell when we are running
+        # (otherwise we can hang when we kill DumpRenderTree when we are running
         # multiple threads). See http://bugs.python.org/issue2320 .
         # Note that close_fds isn't supported on Windows, but this bug only
         # shows up on Mac and Linux.
@@ -294,7 +294,7 @@ class ChromiumDriver(base.Driver):
             if line == '' and self.poll() is not None:
                 # This is hex code 0xc000001d, which is used for abrupt
                 # termination. This happens if we hit ctrl+c from the prompt
-                # and we happen to be waiting on the test_shell.
+                # and we happen to be waiting on the DumpRenderTree.
                 # sdoyon: Not sure for which OS and in what circumstances the
                 # above code is valid. What works for me under Linux to detect
                 # ctrl+c is for the subprocess returncode to be negative
@@ -336,8 +336,8 @@ class ChromiumDriver(base.Driver):
             if sys.platform not in ('win32', 'cygwin'):
                 # Closing stdin/stdout/stderr hangs sometimes on OS X,
                 # (see __init__(), above), and anyway we don't want to hang
-                # the harness if test_shell is buggy, so we wait a couple
-                # seconds to give test_shell a chance to clean up, but then
+                # the harness if DumpRenderTree is buggy, so we wait a couple
+                # seconds to give DumpRenderTree a chance to clean up, but then
                 # force-kill the process if necessary.
                 KILL_TIMEOUT = 3.0
                 timeout = time.time() + KILL_TIMEOUT
WebKitTools/Scripts/webkitpy/layout_tests/port/server_process.py
index d072587..f1c6d73 100644 (file)
@@ -86,7 +86,7 @@ class ServerProcess:
         if self.crashed:
             # This is hex code 0xc000001d, which is used for abrupt
             # termination. This happens if we hit ctrl+c from the prompt
-            # and we happen to be waiting on the test_shell.
+            # and we happen to be waiting on the DumpRenderTree.
             # sdoyon: Not sure for which OS and in what circumstances the
             # above code is valid. What works for me under Linux to detect
             # ctrl+c is for the subprocess returncode to be negative
@@ -205,8 +205,8 @@ class ServerProcess:
         if sys.platform not in ('win32', 'cygwin'):
             # Closing stdin/stdout/stderr hangs sometimes on OS X,
             # (see restart(), above), and anyway we don't want to hang
-            # the harness if test_shell is buggy, so we wait a couple
-            # seconds to give test_shell a chance to clean up, but then
+            # the harness if DumpRenderTree is buggy, so we wait a couple
+            # seconds to give DumpRenderTree a chance to clean up, but then
             # force-kill the process if necessary.
             KILL_TIMEOUT = 3.0
             timeout = time.time() + KILL_TIMEOUT
WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index d685442..e5a0108 100755 (executable)
@@ -27,7 +27,7 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-"""Run layout tests using the test_shell.
+"""Run layout tests using DumpRenderTree.
 
 This is a port of the existing webkit test script run-webkit-tests.
 
@@ -64,7 +64,7 @@ from layout_package import test_expectations
 from layout_package import json_layout_results_generator
 from layout_package import metered_stream
 from layout_package import test_failures
-from layout_package import test_shell_thread
+from layout_package import dump_render_tree_thread
 from layout_package import test_files
 from test_types import fuzzy_image_diff
 from test_types import image_diff
@@ -164,7 +164,7 @@ class TestRunner:
 
     # The per-test timeout in milliseconds, if no --time-out-ms option was
     # given to run_webkit_tests. This should correspond to the default timeout
-    # in test_shell.exe.
+    # in DumpRenderTree.
     DEFAULT_TEST_TIMEOUT_MS = 6 * 1000
 
     NUM_RETRY_ON_UNEXPECTED_FAILURE = 1
@@ -475,8 +475,8 @@ class TestRunner:
             filename_queue.put(item)
         return filename_queue
 
-    def _get_test_shell_args(self, index):
-        """Returns the tuple of arguments for tests and for test_shell."""
+    def _get_dump_render_tree_args(self, index):
+        """Returns the tuple of arguments for tests and for DumpRenderTree."""
         shell_args = []
         test_args = test_type_base.TestArguments()
         png_path = None
@@ -504,7 +504,7 @@ class TestRunner:
                 return True
         return False
 
-    def _instantiate_test_shell_threads(self, test_files, result_summary):
+    def _instantiate_dump_render_tree_threads(self, test_files, result_summary):
         """Instantitates and starts the TestShellThread(s).
 
         Return:
@@ -514,22 +514,22 @@ class TestRunner:
 
         # Instantiate TestShellThreads and start them.
         threads = []
-        for i in xrange(int(self._options.num_test_shells)):
+        for i in xrange(int(self._options.num_dump_render_trees)):
             # Create separate TestTypes instances for each thread.
             test_types = []
             for t in self._test_types:
                 test_types.append(t(self._port,
                                     self._options.results_directory))
 
-            test_args, png_path, shell_args = self._get_test_shell_args(i)
-            thread = test_shell_thread.TestShellThread(self._port,
-                                                       filename_queue,
-                                                       self._result_queue,
-                                                       test_types,
-                                                       test_args,
-                                                       png_path,
-                                                       shell_args,
-                                                       self._options)
+            test_args, png_path, shell_args = self._get_dump_render_tree_args(i)
+            thread = dump_render_tree_thread.TestShellThread(self._port,
+                                                             filename_queue,
+                                                             self._result_queue,
+                                                             test_types,
+                                                             test_args,
+                                                             png_path,
+                                                             shell_args,
+                                                             self._options)
             if self._is_single_threaded():
                 thread.run_in_main_thread(self, result_summary)
             else:
@@ -540,7 +540,7 @@ class TestRunner:
 
     def _is_single_threaded(self):
         """Returns whether we should run all the tests in the main thread."""
-        return int(self._options.num_test_shells) == 1
+        return int(self._options.num_dump_render_trees) == 1
 
     def _run_tests(self, file_list, result_summary):
         """Runs the tests in the file_list.
@@ -556,9 +556,9 @@ class TestRunner:
               in the form {filename:filename, test_run_time:test_run_time}
             result_summary: summary object to populate with the results
         """
-        self._meter.update('Starting test shells ...')
-        threads = self._instantiate_test_shell_threads(file_list,
-                                                       result_summary)
+        self._meter.update('Starting DumpRenderTrees ...')
+        threads = self._instantiate_dump_render_tree_threads(file_list,
+                                                             result_summary)
 
         # Wait for the threads to finish and collect test failures.
         failures = {}
@@ -683,7 +683,7 @@ class TestRunner:
                              individual_test_timings)
 
         # Write the summary to disk (results.html) and maybe open the
-        # test_shell to this file.
+        # DumpRenderTree to this file.
         wrote_results = self._write_results_html_file(result_summary)
         if not self._options.noshow_results and wrote_results:
             self._show_results_html_file()
@@ -957,7 +957,7 @@ class TestRunner:
                   (t['name'], t['num_tests'], t['total_time']))
             cuml_time += t['total_time']
         write("   %6.2f cumulative, %6.2f optimal" %
-              (cuml_time, cuml_time / int(self._options.num_test_shells)))
+              (cuml_time, cuml_time / int(self._options.num_dump_render_trees)))
         write("")
 
         self._print_aggregate_test_statistics(write, individual_test_timings)
@@ -970,18 +970,18 @@ class TestRunner:
         Args:
           write: A callback to write info to (e.g., a LoggingWriter) or
               sys.stdout.write.
-          individual_test_timings: List of test_shell_thread.TestStats for all
+          individual_test_timings: List of dump_render_tree_thread.TestStats for all
               tests.
         """
         test_types = individual_test_timings[0].time_for_diffs.keys()
-        times_for_test_shell = []
+        times_for_dump_render_tree = []
         times_for_diff_processing = []
         times_per_test_type = {}
         for test_type in test_types:
             times_per_test_type[test_type] = []
 
         for test_stats in individual_test_timings:
-            times_for_test_shell.append(test_stats.test_run_time)
+            times_for_dump_render_tree.append(test_stats.test_run_time)
             times_for_diff_processing.append(
                 test_stats.total_time_for_all_diffs)
             time_for_diffs = test_stats.time_for_diffs
@@ -990,7 +990,7 @@ class TestRunner:
                     time_for_diffs[test_type])
 
         self._print_statistics_for_test_timings(write,
-            "PER TEST TIME IN TESTSHELL (seconds):", times_for_test_shell)
+            "PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)
         self._print_statistics_for_test_timings(write,
             "PER TEST DIFF PROCESSING TIMES (seconds):",
             times_for_diff_processing)
@@ -1005,11 +1005,11 @@ class TestRunner:
         Args:
           write: A callback to write info to (e.g., a LoggingWriter) or
               sys.stdout.write.
-          individual_test_timings: List of test_shell_thread.TestStats for all
+          individual_test_timings: List of dump_render_tree_thread.TestStats for all
               tests.
           result_summary: summary object for test run
         """
-        # Reverse-sort by the time spent in test_shell.
+        # Reverse-sort by the time spent in DumpRenderTree.
         individual_test_timings.sort(lambda a, b:
             cmp(b.test_run_time, a.test_run_time))
 
@@ -1330,7 +1330,7 @@ class TestRunner:
         return True
 
     def _show_results_html_file(self):
-        """Launches the test shell open to the results.html page."""
+        """Shows the results.html page."""
         results_filename = os.path.join(self._options.results_directory,
                                         "results.html")
         self._port.show_results_html_file(results_filename)
@@ -1421,12 +1421,12 @@ def main(options, args):
                 shutil.rmtree(os.path.join(options.results_directory, dirname),
                               ignore_errors=True)
 
-    if not options.num_test_shells:
+    if not options.num_dump_render_trees:
         # TODO(ojan): Investigate perf/flakiness impact of using numcores + 1.
-        options.num_test_shells = port_obj.num_cores()
+        options.num_dump_render_trees = port_obj.num_cores()
 
     write = create_logging_writer(options, 'config')
-    write("Running %s test_shells in parallel" % options.num_test_shells)
+    write("Running %s DumpRenderTrees in parallel" % options.num_dump_render_trees)
 
     if not options.time_out_ms:
         if options.configuration == "Debug":
@@ -1543,7 +1543,7 @@ def parse_args(args=None):
                                   " into the platform directory, overwriting "
                                   "whatever's already there.")
     option_parser.add_option("", "--noshow-results", action="store_true",
-                             default=False, help="don't launch the test_shell"
+                             default=False, help="don't launch DumpRenderTree"
                              " with results after the tests are done")
     option_parser.add_option("", "--full-results-html", action="store_true",
                              default=False, help="show all failures in "
@@ -1559,8 +1559,8 @@ def parse_args(args=None):
                              default=False,
                              help="Run all tests, even those marked SKIP "
                                   "in the test list")
-    option_parser.add_option("", "--num-test-shells",
-                             help="Number of testshells to run in parallel.")
+    option_parser.add_option("", "--num-dump_render_trees",
+                             help="Number of DumpRenderTrees to run in parallel.")
     option_parser.add_option("", "--use-apache", action="store_true",
                              default=False,
                              help="Whether to use apache instead of lighttpd.")
@@ -1568,7 +1568,7 @@ def parse_args(args=None):
                              help="Set the timeout for each test")
     option_parser.add_option("", "--run-singly", action="store_true",
                              default=False,
-                             help="run a separate test_shell for each test")
+                             help="run a separate DumpRenderTree for each test")
     option_parser.add_option("", "--num-slow-tests-to-log", default=50,
                              help="Number of slow tests whose timings "
                                   "to print.")
@@ -1597,13 +1597,13 @@ def parse_args(args=None):
                                   "test (implies --verbose)")
     option_parser.add_option("", "--startup-dialog", action="store_true",
                              default=False,
-                             help="create a dialog on test_shell.exe startup")
+                             help="create a dialog on DumpRenderTree startup")
     option_parser.add_option("", "--gp-fault-error-box", action="store_true",
                              default=False,
                              help="enable Windows GP fault error box")
     option_parser.add_option("", "--wrapper",
                              help="wrapper command to insert before "
-                                  "invocations of test_shell; option is split "
+                                  "invocations of DumpRenderTree; option is split "
                                   "on whitespace before running. (Example: "
                                   "--wrapper='valgrind --smc-check=all')")
     option_parser.add_option("", "--test-list", action="append",
@@ -1628,7 +1628,7 @@ def parse_args(args=None):
     option_parser.add_option("", "--batch-size",
                              default=None,
                              help=("Run a the tests in batches (n), after "
-                                   "every n tests, the test shell is "
+                                   "every n tests, DumpRenderTree is "
                                    "relaunched."))
     option_parser.add_option("", "--builder-name",
                              default="DUMMY_BUILDER_NAME",