# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import codecs
import json
import logging
import os
import platform
import Queue
import re
import StringIO
import sys
import thread
import threading
import time
import unittest

from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost

from webkitpy import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.port import Port
from webkitpy.port import test
from webkitpy.test.skip import skip_if
from webkitpy.tool.mocktool import MockOptions


def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
    extra_args = extra_args or []
    args = []
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if not new_results:
        args.append('--no-new-test-results')

    if '--child-processes' not in extra_args:
        args.extend(['--child-processes', 1])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)

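# For illustration: parse_args(['--pixel-tests']) builds an argument list like
#   ['--platform', 'test', '--no-new-test-results', '--child-processes', 1,
#    '--pixel-tests', 'passes', 'http/tests', 'websocket/tests',
#    'failures/expected/*']
# because no platform, child-process count, or explicit test paths were supplied.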

def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
    options, parsed_args = parse_args(extra_args, tests_included)
    if not port_obj:
        host = host or MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj

    logging_stream = StringIO.StringIO()
    run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    return run_details.exit_code == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
    options, parsed_args = parse_args(extra_args=extra_args,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = host or MockHost()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
    return (run_details, output, host.user)


def run_and_capture(port_obj, options, parsed_args, shared_port=True):
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        logging_stream = StringIO.StringIO()
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()
    return (run_details, logging_stream)


def get_tests_run(args, host=None):
    results = get_test_results(args, host)
    return [result.test_name for result in results]


def get_test_batches(args, host=None):
    results = get_test_results(args, host)
    batches = []
    batch = []
    current_pid = None
    for result in results:
        if batch and result.pid != current_pid:
            batches.append(batch)
            batch = []
        batch.append(result.test_name)
        # Track the worker that produced this result so that consecutive
        # results from the same worker land in the same batch. (Without this
        # update, every result after the first would start a new batch.)
        current_pid = result.pid
    if batch:
        batches.append(batch)
    return batches

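# For illustration (hypothetical names and pids): results for ['a.html',
# 'b.html', 'c.html', 'd.html'] produced by worker pids [101, 101, 102, 101]
# batch up as [['a.html', 'b.html'], ['c.html'], ['d.html']] -- a batch is a
# run of consecutive results from one worker, not a per-pid total.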

def get_test_results(args, host=None):
    options, parsed_args = parse_args(args, tests_included=True)

    host = host or MockHost()
    port_obj = host.port_factory.get(port_name=options.platform, options=options)

    oc = outputcapture.OutputCapture()
    oc.capture_output()
    logging_stream = StringIO.StringIO()
    try:
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()

    all_results = []
    if run_details.initial_results:
        all_results.extend(run_details.initial_results.all_results)

    if run_details.retry_results:
        all_results.extend(run_details.retry_results.all_results)
    return all_results


def parse_full_results(full_results_text):
    json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
    compressed_results = json.loads(json_to_eval)
    return compressed_results

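# A doctest-style illustration (hypothetical payload): full_results.json is a
# JSONP-style document, so the ADD_RESULTS(...) wrapper must be stripped before
# the body parses as JSON:
#
#   >>> parse_full_results('ADD_RESULTS({"num_regressions": 0});')
#   {u'num_regressions': 0}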

class StreamTestingMixin(object):
    def assertContains(self, stream, string):
        self.assertTrue(string in stream.getvalue())

    def assertEmpty(self, stream):
        self.assertFalse(stream.getvalue())

    def assertNotEmpty(self, stream):
        self.assertTrue(stream.getvalue())


class RunTest(unittest.TestCase, StreamTestingMixin):
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()

    def test_basic(self):
        options, args = parse_args(tests_included=True)
        logging_stream = StringIO.StringIO()
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options)
        details = run_webkit_tests.run(port_obj, options, args, logging_stream)

        # These numbers will need to be updated whenever we add new tests.
        self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
        self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
        self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
        self.assertEqual(details.retry_results.total, test.TOTAL_RETRIES)

        one_line_summary = "%d tests ran as expected, %d didn't:\n" % (
            details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name),
            len(details.initial_results.unexpected_results_by_name))
        self.assertTrue(one_line_summary in logging_stream.buflist)

        # Ensure the results were summarized properly.
        self.assertEqual(details.summarized_results['num_regressions'], details.exit_code)

        # Ensure the image diff percentage is in the results.
        self.assertEqual(details.summarized_results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)

        # Ensure the results were written out and displayed.
        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        self.assertEqual(json.loads(json_to_eval), details.summarized_results)

        self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_batch_size(self):
        batch_tests_run = get_test_batches(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_max_locked_shards(self):
        # Test the default of using one locked shard, even when there is more
        # than one child process.
        if not self.should_test_processes:
            return
        save_env_webkit_test_max_locked_shards = None
        if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
            save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
            del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
        _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
        try:
            self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
        finally:
            if save_env_webkit_test_max_locked_shards:
                os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/passes', 'passes'],
                tests_included=True, shared_port=False)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))

    def test_dryrun(self):
        tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(tests_run, [])

        tests_run = get_tests_run(['-n'])
        self.assertEqual(tests_run, [])

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether the worker runs in-process or out. In-process exceptions
        # propagate as normal, which lets us see the full stack trace and
        # traceback from the worker. The downside is that it could be any
        # error, but that is actually useful for testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions (a subclass of BaseException), which carry a string
        # capture of the worker's stack. That string can be printed, but it
        # doesn't display properly in the unit test exception handlers.
        self.assertRaises(BaseException, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(BaseException, logging_run,
                ['--child-processes', '2', '--force', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)

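    # A minimal sketch of the repackaging described above, with an assumed
    # shape (the real WorkerException lives in webkitpy's message-pool code):
    #
    #     class WorkerException(BaseException):
    #         """Carries a worker's formatted traceback as a plain string."""
    #
    #     try:
    #         run_one_test()                      # hypothetical worker call
    #     except Exception:
    #         stack = traceback.format_exc()      # string capture of the stack
    #         raise WorkerException(stack)        # re-raised in the manager process
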
    def test_full_results_html(self):
        # FIXME: verify html?
        details, _, _ = logging_run(['--full-results-html'])
        self.assertEqual(details.exit_code, 0)

    def test_hung_thread(self):
        details, err, _ = logging_run(['--run-singly', '--time-out-ms=50', 'failures/expected/hang.html'], tests_included=True)
        # Note that hang.html is marked as WontFix and all WontFix tests are
        # expected to Pass, so that actually running them generates an "unexpected" error.
        self.assertEqual(details.exit_code, 1)
        self.assertNotEmpty(err)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run, ['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(KeyboardInterrupt, logging_run,
                ['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True, shared_port=False)

    def test_no_tests_found(self):
        details, err, _ = logging_run(['resources'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        details, err, _ = logging_run(['foo'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_natural_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)

    def test_natural_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)

    def test_random_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(tests_run.count('passes/audio.html'), 2)
        self.assertEqual(tests_run.count('passes/args.html'), 2)

    def test_no_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_with_directory_entries_in_natural_order(self):
        tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])

    def test_gc_between_tests(self):
        self.assertTrue(passing_run(['--gc-between-tests']))

    def test_complex_text(self):
        self.assertTrue(passing_run(['--complex-text']))

    def test_threaded(self):
        self.assertTrue(passing_run(['--threaded']))

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'])
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
                          num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'])
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEqual(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])

        # Now check that we don't run anything.
        self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])

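    # For reference, the --skipped modes exercised above: 'default' honors the
    # skip list, 'ignore' also runs the skipped tests, 'only' runs just the
    # skipped tests, and 'always' keeps a test skipped even when it is named
    # explicitly on the command line.
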
    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests should be: number_of_tests *
        # repeat_each * iterations
        host = MockHost()
        _, err, _ = logging_run(
            ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
            tests_included=True, host=host)
        self.assertContains(err, "All 16 tests ran as expected.\n")

    def test_run_chunk(self):
        # Test that we actually select the right chunk
        all_tests_run = get_tests_run(['passes', 'failures'])
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
        self.assertEqual(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
        self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_test_batches(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')

        batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_run_singly_actually_runs_tests(self):
        details, _, _ = logging_run(['--run-singly'], tests_included=True)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 1)  # failures/expected/hang.html actually passes w/ --run-singly.

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'])
        self.assertEqual(tests_run, ['passes/text.html'])

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'])
        self.assertEqual(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        # FIXME: 'keybaord' is a typo, so this path matches no test and the
        # run is trivially empty. Correcting it to keyboard.html would actually
        # run the test, since skipped tests run when specified explicitly (see
        # test_keyboard_interrupt above).
        tests_run = get_tests_run(['failures/expected/keybaord.html'])
        self.assertEqual([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                          'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, -1)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_missing_and_unexpected_results(self):
        # Test that missing and unexpected results are reported correctly in
        # full_results.json.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"text-image-checksum.html":{"report":"REGRESSION","expected":"PASS","actual":"IMAGE+TEXT","image_diff_percent":1},"missing_text.html":{"report":"MISSING","expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_pixel_test_directories(self):
        host = MockHost()

        # Both tests have failing checksums. We include only the first in the
        # pixel-test directories, so only that one should fail.
        args = ['--pixel-tests', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                'failures/unexpected/pixeldir/image_in_pixeldir.html',
                'failures/unexpected/image_not_in_pixeldir.html']
        details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)

        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"report":"REGRESSION","expected":"PASS","actual":"IMAGE"'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)

    def test_missing_and_unexpected_results_with_custom_exit_code(self):
        # Test that a port can customize the exit code it reports by
        # overriding exit_code_from_summarized_results().
        class CustomExitCodePort(test.TestPort):
            def exit_code_from_summarized_results(self, unexpected_results):
                return unexpected_results['num_regressions'] + unexpected_results['num_missing']

        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
        test_port = CustomExitCodePort(host, options=options)
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host, port_obj=test_port)
        self.assertEqual(details.exit_code, 2)

    def test_crash_with_stderr(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"report":"REGRESSION","expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin.
        if not self._platform.is_mac():
            return
        mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        expected_crash_log = mock_crash_report
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)

    def test_web_process_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin.
        if not self._platform.is_mac():
            return
        mock_crash_report = make_mock_crash_report_darwin('WebProcess', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
        logging_run(['failures/unexpected/web-process-crash-with-stderr.html'], tests_included=True, host=host)
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), mock_crash_report)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        details, regular_output, user = logging_run(
            ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
            tests_included=True, host=host)

        # The incremental results should have been written while the run was
        # in progress and then deleted once the run completed.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        # This checks that we report only the number of tests that actually failed.
        self.assertEqual(details.exit_code, 1)

        # This checks that passes/text.html is considered SKIPped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        _, _, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))

        # Now we test that --clobber-old-results does remove the old entries and the old retries,
        # and that we don't retry again.
        host = MockHost()
        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Clobbering old results' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))

    def test_retrying_force_pixel_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--no-pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = parse_full_results(json_string)
        self.assertEqual(full_results["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "image_diff_percent": 1, "report": "REGRESSION"})
        self.assertFalse(full_results["pixel_tests_enabled"])
        self.assertEqual(details.enabled_pixel_tests_in_retry, True)

    def test_retrying_uses_retries_directory(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))

    def test_run_order__inline(self):
        # These tests check that we run tests in ascending alphabetical order
        # within each directory. HTTP tests are sharded separately from other
        # tests, so we check both.
        tests_run = get_tests_run(['-i', 'passes/passes', 'passes'])
        self.assertEqual(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['http/tests/passes'])
        self.assertEqual(tests_run, sorted(tests_run))

    def test_tolerance(self):
        class ImageDiffTestPort(test.TestPort):
            def diff_image(self, expected_contents, actual_contents, tolerance=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return (True, 1, None)

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            host = MockHost()
            test_port = ImageDiffTestPort(host, options=options)
            res = passing_run(args, port_obj=test_port, tests_included=True)
            self.assertTrue(res)
            return test_port

        base_args = ['--pixel-tests', '--no-new-test-results', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_virtual(self):
        self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
                                     'virtual/passes/text.html', 'virtual/passes/args.html']))

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_skip_reftests_if_no_ref_tests(self):
        tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)
        tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'])
        self.assertEqual([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_test_results(['passes/reftest.html'])
        self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_test_results(['passes/mismatch.html'])
        self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])

    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
        host = MockHost()
        _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"unlistedtest.html":{"report":"MISSING","expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
        self.assertTrue(json_string.find('"num_regressions":4') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
        self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                    tests_included=True, host=host))

    def test_no_http_and_force(self):
        # See test_run_force: using --force raises an exception.
        # FIXME: We would like to check the warnings generated.
        self.assertRaises(ValueError, logging_run, ['--force', '--no-http'])

    @staticmethod
    def has_test_of_type(tests, test_type):
        return [t for t in tests if test_type in t]

    def test_no_http_tests(self):
        batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'websocket'))

        batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'])
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'http'))
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))

        batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'websocket'))

    def test_platform_tests_are_found(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
        self.assertTrue('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Test to ensure that we don't generate -pretty.html if PrettyPatch isn't available.
        host = MockHost()
        _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertTrue(any(f.endswith('-diff.txt') for f in written_files.keys()))
        self.assertFalse(any(f.endswith('-pretty-diff.html') for f in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
        self.assertEqual(full_results['has_pretty_patch'], False)

    def test_unsupported_platform(self):
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)

        self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(), '')
        self.assertTrue('unsupported platform' in stderr.getvalue())

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure logging in the
        # child process (e.g., on win32) and we need to make sure that works and we still
        # see the verbose log output. However, we can't use logging_run() because using
        # outputcapture to capture stdout and stderr later results in a nonpicklable host.

        # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
        if not self.should_test_processes:
            return

        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO.StringIO()
        run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
        self.assertTrue('text.html passed' in logging_stream.getvalue())
        self.assertTrue('image.html passed' in logging_stream.getvalue())


class EndToEndTest(unittest.TestCase):
    def test_reftest_with_two_notrefs(self):
        # Test that reftests with multiple match (and/or mismatch) references
        # are summarized correctly in full_results.json.
        host = MockHost()
        _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()

        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = parse_full_results(json_string)
        self.assertTrue("multiple-match-success.html" not in full_results["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-mismatch-success.html" not in full_results["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-both-success.html" not in full_results["tests"]["reftests"]["foo"])
        self.assertEqual(full_results["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "image_diff_percent": 1, "report": "REGRESSION"})
        self.assertEqual(full_results["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "report": "REGRESSION"})
        self.assertEqual(full_results["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "report": "REGRESSION"})


class RebaselineTest(unittest.TestCase, StreamTestingMixin):
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that file_list contains each expected baseline and that err
        logged the 'Writing new expected result' message for it."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertContains(err, baseline_msg)

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)

    def test_missing_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 10)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations in the version-specific directories
        # for both existing and new baselines.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)


class PortTest(unittest.TestCase):
    def assert_mock_port_works(self, port_name, args=None):
        args = args or []
        self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))

    def disabled_test_mac_lion(self):
        self.assert_mock_port_works('mac-lion')


class MainTest(unittest.TestCase):
    def test_exception_handling(self):
        orig_run_fn = run_webkit_tests.run

        # unused args pylint: disable=W0613
        def interrupting_run(port, options, args, stderr):
            raise KeyboardInterrupt

        def successful_run(port, options, args, stderr):

            class FakeRunDetails(object):
                exit_code = -1

            return FakeRunDetails()

        def exception_raising_run(port, options, args, stderr):
            assert False

        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        try:
            run_webkit_tests.run = interrupting_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, run_webkit_tests.INTERRUPTED_EXIT_STATUS)

            run_webkit_tests.run = successful_run
            res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
            self.assertEqual(res, -1)

            run_webkit_tests.run = exception_raising_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        finally:
            run_webkit_tests.run = orig_run_fn