Remove virtual test support from webkitpy
[WebKit-https.git] Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import codecs
import json
import logging
import os
import platform
import Queue
import re
import StringIO
import sys
import thread
import threading
import time
import unittest

from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost

from webkitpy import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.port import Port
from webkitpy.port import test
from webkitpy.test.skip import skip_if
from webkitpy.tool.mocktool import MockOptions


def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
    extra_args = extra_args or []
    args = []
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if not new_results:
        args.append('--no-new-test-results')

    if '--child-processes' not in extra_args:
        # Pass the value as a string for consistency with the other call
        # sites in this file, which use, e.g., ['--child-processes', '2'].
        args.extend(['--child-processes', '1'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)

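# A hedged usage sketch of the helper above (derived from its defaults, not
# from a recorded run): with no extra_args, the command line handed to
# run_webkit_tests.parse_args is equivalent to
#
#   --platform test --no-new-test-results --child-processes 1 \
#       passes http/tests websocket/tests failures/expected/*
#
# and the helper returns the resulting (options, parsed_args) pair.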

def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
    options, parsed_args = parse_args(extra_args, tests_included)
    if not port_obj:
        host = host or MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj

    logging_stream = StringIO.StringIO()
    run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    return run_details.exit_code == 0

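# The shared_port branch above is a plain monkeypatch: port_factory.get is
# replaced by a lambda that ignores its arguments, so every subsequent lookup
# returns the same port instance. A minimal sketch of the idea, using the
# names already in scope above:
#
#   host.port_factory.get = lambda *args, **kwargs: port_obj
#   assert host.port_factory.get('any-port-name') is port_obj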

def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
    options, parsed_args = parse_args(extra_args=extra_args,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = host or MockHost()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
    return (run_details, output, host.user)


def run_and_capture(port_obj, options, parsed_args, shared_port=True):
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
    oc = outputcapture.OutputCapture()
    oc.capture_output()
    logging_stream = StringIO.StringIO()
    try:
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()
    return (run_details, logging_stream)

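# A hedged note on the idiom above: capture_output() is called before the try
# block so that restore_output() in the finally clause only runs once capture
# has actually started, and the streams come back even if run() raises.
# Sketch, with a hypothetical noisy helper:
#
#   oc = outputcapture.OutputCapture()
#   oc.capture_output()
#   try:
#       do_something_noisy()
#   finally:
#       oc.restore_output()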

def get_tests_run(args, host=None):
    results = get_test_results(args, host)
    return [result.test_name for result in results]


def get_test_batches(args, host=None):
    results = get_test_results(args, host)
    batches = []
    batch = []
    current_pid = None
    for result in results:
        if batch and result.pid != current_pid:
            batches.append(batch)
            batch = []
        # Track the worker pid so that consecutive results from the same
        # worker land in the same batch; without this assignment, every
        # result after the first would start a new batch.
        current_pid = result.pid
        batch.append(result.test_name)
    if batch:
        batches.append(batch)
    return batches

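# A minimal, self-contained sketch (not executed by the suite) of the
# batching rule used by get_test_batches above: consecutive results that
# share a worker pid land in the same batch. All names here are stand-ins.
def _example_batches_by_pid():
    class FakeResult(object):
        def __init__(self, pid, test_name):
            self.pid = pid
            self.test_name = test_name

    results = [FakeResult(1, 'a.html'), FakeResult(1, 'b.html'), FakeResult(2, 'c.html')]
    batches, batch, current_pid = [], [], None
    for result in results:
        if batch and result.pid != current_pid:
            batches.append(batch)
            batch = []
        current_pid = result.pid
        batch.append(result.test_name)
    if batch:
        batches.append(batch)
    assert batches == [['a.html', 'b.html'], ['c.html']]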

def get_test_results(args, host=None):
    options, parsed_args = parse_args(args, tests_included=True)

    host = host or MockHost()
    port_obj = host.port_factory.get(port_name=options.platform, options=options)

    oc = outputcapture.OutputCapture()
    oc.capture_output()
    logging_stream = StringIO.StringIO()
    try:
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()

    all_results = []
    if run_details.initial_results:
        all_results.extend(run_details.initial_results.all_results)

    if run_details.retry_results:
        all_results.extend(run_details.retry_results.all_results)
    return all_results


def parse_full_results(full_results_text):
    json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
    compressed_results = json.loads(json_to_eval)
    return compressed_results

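# A hedged example of the JSONP unwrapping above (the input shape is assumed
# from the string replacements, not taken from a real results file):
#
#   parse_full_results('ADD_RESULTS({"num_regressions": 1});')
#   => {u'num_regressions': 1}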

class StreamTestingMixin(object):
    def assertContains(self, stream, string):
        self.assertTrue(string in stream.getvalue())

    def assertEmpty(self, stream):
        self.assertFalse(stream.getvalue())

    def assertNotEmpty(self, stream):
        self.assertTrue(stream.getvalue())


class RunTest(unittest.TestCase, StreamTestingMixin):
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()

    def test_basic(self):
        options, args = parse_args(tests_included=True)
        logging_stream = StringIO.StringIO()
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options)
        details = run_webkit_tests.run(port_obj, options, args, logging_stream)

        # These numbers will need to be updated whenever we add new tests.
        self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
        self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
        self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
        self.assertEqual(details.retry_results.total, test.TOTAL_RETRIES)

        one_line_summary = "%d tests ran as expected, %d didn't:\n" % (
            details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name),
            len(details.initial_results.unexpected_results_by_name))
        self.assertTrue(one_line_summary in logging_stream.buflist)

        # Ensure the results were summarized properly.
        self.assertEqual(details.summarized_results['num_regressions'], details.exit_code)

        # Ensure the image diff percentage is in the results.
        self.assertEqual(details.summarized_results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)

        # Ensure the results were written out and displayed.
        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        self.assertEqual(json.loads(json_to_eval), details.summarized_results)

        self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_batch_size(self):
        batch_tests_run = get_test_batches(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/passes', 'passes'],
                tests_included=True, shared_port=False)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))

    def test_dryrun(self):
        tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(tests_run, [])

        tests_run = get_tests_run(['-n'])
        self.assertEqual(tests_run, [])

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether the worker is in-process or out. In-process exceptions
        # propagate as normal, which gives us the full stack trace and
        # traceback from the worker. The downside is that the error could be
        # anything, but that is actually useful in testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions (a subclass of BaseException), which carry a string
        # capture of the worker's stack; that string can be printed, but it
        # doesn't display properly in the unit test exception handlers.
        self.assertRaises(BaseException, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(BaseException, logging_run,
                ['--child-processes', '2', '--force', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)

    def test_full_results_html(self):
        # FIXME: verify html?
        details, _, _ = logging_run(['--full-results-html'])
        self.assertEqual(details.exit_code, 0)

    def test_hung_thread(self):
        details, err, _ = logging_run(['--run-singly', '--time-out-ms=50', 'failures/expected/hang.html'], tests_included=True)
        # Note that hang.html is marked as WontFix and all WontFix tests are
        # expected to Pass, so that actually running them generates an "unexpected" error.
        self.assertEqual(details.exit_code, 1)
        self.assertNotEmpty(err)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run, ['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(KeyboardInterrupt, logging_run,
                ['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True, shared_port=False)

    def test_no_tests_found(self):
        details, err, _ = logging_run(['resources'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        details, err, _ = logging_run(['foo'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_natural_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)

    def test_natural_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)

    def test_random_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(tests_run.count('passes/audio.html'), 2)
        self.assertEqual(tests_run.count('passes/args.html'), 2)

    def test_no_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_with_directory_entries_in_natural_order(self):
        tests_to_run = ['http/tests/ssl', 'http/tests/passes']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])

    def test_gc_between_tests(self):
        self.assertTrue(passing_run(['--gc-between-tests']))

    def test_complex_text(self):
        self.assertTrue(passing_run(['--complex-text']))

    def test_threaded(self):
        self.assertTrue(passing_run(['--threaded']))

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'])
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
                         num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'])
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEqual(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])

        # Now check that we don't run anything.
        self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests run should be:
        #   number_of_tests * repeat_each * iterations
        host = MockHost()
        _, err, _ = logging_run(
            ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
            tests_included=True, host=host)
        self.assertContains(err, "All 16 tests ran as expected.\n")

    def test_run_chunk(self):
        # Test that we actually select the right chunk.
        all_tests_run = get_tests_run(['passes', 'failures'])
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
        self.assertEqual(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
        self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible
        # by the number of parts (here we end up with 3 parts of 2 tests each, but
        # we only have 4 tests total, so the last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_test_batches(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')

        batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_run_singly_actually_runs_tests(self):
        details, _, _ = logging_run(['--run-singly'], tests_included=True)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 1)  # failures/expected/hang.html actually passes w/ --run-singly.

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'])
        self.assertEqual(tests_run, ['passes/text.html'])

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'])
        self.assertEqual(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        # FIXME: 'keybaord.html' looks like a typo for keyboard.html; as
        # written, this asserts that requesting a nonexistent file runs
        # no tests. (Correcting the name would change the test's behavior,
        # since explicitly listed SKIP tests are run.)
        tests_run = get_tests_run(['failures/expected/keybaord.html'])
        self.assertEqual([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                         'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, -1)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_missing_and_unexpected_results(self):
        # Test that missing and unexpected results are reported correctly in
        # full_results.json.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"text-image-checksum.html":{"report":"REGRESSION","expected":"PASS","actual":"IMAGE+TEXT","image_diff_percent":1},"missing_text.html":{"report":"MISSING","expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_pixel_test_directories(self):
        host = MockHost()

        # Both tests have failing checksums. We include only the first in the
        # pixel-test directories, so only that one should fail.
        args = ['--pixel-tests', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                'failures/unexpected/pixeldir/image_in_pixeldir.html',
                'failures/unexpected/image_not_in_pixeldir.html']
        details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)

        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"report":"REGRESSION","expected":"PASS","actual":"IMAGE"'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)

    def test_missing_and_unexpected_results_with_custom_exit_code(self):
        # Test that a port can compute a custom exit code from the summarized
        # results (here, regressions plus missing results).
        class CustomExitCodePort(test.TestPort):
            def exit_code_from_summarized_results(self, unexpected_results):
                return unexpected_results['num_regressions'] + unexpected_results['num_missing']

        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
        test_port = CustomExitCodePort(host, options=options)
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host, port_obj=test_port)
        self.assertEqual(details.exit_code, 2)

    def test_crash_with_stderr(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"report":"REGRESSION","expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin.
        if not self._platform.is_mac():
            return
        mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        expected_crash_log = mock_crash_report
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)

    def test_web_process_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin.
        if not self._platform.is_mac():
            return
        mock_crash_report = make_mock_crash_report_darwin('WebProcess', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
        logging_run(['failures/unexpected/web-process-crash-with-stderr.html'], tests_included=True, host=host)
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), mock_crash_report)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        details, regular_output, user = logging_run(
            ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
            tests_included=True, host=host)

        # The incremental results should have been generated and then deleted
        # by the end of the run, so the file should no longer exist.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        # This checks that we report only the number of tests that actually failed.
        self.assertEqual(details.exit_code, 1)

        # This checks that passes/text.html is considered SKIPped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        _, _, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))

        # Now we test that --clobber-old-results does remove the old entries and the old retries,
        # and that we don't retry again.
        host = MockHost()
        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Clobbering old results' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))

    def test_retrying_force_pixel_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--no-pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        # Use a name that doesn't shadow the imported json module.
        results = parse_full_results(json_string)
        self.assertEqual(results["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "image_diff_percent": 1, "report": "REGRESSION"})
        self.assertFalse(results["pixel_tests_enabled"])
        self.assertEqual(details.enabled_pixel_tests_in_retry, True)

    def test_failed_text_with_missing_pixel_results_on_retry(self):
        # Test what happens when pixel results are missing on retry.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            '--no-new-test-results', '--no-pixel-tests',
            'failures/unexpected/text-image-missing.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"text-image-missing.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT MISSING","is_missing_image":true}}'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_retrying_uses_retries_directory(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))

    def test_run_order__inline(self):
        # Check that we run tests in ascending alphabetical order within a
        # directory. HTTP tests are sharded separately from other tests, so
        # we have to check both.
        tests_run = get_tests_run(['-i', 'passes/passes', 'passes'])
        self.assertEqual(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['http/tests/passes'])
        self.assertEqual(tests_run, sorted(tests_run))

    def test_tolerance(self):
        class ImageDiffTestPort(test.TestPort):
            def diff_image(self, expected_contents, actual_contents, tolerance=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return (True, 1, None)

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            host = MockHost()
            test_port = ImageDiffTestPort(host, options=options)
            res = passing_run(args, port_obj=test_port, tests_included=True)
            self.assertTrue(res)
            return test_port

        base_args = ['--pixel-tests', '--no-new-test-results', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_skip_reftests_if_no_ref_tests(self):
        tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)
        tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'])
        self.assertEqual([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_test_results(['passes/reftest.html'])
        self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_test_results(['passes/mismatch.html'])
        self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])

    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
        host = MockHost()
        _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"unlistedtest.html":{"report":"MISSING","expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
        self.assertTrue(json_string.find('"num_regressions":4') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
        self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                    tests_included=True, host=host))

    def test_no_http_and_force(self):
        # See test_run_force; using --force raises an exception.
        # FIXME: We would like to check the warnings generated.
        self.assertRaises(ValueError, logging_run, ['--force', '--no-http'])

    @staticmethod
    def has_test_of_type(tests, test_type):
        # Avoid shadowing the built-in 'type' and the imported 'test' module.
        return [name for name in tests if test_type in name]
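    # Note that has_test_of_type returns a (possibly empty) list rather than
    # a bool, so the callers below rely on list truthiness. A hedged example
    # with hypothetical names:
    #   RunTest.has_test_of_type(['http/tests/a.html', 'passes/b.html'], 'http')
    #   => ['http/tests/a.html']  # truthy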

    def test_no_http_tests(self):
        batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'websocket'))

        batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'])
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'http'))
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))

        batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'websocket'))

    def test_platform_tests_are_found(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
        self.assertTrue('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Test to ensure that we don't generate -pretty.html if PrettyPatch isn't available.
        host = MockHost()
        _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        written_files = host.filesystem.written_files
        # Use a loop variable that doesn't shadow the imported path module.
        self.assertTrue(any(written_file.endswith('-diff.txt') for written_file in written_files.keys()))
        self.assertFalse(any(written_file.endswith('-pretty-diff.html') for written_file in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
        self.assertEqual(full_results['has_pretty_patch'], False)

    def test_unsupported_platform(self):
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)

        self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(), '')
        self.assertTrue('unsupported platform' in stderr.getvalue())

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure logging in the
        # child process (e.g., on win32) and we need to make sure that works and we still
        # see the verbose log output. However, we can't use logging_run() because using
        # outputcapture to capture stdout and stderr later results in a nonpicklable host.

        # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
        if not self.should_test_processes:
            return

        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO.StringIO()
        run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
        self.assertTrue('text.html passed' in logging_stream.getvalue())
        self.assertTrue('image.html passed' in logging_stream.getvalue())


class EndToEndTest(unittest.TestCase):
    def test_reftest_with_two_notrefs(self):
        # Test that reftests with multiple references report the failing
        # variants in the results and omit the ones that succeeded.
        host = MockHost()
        _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()

        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        # Use a name that doesn't shadow the imported json module.
        results = parse_full_results(json_string)
        self.assertTrue("multiple-match-success.html" not in results["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-mismatch-success.html" not in results["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-both-success.html" not in results["tests"]["reftests"]["foo"])
        self.assertEqual(results["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "image_diff_percent": 1, "report": "REGRESSION"})
        self.assertEqual(results["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "report": "REGRESSION"})
        self.assertEqual(results["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "report": "REGRESSION"})


class RebaselineTest(unittest.TestCase, StreamTestingMixin):
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that file_list contains the baselines."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertContains(err, baseline_msg)

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)

    def test_missing_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 10)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations in the version-specific directories
        # for both existing and new baselines.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)


class PortTest(unittest.TestCase):
    def assert_mock_port_works(self, port_name, args=None):
        # Avoid a mutable default argument.
        args = args or []
        self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))

    def disabled_test_mac_lion(self):
        self.assert_mock_port_works('mac-lion')


class MainTest(unittest.TestCase):
    def test_exception_handling(self):
        orig_run_fn = run_webkit_tests.run

        # unused args pylint: disable=W0613
        def interrupting_run(port, options, args, stderr):
            raise KeyboardInterrupt

        def successful_run(port, options, args, stderr):

            class FakeRunDetails(object):
                exit_code = -1

            return FakeRunDetails()

        def exception_raising_run(port, options, args, stderr):
            assert False

        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        try:
            run_webkit_tests.run = interrupting_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, run_webkit_tests.INTERRUPTED_EXIT_STATUS)

            run_webkit_tests.run = successful_run
            res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
            self.assertEqual(res, -1)

            run_webkit_tests.run = exception_raising_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        finally:
            run_webkit_tests.run = orig_run_fn