# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import codecs
import json
import logging
import os
import platform
import Queue
import re
import StringIO
import sys
import thread
import threading
import time
import unittest

from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost

from webkitpy import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.port import Port
from webkitpy.port import test
from webkitpy.test.skip import skip_if
from webkitpy.tool.mocktool import MockOptions


def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
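    """Build an argument list for run-webkit-tests and parse it.

    Note that print_nothing is accepted for compatibility with callers but
    is not consumed here.
    """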
    extra_args = extra_args or []
    args = []
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if not new_results:
        args.append('--no-new-test-results')

    if '--child-processes' not in extra_args:
        args.extend(['--child-processes', 1])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)


def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
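    """Run run-webkit-tests with the given arguments and return True if it exited cleanly."""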
    options, parsed_args = parse_args(extra_args, tests_included)
    if not port_obj:
        host = host or MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj

    logging_stream = StringIO.StringIO()
    run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    return run_details.exit_code == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
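    """Run run-webkit-tests with output captured; return (run_details, logging_stream, user)."""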
    options, parsed_args = parse_args(extra_args=extra_args,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = host or MockHost()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
    return (run_details, output, host.user)


def run_and_capture(port_obj, options, parsed_args, shared_port=True):
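    """Run run-webkit-tests while capturing stdout and stderr; return (run_details, logging_stream)."""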
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        logging_stream = StringIO.StringIO()
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()
    return (run_details, logging_stream)


def get_tests_run(args, host=None):
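    """Return the names of the tests that were run, in the order in which they ran."""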
    results = get_test_results(args, host)
    return [result.test_name for result in results]


def get_test_batches(args, host=None):
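    """Return the names of the tests that were run, grouped into one batch per worker process."""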
    results = get_test_results(args, host)
    batches = []
    batch = []
    current_pid = None
    for result in results:
        if batch and result.pid != current_pid:
            batches.append(batch)
            batch = []
        batch.append(result.test_name)
        # Remember which worker produced this result so that consecutive
        # results from the same worker stay in one batch; without this,
        # the comparison above is always true after the first result.
        current_pid = result.pid
    if batch:
        batches.append(batch)
    return batches


def get_test_results(args, host=None):
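    """Run the given tests and return the result objects from the initial run and any retry."""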
    options, parsed_args = parse_args(args, tests_included=True)

    host = host or MockHost()
    port_obj = host.port_factory.get(port_name=options.platform, options=options)

    oc = outputcapture.OutputCapture()
    oc.capture_output()
    logging_stream = StringIO.StringIO()
    try:
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()

    all_results = []
    if run_details.initial_results:
        all_results.extend(run_details.initial_results.all_results)

    if run_details.retry_results:
        all_results.extend(run_details.retry_results.all_results)
    return all_results


def parse_full_results(full_results_text):
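    """Parse the JSONP-wrapped contents of full_results.json into a dict.

    The file is written as JSONP of the form ADD_RESULTS({...});, so the
    wrapper is stripped before the JSON payload is parsed.
    """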
    json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
    compressed_results = json.loads(json_to_eval)
    return compressed_results


class StreamTestingMixin(object):
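    """Assertion helpers for the StringIO-backed logging streams returned by logging_run()."""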
    def assertContains(self, stream, string):
        self.assertTrue(string in stream.getvalue())

    def assertEmpty(self, stream):
        self.assertFalse(stream.getvalue())

    def assertNotEmpty(self, stream):
        self.assertTrue(stream.getvalue())


class RunTest(unittest.TestCase, StreamTestingMixin):
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()

    def test_basic(self):
        options, args = parse_args(tests_included=True)
        logging_stream = StringIO.StringIO()
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options)
        details = run_webkit_tests.run(port_obj, options, args, logging_stream)

        # These numbers will need to be updated whenever we add new tests.
        self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
        self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
        self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
        self.assertEqual(details.retry_results.total, test.TOTAL_RETRIES)

        one_line_summary = "%d tests ran as expected, %d didn't:\n" % (
            details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name),
            len(details.initial_results.unexpected_results_by_name))
        self.assertTrue(one_line_summary in logging_stream.buflist)

        # Ensure the results were summarized properly.
        self.assertEqual(details.summarized_results['num_regressions'], details.exit_code)

        # Ensure the image diff percentage is in the results.
        self.assertEqual(details.summarized_results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)

        # Ensure the results were written out and displayed.
        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        self.assertEqual(json.loads(json_to_eval), details.summarized_results)

        self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_batch_size(self):
        batch_tests_run = get_test_batches(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_max_locked_shards(self):
        # Tests for the default of using one locked shard even in the case of more than one child process.
        if not self.should_test_processes:
            return
        save_env_webkit_test_max_locked_shards = None
        if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
            save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
            del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
        _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
        try:
            self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
        finally:
            if save_env_webkit_test_max_locked_shards:
                os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/passes', 'passes'],
                tests_included=True, shared_port=False)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))

    def test_dryrun(self):
        tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(tests_run, [])

        tests_run = get_tests_run(['-n'])
        self.assertEqual(tests_run, [])

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether they are raised in-process or in a separate process. In-process
        # exceptions propagate as normal, which gives us the full stack trace
        # and traceback from the worker. The downside is that the exception
        # could be of any type, but that is actually useful in testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions (a subclass of BaseException), which carry a string
        # capture of the worker's stack; that string can be printed, but it
        # doesn't display properly in the unit test exception handlers.
        self.assertRaises(BaseException, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(BaseException, logging_run,
                ['--child-processes', '2', '--force', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)

    def test_full_results_html(self):
        # FIXME: verify html?
        details, _, _ = logging_run(['--full-results-html'])
        self.assertEqual(details.exit_code, 0)

    def test_hung_thread(self):
        details, err, _ = logging_run(['--run-singly', '--time-out-ms=50', 'failures/expected/hang.html'], tests_included=True)
        # Note that hang.html is marked as WontFix and all WontFix tests are
        # expected to Pass, so that actually running them generates an "unexpected" error.
        self.assertEqual(details.exit_code, 1)
        self.assertNotEmpty(err)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run, ['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(KeyboardInterrupt, logging_run,
                ['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True, shared_port=False)

    def test_no_tests_found(self):
        details, err, _ = logging_run(['resources'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        details, err, _ = logging_run(['foo'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_natural_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)

    def test_natural_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)

    def test_random_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(tests_run.count('passes/audio.html'), 2)
        self.assertEqual(tests_run.count('passes/args.html'), 2)

    def test_no_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_with_directory_entries_in_natural_order(self):
        tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])

    def test_gc_between_tests(self):
        self.assertTrue(passing_run(['--gc-between-tests']))

    def test_complex_text(self):
        self.assertTrue(passing_run(['--complex-text']))

    def test_threaded(self):
        self.assertTrue(passing_run(['--threaded']))

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'])
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
                         num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'])
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEqual(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])

        # Now check that we don't run anything.
        self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests should be: number_of_tests *
        # repeat_each * iterations
        host = MockHost()
        _, err, _ = logging_run(
            ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
            tests_included=True, host=host)
        self.assertContains(err, "All 16 tests ran as expected.\n")

    def test_run_chunk(self):
        # Test that we actually select the right chunk.
        all_tests_run = get_tests_run(['passes', 'failures'])
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
        self.assertEqual(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
        self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.

        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_test_batches(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')

        batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_run_singly_actually_runs_tests(self):
        details, _, _ = logging_run(['--run-singly'], tests_included=True)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 1)  # failures/expected/hang.html actually passes w/ --run-singly.

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'])
        self.assertEqual(tests_run, ['passes/text.html'])

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'])
        self.assertEqual(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        tests_run = get_tests_run(['failures/expected/keybaord.html'])
        self.assertEqual([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                         'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, -1)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_missing_and_unexpected_results(self):
        # Test that missing and unexpected results are reported correctly in
        # the summarized results JSON.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"text-image-checksum.html":{"report":"REGRESSION","expected":"PASS","actual":"IMAGE+TEXT","image_diff_percent":1},"missing_text.html":{"report":"MISSING","expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_pixel_test_directories(self):
        host = MockHost()

        # Both tests have failing checksums. We include only the first one in
        # the pixel test directories, so only that one should fail.
        args = ['--pixel-tests', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                'failures/unexpected/pixeldir/image_in_pixeldir.html',
                'failures/unexpected/image_not_in_pixeldir.html']
        details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)

        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"report":"REGRESSION","expected":"PASS","actual":"IMAGE"'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)

    def test_missing_and_unexpected_results_with_custom_exit_code(self):
        # Test that a port can customize the exit code by overriding
        # exit_code_from_summarized_results().
        class CustomExitCodePort(test.TestPort):
            def exit_code_from_summarized_results(self, unexpected_results):
                return unexpected_results['num_regressions'] + unexpected_results['num_missing']

        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
        test_port = CustomExitCodePort(host, options=options)
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host, port_obj=test_port)
        self.assertEqual(details.exit_code, 2)

    def test_crash_with_stderr(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"report":"REGRESSION","expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin.
        if not self._platform.is_mac():
            return
        mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        expected_crash_log = mock_crash_report
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)

    def test_web_process_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin.
        if not self._platform.is_mac():
            return
        mock_crash_report = make_mock_crash_report_darwin('WebProcess', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
        logging_run(['failures/unexpected/web-process-crash-with-stderr.html'], tests_included=True, host=host)
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), mock_crash_report)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        details, regular_output, user = logging_run(
            ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
            tests_included=True, host=host)

        # The incremental results file should have been written while the tests
        # were running and then deleted when the final results were generated.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        # This checks that we report only the number of tests that actually failed.
        self.assertEqual(details.exit_code, 1)

        # This checks that passes/text.html is considered SKIPped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        _, _, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))

        # Now we test that --clobber-old-results does remove the old entries and the old retries,
        # and that we don't retry again.
        host = MockHost()
        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Clobbering old results' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))

    def test_retrying_force_pixel_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--no-pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json = parse_full_results(json_string)
        self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "image_diff_percent": 1, "report": "REGRESSION"})
        self.assertFalse(json["pixel_tests_enabled"])
        self.assertEqual(details.enabled_pixel_tests_in_retry, True)

    def test_failed_text_with_missing_pixel_results_on_retry(self):
        # Test what happens when pixel results are missing on retry.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            '--no-new-test-results', '--no-pixel-tests',
            'failures/unexpected/text-image-missing.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"text-image-missing.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT MISSING","is_missing_image":true}}'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_retrying_uses_retries_directory(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))

    def test_run_order__inline(self):
        # These next tests check that we run the tests in ascending alphabetical
        # order per directory. HTTP tests are sharded separately from other
        # tests, so we have to check both.
        tests_run = get_tests_run(['-i', 'passes/passes', 'passes'])
        self.assertEqual(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['http/tests/passes'])
        self.assertEqual(tests_run, sorted(tests_run))

    def test_tolerance(self):
        class ImageDiffTestPort(test.TestPort):
            def diff_image(self, expected_contents, actual_contents, tolerance=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return (True, 1, None)

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            host = MockHost()
            test_port = ImageDiffTestPort(host, options=options)
            res = passing_run(args, port_obj=test_port, tests_included=True)
            self.assertTrue(res)
            return test_port

        base_args = ['--pixel-tests', '--no-new-test-results', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_virtual(self):
        self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
                                     'virtual/passes/text.html', 'virtual/passes/args.html']))

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_skip_reftests_if_no_ref_tests(self):
        tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)
        tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'])
        self.assertEqual([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_test_results(['passes/reftest.html'])
        self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_test_results(['passes/mismatch.html'])
        self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])

    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
        host = MockHost()
        _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"unlistedtest.html":{"report":"MISSING","expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
        self.assertTrue(json_string.find('"num_regressions":4') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
        self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                    tests_included=True, host=host))

    def test_no_http_and_force(self):
        # See test_run_force; using --force raises an exception.
        # FIXME: We would like to check the warnings generated.
        self.assertRaises(ValueError, logging_run, ['--force', '--no-http'])

    @staticmethod
    def has_test_of_type(tests, type):
        return [test for test in tests if type in test]

    def test_no_http_tests(self):
        batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'websocket'))

        batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'])
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'http'))
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))

        batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'websocket'))

    def test_platform_tests_are_found(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
        self.assertTrue('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Test to ensure that we don't generate -pretty.html if PrettyPatch isn't available.
        host = MockHost()
        _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
        self.assertEqual(full_results['has_pretty_patch'], False)

    def test_unsupported_platform(self):
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)

        self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(), '')
        self.assertTrue('unsupported platform' in stderr.getvalue())

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure logging in the
        # child process (e.g., on win32) and we need to make sure that works and we still
        # see the verbose log output. However, we can't use logging_run() because using
        # outputcapture to capture stdout and stderr later results in a nonpicklable host.

        # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
        if not self.should_test_processes:
            return

        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO.StringIO()
        run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
        self.assertTrue('text.html passed' in logging_stream.getvalue())
        self.assertTrue('image.html passed' in logging_stream.getvalue())


class EndToEndTest(unittest.TestCase):
    def test_reftest_with_two_notrefs(self):
        # Test that reftests with multiple match and mismatch references
        # report their successes and failures correctly in the results JSON.
        host = MockHost()
        _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()

        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json = parse_full_results(json_string)
        self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "image_diff_percent": 1, "report": "REGRESSION"})
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "report": "REGRESSION"})
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "report": "REGRESSION"})


class RebaselineTest(unittest.TestCase, StreamTestingMixin):
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that file_list contains each baseline and that writing it was logged."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertContains(err, baseline_msg)

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)

    def test_missing_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 10)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations in the version-specific directories
        # for both existing and new baselines.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)


class PortTest(unittest.TestCase):
    def assert_mock_port_works(self, port_name, args=[]):
        self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))

    def disabled_test_mac_lion(self):
        self.assert_mock_port_works('mac-lion')


class MainTest(unittest.TestCase):
    def test_exception_handling(self):
        orig_run_fn = run_webkit_tests.run

        # unused args pylint: disable=W0613
        def interrupting_run(port, options, args, stderr):
            raise KeyboardInterrupt

        def successful_run(port, options, args, stderr):

            class FakeRunDetails(object):
                exit_code = -1

            return FakeRunDetails()

        def exception_raising_run(port, options, args, stderr):
            assert False

        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        try:
            run_webkit_tests.run = interrupting_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, run_webkit_tests.INTERRUPTED_EXIT_STATUS)

            run_webkit_tests.run = successful_run
            res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
            self.assertEqual(res, -1)

            run_webkit_tests.run = exception_raising_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        finally:
            run_webkit_tests.run = orig_run_fn