#!/usr/bin/python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import codecs
import itertools
import json
import logging
import platform
import Queue
import re
import StringIO
import sys
import thread
import threading
import time
import unittest

from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost

from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.controllers.manager import WorkerException
from webkitpy.layout_tests.port import Port
from webkitpy.layout_tests.port.test import TestPort, TestDriver
from webkitpy.test.skip import skip_if
from webkitpy.tool.mocktool import MockOptions


def parse_args(extra_args=None, record_results=False, tests_included=False, new_results=False, print_nothing=True):
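    """Build an argument list with test-friendly defaults and hand it to run_webkit_tests.parse_args()."""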
    extra_args = extra_args or []
    args = []
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if not record_results:
        args.append('--no-record-results')
    if not new_results:
        args.append('--no-new-test-results')

    if '--child-processes' not in extra_args:
        args.extend(['--child-processes', '1'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)


def passing_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, host=None, shared_port=True):
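    """Run the given tests and return True if the run exited cleanly (status 0)."""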
    options, parsed_args = parse_args(extra_args, record_results, tests_included)
    if not port_obj:
        host = host or MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    if shared_port:
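        # Monkey-patch the factory so every get() call hands back this same
        # port object; the manager and the workers then share one port.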
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj

    buildbot_output = StringIO.StringIO()
    regular_output = StringIO.StringIO()
    res = run_webkit_tests.run(port_obj, options, parsed_args, buildbot_output=buildbot_output, regular_output=regular_output)
    return res == 0


def logging_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, host=None, new_results=False, shared_port=True):
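    """Run the given tests with output captured; return (exit_code, buildbot_output, regular_output, user)."""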
    options, parsed_args = parse_args(extra_args=extra_args,
                                      record_results=record_results,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = host or MockHost()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    res, buildbot_output, regular_output = run_and_capture(port_obj, options, parsed_args, shared_port)
    return (res, buildbot_output, regular_output, host.user)


def run_and_capture(port_obj, options, parsed_args, shared_port=True):
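    """Run the tests while capturing stdout/stderr; return (exit_code, buildbot_output, regular_output)."""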
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        buildbot_output = StringIO.StringIO()
        regular_output = StringIO.StringIO()
        res = run_webkit_tests.run(port_obj, options, parsed_args,
                                   buildbot_output=buildbot_output,
                                   regular_output=regular_output)
    finally:
        oc.restore_output()
    return (res, buildbot_output, regular_output)


def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False,
                  host=None, include_reference_html=False):
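    """Run the tests with a recording driver and return the tests actually run,
    either as a list of per-driver batches or, with flatten_batches=True, as one flat list."""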
    extra_args = extra_args or []
    if not tests_included:
        # Not including http tests since they get run out of order (that
        # behavior has its own test, see test_get_test_file_queue).
        extra_args = ['passes', 'failures'] + extra_args
    options, parsed_args = parse_args(extra_args, tests_included=True)

    host = host or MockHost()
    test_batches = []

    class RecordingTestDriver(TestDriver):
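        """A TestDriver that records the name of each test it runs into test_batches."""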
        def __init__(self, port, worker_number):
            TestDriver.__init__(self, port, worker_number, pixel_tests=port.get_option('pixel_test'), no_timeout=False)
            self._current_test_batch = None

        def start(self):
            pass

        def stop(self):
            self._current_test_batch = None

        def run_test(self, test_input, stop_when_done):
            if self._current_test_batch is None:
                self._current_test_batch = []
                test_batches.append(self._current_test_batch)
            test_name = test_input.test_name
            # For a reftest, a single test calls the driver's run_test() twice
            # (once for the test and once for the reference). Don't record the
            # reference html unless include_reference_html was explicitly requested.
            filesystem = self._port.host.filesystem
            dirname, filename = filesystem.split(test_name)
            if include_reference_html or not Port.is_reference_html_file(filesystem, dirname, filename):
                self._current_test_batch.append(test_name)
            return TestDriver.run_test(self, test_input, stop_when_done)

    class RecordingTestPort(TestPort):
        def create_driver(self, worker_number):
            return RecordingTestDriver(self, worker_number)

    recording_port = RecordingTestPort(host, options=options)
    run_and_capture(recording_port, options, parsed_args)

    if flatten_batches:
        return list(itertools.chain(*test_batches))

    return test_batches


# Update this magic number if you add an unexpected test to webkitpy.layout_tests.port.test.
# FIXME: It would be nice to have a routine in port/test.py that returns this number.
unexpected_tests_count = 14


class StreamTestingMixin(object):
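    """Assertion helpers for the StringIO streams returned by logging_run()."""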
    def assertContains(self, stream, string):
        self.assertTrue(string in stream.getvalue())

    def assertEmpty(self, stream):
        self.assertFalse(stream.getvalue())

    def assertNotEmpty(self, stream):
        self.assertTrue(stream.getvalue())


class LintTest(unittest.TestCase, StreamTestingMixin):
    def test_all_configurations(self):

        class FakePort(object):
            def __init__(self, host, name, path):
                self.host = host
                self.name = name
                self.path = path

            def path_to_test_expectations_file(self):
                return self.path

            def test_configuration(self):
                return None

            def expectations_dict(self):
                self.host.ports_parsed.append(self.name)
                return {self.path: ''}

            def skipped_layout_tests(self, tests):
                return set([])

            def all_test_configurations(self):
                return []

            def configuration_specifier_macros(self):
                return []

            def path_from_webkit_base(self):
                return ''

            def get_option(self, name, val):
                return val

        class FakeFactory(object):
            def __init__(self, host, ports):
                self.host = host
                self.ports = {}
                for port in ports:
                    self.ports[port.name] = port

            def get(self, port_name, *args, **kwargs):
                return self.ports[port_name]

            def all_port_names(self):
                return sorted(self.ports.keys())

        host = MockHost()
        host.ports_parsed = []
        host.port_factory = FakeFactory(host, (FakePort(host, 'a', 'path-to-a'),
                                               FakePort(host, 'b', 'path-to-b'),
                                               FakePort(host, 'b-win', 'path-to-b')))

        self.assertEquals(run_webkit_tests.lint(host.port_factory.ports['a'], MockOptions(platform=None)), 0)
        self.assertEquals(host.ports_parsed, ['a', 'b'])

        host.ports_parsed = []
        self.assertEquals(run_webkit_tests.lint(host.port_factory.ports['a'], MockOptions(platform='a')), 0)
        self.assertEquals(host.ports_parsed, ['a'])

    def test_lint_test_files(self):
        res, out, err, user = logging_run(['--lint-test-files'])
        self.assertEqual(res, 0)
        self.assertEmpty(out)
        self.assertContains(err, 'Lint succeeded')

    def test_lint_test_files__errors(self):
        options, parsed_args = parse_args(['--lint-test-files'])
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options=options)
        port_obj.expectations_dict = lambda: {'': '# syntax error'}
        res, out, err = run_and_capture(port_obj, options, parsed_args)

        self.assertEqual(res, -1)
        self.assertEmpty(out)
        self.assertTrue(any(['Lint failed' in msg for msg in err.buflist]))


class MainTest(unittest.TestCase, StreamTestingMixin):
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()

    def test_accelerated_compositing(self):
        # This just tests that we recognize the command line args.
        self.assertTrue(passing_run(['--accelerated-video']))
        self.assertTrue(passing_run(['--no-accelerated-video']))

    def test_accelerated_2d_canvas(self):
        # This just tests that we recognize the command line args.
        self.assertTrue(passing_run(['--accelerated-2d-canvas']))
        self.assertTrue(passing_run(['--no-accelerated-2d-canvas']))

    def test_all(self):
        res, out, err, user = logging_run([], tests_included=True)
        self.assertEquals(res, unexpected_tests_count)

    def test_basic(self):
        self.assertTrue(passing_run())

    def test_batch_size(self):
        batch_tests_run = get_tests_run(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2', 'passes'],
                tests_included=True, shared_port=False)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))

    def test_dryrun(self):
        batch_tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(batch_tests_run, [])

        batch_tests_run = get_tests_run(['-n'])
        self.assertEqual(batch_tests_run, [])

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether the worker runs in-process or out. Inline exceptions propagate
        # as normal, which lets us get the full stack trace and traceback from
        # the worker. The downside is that it could be any error, but this is
        # actually useful in testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions, which carry a string capture of the stack that can
        # be printed but doesn't display properly in the unit test exception handlers.
        self.assertRaises(ValueError, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(WorkerException, logging_run,
                ['--child-processes', '2', '--force', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)

    def test_full_results_html(self):
        # FIXME: verify html?
        res, out, err, user = logging_run(['--full-results-html'])
        self.assertEqual(res, 0)

    def test_hung_thread(self):
        res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
                                          'failures/expected/hang.html'],
                                          tests_included=True)
        self.assertEqual(res, 0)
        self.assertNotEmpty(out)
        self.assertNotEmpty(err)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run,
            ['failures/expected/keyboard.html', '--child-processes', '1'],
            tests_included=True)

        if self.should_test_processes:
            self.assertRaises(KeyboardInterrupt, logging_run,
                ['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True, shared_port=False)

    def test_no_tests_found(self):
        res, out, err, user = logging_run(['resources'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertEmpty(out)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        res, out, err, user = logging_run(['foo'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertEmpty(out)
        self.assertContains(err, 'No tests to run.\n')

    def test_randomize_order(self):
        # FIXME: verify order was shuffled
        self.assertTrue(passing_run(['--randomize-order']))

    def test_gc_between_tests(self):
        self.assertTrue(passing_run(['--gc-between-tests']))

    def test_complex_text(self):
        self.assertTrue(passing_run(['--complex-text']))

    def test_threaded(self):
        self.assertTrue(passing_run(['--threaded']))

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'], flatten_batches=True, tests_included=True)
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'], tests_included=True, flatten_batches=True)
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEquals(len(get_tests_run(['--skipped=default', 'passes'], tests_included=True, flatten_batches=True)),
                          num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'], tests_included=True, flatten_batches=True)
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEquals(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEquals(get_tests_run(['--skipped=only', 'passes'], tests_included=True, flatten_batches=True),
                          ['passes/skipped/skip.html'])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests should be: number_of_tests *
        # repeat_each * iterations.
        host = MockHost()
        res, out, err, _ = logging_run(['--iterations', '2',
                                        '--repeat-each', '4',
                                        '--debug-rwt-logging',
                                        'passes/text.html', 'failures/expected/text.html'],
                                       tests_included=True, host=host, record_results=True)
        self.assertContains(out, "=> Results: 8/16 tests passed (50.0%)\n")
        self.assertContains(err, "All 16 tests ran as expected.\n")

    def test_run_chunk(self):
        # Test that we actually select the right chunk.
        all_tests_run = get_tests_run(flatten_batches=True)
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
        self.assertEquals(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.

        # See also the comments in test_exception_raised() about ValueError vs. WorkerException.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the part size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_tests_run(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'BUGX : passes/image.html = IMAGE PASS\n')

        batches = get_tests_run(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_run_singly_actually_runs_tests(self):
        res, _, _, _ = logging_run(['--run-singly', 'failures/unexpected'])
        self.assertEquals(res, 10)

    def test_single_file(self):
        # FIXME: We should consider replacing more of the get_tests_run()-style tests
        # with tests that read the tests_run* files, like this one.
        host = MockHost()
        self.assertTrue(passing_run(['passes/text.html'], tests_included=True, host=host))
        self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/tests_run0.txt'),
                          'passes/text.html\n')

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        tests_run = get_tests_run(['failures/expected/keyboard.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                          'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, host=host)
        self.assertEquals(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        res, out, err, user = logging_run(['--test-list=%s' % filename],
                                          tests_included=True, host=host)
        self.assertEqual(res, -1)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, host=host)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_unexpected_failures(self):
        # Run tests including the unexpected failures.
        res, out, err, user = logging_run(tests_included=True)

        self.assertEqual(res, unexpected_tests_count)
        self.assertNotEmpty(out)
        self.assertNotEmpty(err)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_missing_and_unexpected_results(self):
        # Test that missing and unexpected results are reported correctly in
        # full_results.json.
        host = MockHost()
        res, out, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host, record_results=True)
        file_list = host.filesystem.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEquals(res, 1)
        expected_token = '"unexpected":{"text-image-checksum.html":{"expected":"PASS","actual":"TEXT"},"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_pixel_test_directories(self):
        """Both tests have failing checksums. We include only the first in pixel
        tests, so only that one should fail."""
        host = MockHost()
        args = ['--pixel-tests', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                'failures/unexpected/pixeldir/image_in_pixeldir.html',
                'failures/unexpected/image_not_in_pixeldir.html']
        res, out, err, _ = logging_run(extra_args=args, host=host, record_results=True, tests_included=True)

        self.assertEquals(res, 1)
        expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE"'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)

    def test_missing_and_unexpected_results_with_custom_exit_code(self):
        # Test that a port can customize the exit code by overriding
        # exit_code_from_summarized_results().
        class CustomExitCodePort(TestPort):
            def exit_code_from_summarized_results(self, unexpected_results):
                return unexpected_results['num_regressions'] + unexpected_results['num_missing']

        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
        test_port = CustomExitCodePort(host, options=options)
        res, out, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host, record_results=True, port_obj=test_port)
        self.assertEquals(res, 2)

    def test_crash_with_stderr(self):
        host = MockHost()
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/crash-with-stderr.html',
            ],
            tests_included=True,
            record_results=True,
            host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/checksum-with-matching-image.html',
            ],
            tests_included=True,
            record_results=True,
            host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin.
        if not self._platform.is_mac():
            return
        mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/crash-with-stderr.html',
            ],
            tests_included=True,
            record_results=True,
            host=host)
        expected_crash_log = mock_crash_report
        self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)

    def test_web_process_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin.
        if not self._platform.is_mac():
            return
        mock_crash_report = make_mock_crash_report_darwin('WebProcess', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/web-process-crash-with-stderr.html',
            ],
            tests_included=True,
            record_results=True,
            host=host)
        self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), mock_crash_report)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/text-image-checksum.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            record_results=True,
            host=host)

        # The incremental results should have been generated and then deleted,
        # so the file must no longer exist.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        # This checks that we report only the number of tests that actually failed.
        self.assertEquals(res, 1)

        # This checks that passes/text.html is considered SKIPped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/text-image-checksum.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/text.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run([
                'failures/unexpected/timeout.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            res, out, err, user = logging_run(['--results-directory=' + str(tmpdir)],
                                              tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        res, out, err, user = logging_run(['--results-directory=foo'],
                                          tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        res, out, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
        self.assertEquals(res, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue('Unexpected flakiness' in out.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/tests_run0.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))

        # Now we test that --clobber-old-results does remove the old entries and the old retries,
        # and that we don't retry again.
        host = MockHost()
        res, out, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEquals(res, 1)
        self.assertTrue('Clobbering old results' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue('Unexpected text failures' in out.getvalue())
        self.assertFalse('Unexpected flakiness' in out.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))

    def test_run_order__inline(self):
        # These tests check that we run tests in ascending alphabetical
        # order per directory. HTTP tests are sharded separately from other tests,
        # so we have to test both.
        tests_run = get_tests_run(['passes'], tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['http/tests/passes'], tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, sorted(tests_run))

    def test_tolerance(self):
        class ImageDiffTestPort(TestPort):
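            """A TestPort whose diff_image() records the tolerance option it was given."""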
            def diff_image(self, expected_contents, actual_contents, tolerance=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return (True, 1, None)

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            host = MockHost()
            test_port = ImageDiffTestPort(host, options=options)
            res = passing_run(args, port_obj=test_port, tests_included=True)
            self.assertTrue(res)
            return test_port

        base_args = ['--pixel-tests', '--no-new-test-results', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_virtual(self):
        self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
                                     'virtual/passes/text.html', 'virtual/passes/args.html']))

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/reftest.html'], tests_run)

    def test_reftest_skip_reftests_if_no_ref_tests(self):
        tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)
        tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True, include_reference_html=True)
        self.assertEquals(['passes/reftest.html', 'passes/reftest-expected.html'], tests_run)

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_tests_run(['passes/mismatch.html'], tests_included=True, flatten_batches=True, include_reference_html=True)
        self.assertEquals(['passes/mismatch.html', 'passes/mismatch-expected-mismatch.html'], tests_run)

    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
        host = MockHost()
        res, out, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host, record_results=True)
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"unlistedtest.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
        self.assertTrue(json_string.find('"num_regressions":4') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))

        res, buildbot_output, regular_output, user = logging_run(['--additional-platform-directory', 'foo'])
        self.assertContains(regular_output, '--additional-platform-directory=foo is ignored since it is not absolute\n')

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'BUGX : failures/unexpected/mismatch.html = IMAGE\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                     tests_included=True, host=host))

    def test_no_http_and_force(self):
        # See test_run_force; using --force raises an exception.
        # FIXME: We would like to check the warnings generated.
        self.assertRaises(ValueError, logging_run, ['--force', '--no-http'])

    @staticmethod
    def has_test_of_type(tests, test_type):
        return [test for test in tests if test_type in test]

    def test_no_http_tests(self):
        batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'], flatten_batches=True)
        self.assertTrue(MainTest.has_test_of_type(batch_tests_dryrun, 'http'))
        self.assertTrue(MainTest.has_test_of_type(batch_tests_dryrun, 'websocket'))

        batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'], flatten_batches=True)
        self.assertFalse(MainTest.has_test_of_type(batch_tests_run_no_http, 'http'))
        self.assertFalse(MainTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))

        batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'], flatten_batches=True)
        self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'http'))
        self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'websocket'))

    def test_platform_tests_are_found(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'], tests_included=True, flatten_batches=True)
        self.assertTrue('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Test to ensure that we don't generate -wdiff.html or -pretty.html if wdiff and PrettyPatch
        # aren't available.
        host = MockHost()
        res, out, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'],
                                       tests_included=True, record_results=True, host=host)
        written_files = host.filesystem.written_files
        self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
        self.assertEquals(full_results['has_wdiff'], False)
        self.assertEquals(full_results['has_pretty_patch'], False)

    def test_unsupported_platform(self):
        oc = outputcapture.OutputCapture()
        try:
            oc.capture_output()
            res = run_webkit_tests.main(['--platform', 'foo'])
        finally:
            stdout, stderr, logs = oc.restore_output()

        self.assertEquals(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        self.assertEquals(stdout, '')
        self.assertTrue('unsupported platform' in stderr)

        # This is empty because we don't even get a chance to configure the logger before failing.
        self.assertEquals(logs, '')

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure logging in the
        # child process (e.g., on win32), and we need to make sure that works and we still
        # see the verbose log output. However, we can't use logging_run() because using
        # outputcapture to capture stdout and stderr later results in a nonpicklable host.
        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        buildbot_output = StringIO.StringIO()
        regular_output = StringIO.StringIO()
        res = run_webkit_tests.run(port_obj, options, parsed_args, buildbot_output=buildbot_output, regular_output=regular_output)
        self.assertTrue('text.html passed' in regular_output.getvalue())
        self.assertTrue('image.html passed' in regular_output.getvalue())


class EndToEndTest(unittest.TestCase):
    def parse_full_results(self, full_results_text):
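        """Strip the ADD_RESULTS(...) JSONP wrapper and parse the remaining JSON."""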
        json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        compressed_results = json.loads(json_to_eval)
        return compressed_results

    def test_end_to_end(self):
        host = MockHost()
        res, out, err, user = logging_run(record_results=True, tests_included=True, host=host)

        self.assertEquals(res, unexpected_tests_count)
        results = self.parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # Check to ensure we're passing back the image diff percentage correctly.
        self.assertEquals(results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)

        # Check that we attempted to display the results page in a browser.
        self.assertTrue(user.opened_urls)

    def test_reftest_with_two_notrefs(self):
        # Check the results reported for reftests that have multiple
        # match/mismatch references.
        host = MockHost()
        res, out, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host, record_results=True)
        file_list = host.filesystem.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        results = self.parse_full_results(json_string)
        self.assertTrue("multiple-match-success.html" not in results["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-mismatch-success.html" not in results["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-both-success.html" not in results["tests"]["reftests"]["foo"])
        self.assertEqual(results["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "image_diff_percent": 1, 'is_reftest': True})
        self.assertEqual(results["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "is_mismatch_reftest": True})
        self.assertEqual(results["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "is_mismatch_reftest": True})


class RebaselineTest(unittest.TestCase, StreamTestingMixin):
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that file_list contains the new baselines."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertContains(err, baseline_msg)

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        res, out, err, _ = logging_run(['--pixel-tests',
                        '--reset-results',
                        'passes/image.html',
                        'failures/expected/missing_image.html'],
                        tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEquals(res, 0)
        self.assertEmpty(out)
        self.assertEqual(len(file_list), 4)
        self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)

    def test_missing_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        res, out, err, _ = logging_run(['--no-show-results',
                     'failures/unexpected/missing_text.html',
                     'failures/unexpected/missing_image.html',
                     'failures/unexpected/missing_audio.html',
                     'failures/unexpected/missing_render_tree_dump.html'],
                     tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEquals(res, 0)
        self.assertNotEmpty(out)
        self.assertEqual(len(file_list), 6)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations in the version-specific directories
        # for both existing and new baselines.
        host = MockHost()
        res, out, err, _ = logging_run(['--pixel-tests',
                        '--new-baseline',
                        'passes/image.html',
                        'failures/expected/missing_image.html'],
                    tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEquals(res, 0)
        self.assertEmpty(out)
        self.assertEqual(len(file_list), 4)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)


class PortTest(unittest.TestCase):
    def assert_mock_port_works(self, port_name, args=None):
        args = args or []
        self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))

    def disabled_test_chromium_mac_lion(self):
        self.assert_mock_port_works('chromium-mac-lion')

    def disabled_test_chromium_mac_lion_in_test_shell_mode(self):
        self.assert_mock_port_works('chromium-mac-lion', args=['--additional-drt-flag=--test-shell'])

    def disabled_test_qt_linux(self):
        self.assert_mock_port_works('qt-linux')

    def disabled_test_mac_lion(self):
        self.assert_mock_port_works('mac-lion')


if __name__ == '__main__':
    unittest.main()