webkitpy: Implement device type specific expected results (Part 2)
[WebKit-https.git] / Tools / Scripts / webkitpy / layout_tests / run_webkit_tests_integrationtest.py
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import codecs
import json
import logging
import os
import platform
import Queue
import re
import StringIO
import sys
import thread
import threading
import time
import unittest

from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost

from webkitpy import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.models.test_run_results import INTERRUPTED_EXIT_STATUS
from webkitpy.port import Port
from webkitpy.port import test
from webkitpy.test.skip import skip_if
from webkitpy.xcode.device_type import DeviceType


def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
    extra_args = extra_args or []
    args = []
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if not new_results:
        args.append('--no-new-test-results')

    if '--child-processes' not in extra_args:
        args.extend(['--child-processes', 1])

    if '--world-leaks' not in extra_args:
        args.append('--world-leaks')

    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)


def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
    options, parsed_args = parse_args(extra_args, tests_included)
    if not port_obj:
        host = host or MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj

    logging_stream = StringIO.StringIO()
    run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    return run_details.exit_code == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
    options, parsed_args = parse_args(extra_args=extra_args,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = host or MockHost()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
    return (run_details, output, host.user)


def run_and_capture(port_obj, options, parsed_args, shared_port=True):
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        logging_stream = StringIO.StringIO()
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()
    return (run_details, logging_stream)


def get_tests_run(args, host=None):
    results = get_test_results(args, host)
    return [result.test_name for result in results]


def get_test_batches(args, host=None):
    results = get_test_results(args, host)
    batches = []
    batch = []
    current_pid = None
    for result in results:
        if batch and result.pid != current_pid:
            batches.append(batch)
            batch = []
        batch.append(result.test_name)
        current_pid = result.pid
    if batch:
        batches.append(batch)
    return batches

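
# The loop above tracks worker pids by hand. As a minimal sketch (not one of
# the production helpers), the same batching can be expressed with
# itertools.groupby, assuming results arrive grouped by the worker that
# produced them:
def _get_test_batches_groupby_sketch(results):
    import itertools
    # Group consecutive results that share a pid; each group is one batch.
    return [[r.test_name for r in group]
            for _, group in itertools.groupby(results, key=lambda r: r.pid)]
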

def get_test_results(args, host=None):
    options, parsed_args = parse_args(args, tests_included=True)

    host = host or MockHost()
    port_obj = host.port_factory.get(port_name=options.platform, options=options)

    oc = outputcapture.OutputCapture()
    oc.capture_output()
    logging_stream = StringIO.StringIO()
    try:
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()

    all_results = []
    if run_details.initial_results:
        all_results.extend(run_details.initial_results.all_results)

    if run_details.retry_results:
        all_results.extend(run_details.retry_results.all_results)
    return all_results


def parse_full_results(full_results_text):
    json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
    compressed_results = json.loads(json_to_eval)
    return compressed_results

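
# full_results.json is written as JSONP ("ADD_RESULTS(...);") so results.html
# can load it via a <script> tag. The replace() calls above are enough for the
# mock data used in these tests; a sketch of a slightly stricter variant
# (hypothetical helper, not used by these tests) anchors the wrapper at the
# ends of the payload instead of replacing it anywhere in the string:
def _parse_jsonp_sketch(text):
    match = re.match(r'^ADD_RESULTS\((.*)\);?\s*$', text, re.DOTALL)
    return json.loads(match.group(1) if match else text)
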


class StreamTestingMixin(object):
    def assertContains(self, stream, string):
        self.assertTrue(string in stream.getvalue())

    def assertEmpty(self, stream):
        self.assertFalse(stream.getvalue())

    def assertNotEmpty(self, stream):
        self.assertTrue(stream.getvalue())


class RunTest(unittest.TestCase, StreamTestingMixin):
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()

    def test_basic(self):
        options, args = parse_args(tests_included=True)
        logging_stream = StringIO.StringIO()
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options)
        details = run_webkit_tests.run(port_obj, options, args, logging_stream)

        # These numbers will need to be updated whenever we add new tests.
        self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
        self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
        self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
        self.assertEqual(details.retry_results.total, test.TOTAL_RETRIES)

        one_line_summary = "%d tests ran as expected, %d didn't:\n" % (
            details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name),
            len(details.initial_results.unexpected_results_by_name))
        self.assertTrue(one_line_summary in logging_stream.buflist)

        # Ensure the results were summarized properly.
        self.assertEqual(details.summarized_results['num_regressions'], details.exit_code)

        # Ensure the image diff percentage is in the results.
        self.assertEqual(details.summarized_results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)

        # Ensure the results were written out and displayed.
        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        self.assertEqual(json.loads(json_to_eval), details.summarized_results)

        self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_batch_size(self):
        batch_tests_run = get_test_batches(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/passes', 'passes'],
                tests_included=True, shared_port=False)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))

    def test_dryrun(self):
        tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(tests_run, [])

        tests_run = get_tests_run(['-n'])
        self.assertEqual(tests_run, [])

    def test_exception_raised(self):
        # Exceptions raised by a worker are handled differently depending on
        # whether the worker runs in-process or out-of-process. In-process
        # exceptions propagate as normal, which gives us the full stack trace
        # and traceback from the worker. The downside is that the error could
        # be anything, but that is actually useful in testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions (a subclass of BaseException), which carry a string
        # capture of the worker's stack that can be printed, but which don't
        # display properly in the unit test exception handlers.
        self.assertRaises(BaseException, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(BaseException, logging_run,
                ['--child-processes', '2', '--force', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)

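    # A minimal sketch of the cross-process re-packaging described above,
    # assuming a WorkerException-like type; the real message loop in webkitpy
    # carries more state than this:
    @staticmethod
    def _repackage_exception_sketch(run_worker):
        import traceback

        class SketchWorkerException(BaseException):
            """Carries a string capture of the worker's stack."""

        try:
            run_worker()
        except BaseException:
            # Only the formatted traceback string crosses the process
            # boundary; the original exception object is not picklable.
            raise SketchWorkerException(traceback.format_exc())
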
    def test_full_results_html(self):
        # FIXME: verify html?
        details, _, _ = logging_run(['--full-results-html'])
        self.assertEqual(details.exit_code, 0)

    def test_hung_thread(self):
        details, err, _ = logging_run(['--run-singly', '--time-out-ms=50', 'failures/expected/hang.html'], tests_included=True)
        # Note that hang.html is marked as WontFix and all WontFix tests are
        # expected to Pass, so that actually running them generates an "unexpected" error.
        self.assertEqual(details.exit_code, 1)
        self.assertNotEmpty(err)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        details, _, _ = logging_run(['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)
        self.assertEqual(details.exit_code, INTERRUPTED_EXIT_STATUS)

        if self.should_test_processes:
            _, regular_output, _ = logging_run(['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True, shared_port=False)
            self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.buflist]))

    def test_no_tests_found(self):
        details, err, _ = logging_run(['resources'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        details, err, _ = logging_run(['foo'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_natural_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)

    def test_natural_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)

    def test_random_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(tests_run.count('passes/audio.html'), 2)
        self.assertEqual(tests_run.count('passes/args.html'), 2)

    def test_no_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_with_directory_entries_in_natural_order(self):
        tests_to_run = ['http/tests/ssl', 'http/tests/passes']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])

    def test_gc_between_tests(self):
        self.assertTrue(passing_run(['--gc-between-tests']))

    def test_check_for_world_leaks(self):
        self.assertTrue(passing_run(['--world-leaks']))

    def test_complex_text(self):
        self.assertTrue(passing_run(['--complex-text']))

    def test_threaded(self):
        self.assertTrue(passing_run(['--threaded']))

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'])
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
                         num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'])
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEqual(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])

        # Now check that we don't run anything.
        self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])

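    # A minimal sketch of the four --skipped modes exercised above, assuming a
    # skip set computed from expectations (the real filtering lives in the
    # manager/port code, not in this file):
    @staticmethod
    def _apply_skipped_mode_sketch(mode, tests, skipped, explicit):
        if mode == 'only':
            return [t for t in tests if t in skipped]
        if mode == 'always':
            return [t for t in tests if t not in skipped]
        if mode == 'ignore':
            return list(tests)
        # 'default': skip skipped tests unless they were named explicitly.
        return [t for t in tests if t not in skipped or t in explicit]
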
    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests should be: number_of_tests *
        # repeat_each * iterations
        host = MockHost()
        _, err, _ = logging_run(
            ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
            tests_included=True, host=host)
        self.assertContains(err, "All 16 tests ran as expected.\n")

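    # Why 16: 2 tests * repeat_each 4 * iterations 2. A sketch of how the two
    # flags compose, matching test_repeat_each and test_iterations above
    # (repeat-each duplicates adjacently, iterations repeats the whole list):
    @staticmethod
    def _expand_test_list_sketch(tests, repeat_each, iterations):
        repeated = [t for t in tests for _ in range(repeat_each)]
        return repeated * iterations
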
    def test_run_chunk(self):
        # Test that we actually select the right chunk.
        all_tests_run = get_tests_run(['passes', 'failures'])
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
        self.assertEqual(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
        self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the number of parts
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)

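    # A minimal sketch of the wrap-around chunk selection the tests above rely
    # on (hypothetical helper; the real slicing lives in the manager): chunk
    # "i:n" takes n tests starting at offset i * n in the alphabetized list,
    # wrapping past the end when the list length isn't a multiple of n.
    @staticmethod
    def _select_chunk_sketch(tests, chunk_index, chunk_size):
        start = (chunk_index * chunk_size) % len(tests)
        doubled = tests + tests  # makes the wrap-around a plain slice
        return doubled[start:start + chunk_size]
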
    def test_run_singly(self):
        batch_tests_run = get_test_batches(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')

        batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_run_singly_actually_runs_tests(self):
        details, _, _ = logging_run(['--run-singly'], tests_included=True)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 1)  # failures/expected/hang.html actually passes w/ --run-singly.

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'])
        self.assertEqual(tests_run, ['passes/text.html'])

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'])
        self.assertEqual(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        # The misspelled filename ('keybaord') matches no test, so nothing should run.
        tests_run = get_tests_run(['failures/expected/keybaord.html'])
        self.assertEqual([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                         'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, -1)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_missing_and_unexpected_results(self):
        # Test that missing and unexpected results are reported correctly in
        # full_results.json.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"text-image-checksum.html":{"report":"REGRESSION","expected":"PASS","actual":"IMAGE+TEXT","image_diff_percent":1},"missing_text.html":{"report":"MISSING","expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_pixel_test_directories(self):
        # Both tests have failing checksums. We include only the first in pixel
        # tests, so only that one should fail.
        host = MockHost()
        args = ['--pixel-tests', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                'failures/unexpected/pixeldir/image_in_pixeldir.html',
                'failures/unexpected/image_not_in_pixeldir.html']
        details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)

        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"report":"REGRESSION","expected":"PASS","actual":"IMAGE"'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)

    def test_missing_and_unexpected_results_with_custom_exit_code(self):
        # Test that a port can base its exit code on missing results as well
        # as regressions.
        class CustomExitCodePort(test.TestPort):
            def exit_code_from_summarized_results(self, unexpected_results):
                return unexpected_results['num_regressions'] + unexpected_results['num_missing']

        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
        test_port = CustomExitCodePort(host, options=options)
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host, port_obj=test_port)
        self.assertEqual(details.exit_code, 2)

    def test_crash_with_stderr(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"report":"REGRESSION","expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin and Windows.
        if not self._platform.is_mac() or self._platform.is_win():
            return
        mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/tmp/layout-test-results/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html', '--dump-render-tree'], tests_included=True, host=host)
        expected_crash_log = mock_crash_report
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)

    def test_web_process_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin and Windows.
        if not self._platform.is_mac() or self._platform.is_win():
            return
        mock_crash_report = make_mock_crash_report_darwin('WebProcess', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/tmp/layout-test-results/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
        logging_run(['failures/unexpected/web-process-crash-with-stderr.html'], tests_included=True, host=host)
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), mock_crash_report)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        details, regular_output, user = logging_run(
            ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
            tests_included=True, host=host)

        # This checks that the incremental results were generated and then deleted.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        # This checks that we report only the number of tests that actually failed.
        self.assertEqual(details.exit_code, 1)

        # This checks that passes/text.html is considered SKIPped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        _, _, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))

        # Now we test that --clobber-old-results does remove the old entries and the old retries,
        # and that we don't retry again.
        host = MockHost()
        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Clobbering old results' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))

    def test_retrying_force_pixel_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--no-pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json = parse_full_results(json_string)
        self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "image_diff_percent": 1, "report": "REGRESSION"})
        self.assertFalse(json["pixel_tests_enabled"])
        self.assertEqual(details.enabled_pixel_tests_in_retry, True)

    def test_failed_text_with_missing_pixel_results_on_retry(self):
        # Test what happens when pixel results are missing on retry.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            '--no-new-test-results', '--no-pixel-tests',
            'failures/unexpected/text-image-missing.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"text-image-missing.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT MISSING","is_missing_image":true}}'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_retrying_uses_retries_directory(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))

    def test_run_order__inline(self):
        # These next tests test that we run the tests in ascending alphabetical
        # order per directory. HTTP tests are sharded separately from other tests,
        # so we have to test both.
        tests_run = get_tests_run(['-i', 'passes/passes', 'passes'])
        self.assertEqual(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['http/tests/passes'])
        self.assertEqual(tests_run, sorted(tests_run))

    def test_tolerance(self):
        class ImageDiffTestPort(test.TestPort):
            def diff_image(self, expected_contents, actual_contents, tolerance=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return (True, 1, None)

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            host = MockHost()
            test_port = ImageDiffTestPort(host, options=options)
            res = passing_run(args, port_obj=test_port, tests_included=True)
            self.assertTrue(res)
            return test_port

        base_args = ['--pixel-tests', '--no-new-test-results', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_skip_reftests_if_no_ref_tests(self):
        tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)
        tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'])
        self.assertEqual([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_test_results(['passes/reftest.html'])
        self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_test_results(['passes/mismatch.html'])
        self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])

    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
        host = MockHost()
        _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"unlistedtest.html":{"report":"MISSING","expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
        self.assertTrue(json_string.find('"num_regressions":4') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
        self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                    tests_included=True, host=host))

    def test_no_http_and_force(self):
        # See test_run_force; using --force raises an exception.
        # FIXME: We would like to check the warnings generated.
        self.assertRaises(ValueError, logging_run, ['--force', '--no-http'])

    @staticmethod
    def has_test_of_type(tests, type):
        return [test for test in tests if type in test]

    def test_no_http_tests(self):
        batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'websocket'))

        batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'])
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'http'))
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))

        batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'websocket'))

    def test_platform_tests_are_found(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
        self.assertTrue('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Test to ensure that we don't generate -pretty.html if PrettyPatch isn't available.
        host = MockHost()
        _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
        self.assertEqual(full_results['has_pretty_patch'], False)

    def test_unsupported_platform(self):
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)

        self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(), '')
        self.assertTrue('unsupported platform' in stderr.getvalue())

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure logging in the
        # child process (e.g., on win32) and we need to make sure that works and we still
        # see the verbose log output. However, we can't use logging_run() because using
        # outputcapture to capture stdout and stderr later results in a nonpicklable host.

        # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
        if not self.should_test_processes:
            return

        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO.StringIO()
        run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
        self.assertTrue('text.html passed' in logging_stream.getvalue())
        self.assertTrue('image.html passed' in logging_stream.getvalue())

    def test_device_type_test_division(self):
        host = MockHost()
        port = host.port_factory.get('ios-simulator')

        host.filesystem.write_text_file('/mock-checkout/LayoutTests/test1.html', '')
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/ios/test2.html', '')
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/ipad/test3.html', '')
        host.filesystem.write_text_file('/MOCK output of child process/ImageDiff', '')

        oc = outputcapture.OutputCapture()
        try:
            oc.capture_output()
            logging_stream = StringIO.StringIO()
            run_webkit_tests.run(port, run_webkit_tests.parse_args(['--debug-rwt-logging', '-n', '--no-build', '--root', '/build'])[0], [], logging_stream=logging_stream)
        finally:
            output, err, _ = oc.restore_output()

        for line in logging_stream.getvalue().splitlines():
            if str(DeviceType.from_string('iPhone SE')) in line:
                self.assertTrue('Skipping 2 tests' in line)
            elif str(DeviceType.from_string('iPad (5th generation)')) in line:
                self.assertTrue('Skipping 1 test' in line)
            elif str(DeviceType.from_string('iPhone 7')) in line:
                self.assertTrue('Skipping 0 tests' in line)

    def test_device_type_specific_listing(self):
        host = MockHost()
        port = host.port_factory.get('ios-simulator')

        host.filesystem.write_text_file('/mock-checkout/LayoutTests/test1.html', '')
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/ios/test2.html', '')
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/ipad/test3.html', '')

        oc = outputcapture.OutputCapture()
        try:
            oc.capture_output()
            logging_stream = StringIO.StringIO()
            run_webkit_tests._print_expectations(port, run_webkit_tests.parse_args([])[0], [], logging_stream=logging_stream)
        finally:
            output, _, _ = oc.restore_output()

        current_type = None
        by_type = {}
        for line in output.splitlines():
            if not line:
                continue
            if 'Tests to run' in line:
                current_type = DeviceType.from_string(line.split('for ')[-1].split(' running')[0]) if 'for ' in line else None
                by_type[current_type] = []
                continue
            by_type[current_type].append(line)

        self.assertEqual(3, len(by_type.keys()))
        self.assertEqual(2, len(by_type[DeviceType.from_string('iPhone SE')]))
        self.assertEqual(1, len(by_type[DeviceType.from_string('iPad (5th generation)')]))
        self.assertEqual(0, len(by_type[DeviceType.from_string('iPhone 7')]))

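
# The two RunTest device-type tests above exercise the Part 2 behavior: tests
# under platform/ipad run only on iPad-like simulators, platform/ios tests run
# on any iOS device, and generic tests run on the default device. A minimal
# sketch of that bucketing (hypothetical helper; the real division lives in
# the simulator port and manager):
def _divide_tests_by_device_sketch(tests):
    buckets = {'iPhone SE': [], 'iPad (5th generation)': []}
    for test_file in tests:
        if test_file.startswith('platform/ipad/'):
            buckets['iPad (5th generation)'].append(test_file)
        else:
            # Generic and platform/ios tests run on the default iPhone device.
            buckets['iPhone SE'].append(test_file)
    return buckets
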
897
class EndToEndTest(unittest.TestCase):
    def test_reftest_with_two_notrefs(self):
        # Test that reftests with multiple match/mismatch references are
        # reported correctly, and that the "-success" variants are not
        # reported as failures.
        host = MockHost()
        _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()

        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json = parse_full_results(json_string)
        self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "image_diff_percent": 1, "report": "REGRESSION"})
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "report": "REGRESSION"})
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "report": "REGRESSION"})

class RebaselineTest(unittest.TestCase, StreamTestingMixin):
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that file_list contains the expected baselines."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertContains(err, baseline_msg)

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 9)
        self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)

    def test_missing_results(self):
        # Test that missing results are written out as new baselines, to the
        # generic or platform-specific expected locations as appropriate.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 11)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations in the version-specific directories
        # for both existing and new baselines.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 9)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)

class PortTest(unittest.TestCase):
    def assert_mock_port_works(self, port_name, args=None):
        args = args or []
        self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))

    def disabled_test_mac_lion(self):
        self.assert_mock_port_works('mac-lion')

class MainTest(unittest.TestCase):
    def test_exception_handling(self):
        orig_run_fn = run_webkit_tests.run

        # unused args pylint: disable=W0613
        def interrupting_run(port, options, args, stderr):
            raise KeyboardInterrupt

        def successful_run(port, options, args, stderr):

            class FakeRunDetails(object):
                exit_code = -1

            return FakeRunDetails()

        def exception_raising_run(port, options, args, stderr):
            assert False

        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        try:
            run_webkit_tests.run = interrupting_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, INTERRUPTED_EXIT_STATUS)

            run_webkit_tests.run = successful_run
            res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
            self.assertEqual(res, -1)

            run_webkit_tests.run = exception_raising_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        finally:
            run_webkit_tests.run = orig_run_fn
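
    # The try/finally above hand-rolls a monkeypatch. A minimal sketch of the
    # same swap-and-restore as a context manager (standard library only;
    # offered as an alternative, not what this test uses):
    @staticmethod
    def _patched_run_sketch(replacement):
        import contextlib

        @contextlib.contextmanager
        def _patch():
            original = run_webkit_tests.run
            run_webkit_tests.run = replacement
            try:
                yield
            finally:
                run_webkit_tests.run = original

        return _patch()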