# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import json
import sys
import unittest

from webkitcorepy import StringIO, OutputCapture

from webkitpy.common.system import path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.models.test_run_results import INTERRUPTED_EXIT_STATUS
from webkitpy.port import test
from webkitpy.xcode.device_type import DeviceType


def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
    extra_args = extra_args or []
    args = []
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if not new_results:
        args.append('--no-new-test-results')

    if '--child-processes' not in extra_args:
        args.extend(['--child-processes', '1'])

    if '--world-leaks' not in extra_args:
        args.append('--world-leaks')

    if '--accessibility-isolated-tree' not in extra_args:
        args.append('--accessibility-isolated-tree')

    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)

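# Example use of parse_args (a sketch): defaults like the mock 'test' platform
# and a single child process are injected unless the caller overrides them.
#
#   options, parsed_args = parse_args(['--pixel-tests'], tests_included=True)
#   assert options.platform == 'test'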

def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
    options, parsed_args = parse_args(extra_args, tests_included)
    if not port_obj:
        host = host or MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj

    logging_stream = StringIO()
    run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    return run_details.exit_code == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
    options, parsed_args = parse_args(extra_args=extra_args,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = host or MockHost()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
    return (run_details, output, host.user)


def run_and_capture(port_obj, options, parsed_args, shared_port=True):
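    # Suppress stdout/stderr via OutputCapture while collecting the run's log
    # output in an in-memory stream that callers can make assertions about.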
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
    with OutputCapture():
        logging_stream = StringIO()
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)

    return (run_details, logging_stream)


def get_tests_run(args, host=None):
    results = get_test_results(args, host)
    return [result.test_name for result in results]


def get_test_batches(args, host=None):
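    # Group results into batches by worker pid: a new batch starts whenever a
    # result comes from a different worker process than the previous one
    # (e.g. results with pids [1, 1, 2] yield two batches).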
    results = get_test_results(args, host)
    batches = []
    batch = []
    current_pid = None
    for result in results:
        if batch and result.pid != current_pid:
            batches.append(batch)
            batch = []
        batch.append(result.test_name)
        current_pid = result.pid
    if batch:
        batches.append(batch)
    return batches


def get_test_results(args, host=None):
    options, parsed_args = parse_args(args, tests_included=True)

    host = host or MockHost()
    port_obj = host.port_factory.get(port_name=options.platform, options=options)

    with OutputCapture():
        logging_stream = StringIO()
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)

    all_results = []
    if run_details.initial_results:
        all_results.extend(run_details.initial_results.all_results)

    if run_details.retry_results:
        all_results.extend(run_details.retry_results.all_results)
    return all_results


def parse_full_results(full_results_text):
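    # full_results.json is JSONP of the form 'ADD_RESULTS({...});'; strip the
    # wrapper and parse the JSON payload. For example (a sketch):
    #   parse_full_results('ADD_RESULTS({"num_regressions": 1});')
    #   => {'num_regressions': 1}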
    json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
    compressed_results = json.loads(json_to_eval)
    return compressed_results


class StreamTestingMixin(object):
    def assertContains(self, stream, string):
        self.assertTrue(string in stream.getvalue())

    def assertEmpty(self, stream):
        self.assertFalse(stream.getvalue())

    def assertNotEmpty(self, stream):
        self.assertTrue(stream.getvalue())


class RunTest(unittest.TestCase, StreamTestingMixin):
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        # FIXME: Multiprocessing doesn't do well when nested in Python 3 (https://bugs.webkit.org/show_bug.cgi?id=205280)
        self.should_test_processes = not self._platform.is_win() and sys.version_info < (3, 0)

    def test_basic(self):
        options, args = parse_args(tests_included=True)
        logging_stream = StringIO()
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options)
        details = run_webkit_tests.run(port_obj, options, args, logging_stream)

        # These numbers will need to be updated whenever we add new tests.
        self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
        self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
        self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
        self.assertEqual(details.retry_results.total, test.TOTAL_RETRIES)

        one_line_summary = "%d tests ran as expected, %d didn't:\n" % (
            details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name),
            len(details.initial_results.unexpected_results_by_name))
        self.assertTrue(one_line_summary in logging_stream.getvalue())

        # Ensure the results were summarized properly.
        self.assertEqual(details.summarized_results['num_regressions'], details.exit_code)

        # Ensure the image diff percentage is in the results.
        self.assertEqual(details.summarized_results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)

        # Ensure the results were written out and displayed.
        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        self.assertEqual(json.loads(json_to_eval), details.summarized_results)

        self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_batch_size(self):
        batch_tests_run = get_test_batches(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.getvalue().splitlines()]))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/passes', 'passes'],
                tests_included=True, shared_port=False)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.getvalue().splitlines()]))

    def test_dryrun(self):
        tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(tests_run, [])

        tests_run = get_tests_run(['-n'])
        self.assertEqual(tests_run, [])

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether the worker runs in-process or not. Inline exceptions propagate
        # normally, which gives us the full stack trace and traceback from the
        # worker. The downside is that it could be any error, but that is
        # actually useful in testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions (a subclass of BaseException), which carry a string
        # capture of the stack that can be printed, but they don't display
        # properly in the unit test exception handlers.
        self.assertRaises(BaseException, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(BaseException, logging_run,
                ['--child-processes', '2', '--force', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)

    def test_full_results_html(self):
        # FIXME: verify html?
        details, _, _ = logging_run(['--full-results-html'])
        self.assertEqual(details.exit_code, 0)

    def test_hung_thread(self):
        details, err, _ = logging_run(['--run-singly', '--time-out-ms=50', 'failures/expected/hang.html'], tests_included=True)
        # Note that hang.html is marked as WontFix and all WontFix tests are
        # expected to Pass, so that actually running them generates an "unexpected" error.
        self.assertEqual(details.exit_code, 1)
        self.assertNotEmpty(err)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        details, _, _ = logging_run(['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)
        self.assertEqual(details.exit_code, INTERRUPTED_EXIT_STATUS)

        if self.should_test_processes:
            _, regular_output, _ = logging_run(['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True, shared_port=False)
            self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.getvalue().splitlines()]))

    def test_all_tests_skipped(self):
        details, err, _ = logging_run(['foo'], tests_included=True)
        self.assertEqual(details.exit_code, 0)
        self.assertContains(err, 'All tests skipped.\n')

    def test_natural_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)

    def test_natural_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)

    def test_random_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(tests_run.count('passes/audio.html'), 2)
        self.assertEqual(tests_run.count('passes/args.html'), 2)

    def test_no_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_with_directory_entries_in_natural_order(self):
        tests_to_run = ['http/tests/ssl', 'http/tests/passes']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])

    def test_gc_between_tests(self):
        self.assertTrue(passing_run(['--gc-between-tests']))

    def test_check_for_world_leaks(self):
        self.assertTrue(passing_run(['--world-leaks']))

    def test_complex_text(self):
        self.assertTrue(passing_run(['--complex-text']))

    def test_threaded(self):
        self.assertTrue(passing_run(['--threaded']))

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'])
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
                         num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'])
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEqual(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])

        # Now check that we don't run anything.
        self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests should be: number_of_tests *
        # repeat_each * iterations
        host = MockHost()
        _, err, _ = logging_run(
            ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
            tests_included=True, host=host)
        self.assertContains(err, "All 16 tests ran as expected.\n")

    def test_run_chunk(self):
        # Test that we actually select the right chunk
        all_tests_run = get_tests_run(['passes', 'failures'])
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
        self.assertEqual(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3', '--skipped', 'always'] + tests_to_run)
        self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2', '--skipped', 'always'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3', '--skipped', 'always'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_test_batches(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')

        batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_run_singly_actually_runs_tests(self):
        details, _, _ = logging_run(['--run-singly'], tests_included=True)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 1)  # failures/expected/hang.html actually passes w/ --run-singly.

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'])
        self.assertEqual(tests_run, ['passes/text.html'])

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'])
        self.assertEqual(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        # 'keybaord.html' does not name a real test; tests that don't exist
        # are treated as skipped, so nothing should run.
        tests_run = get_tests_run(['failures/expected/keybaord.html'])
        self.assertEqual([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                         'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, -1)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_missing_and_unexpected_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 1)
        expected_dictionary = {
            'version': 4,
            'fixable': 3,
            'skipped': 0,
            'num_passes': 0,
            'num_flaky': 0,
            'num_missing': 1,
            'num_regressions': 1,
            'uses_expectations_file': True,
            'interrupted': False,
            'layout_tests_dir': '/test.checkout/LayoutTests',
            'has_pretty_patch': False,
            'pixel_tests_enabled': True,
            'other_crashes': {},
            'date': '10:10AM on December 13, 2019',
            'tests': {
                'failures': {
                    'expected': {
                        'missing_image.html': {
                            'expected': 'PASS MISSING',
                            'actual': 'MISSING',
                            'is_missing_image': True,
                        },
                    }, 'unexpected': {
                        'missing_text.html': {
                            'report': 'MISSING',
                            'expected': 'PASS',
                            'actual': 'MISSING',
                            'is_missing_text': True,
                        }, 'text-image-checksum.html': {
                            'report': 'REGRESSION',
                            'expected': 'PASS',
                            'actual': 'IMAGE+TEXT',
                            'image_diff_percent': 1,
                        },
                    },
                },
            },
        }
        actual_dictionary = json.loads(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')[len('ADD_RESULTS('):-2])
        self.assertEqual(
            sorted(list(expected_dictionary['tests']['failures']['expected'])),
            sorted(list(actual_dictionary['tests']['failures']['expected'])),
        )
        self.assertEqual(
            sorted(list(expected_dictionary['tests']['failures']['unexpected'])),
            sorted(list(actual_dictionary['tests']['failures']['unexpected'])),
        )
        self.assertEqual(expected_dictionary['num_regressions'], actual_dictionary['num_regressions'])
        self.assertEqual(expected_dictionary['num_flaky'], actual_dictionary['num_flaky'])
        self.assertEqual(expected_dictionary['num_missing'], actual_dictionary['num_missing'])

    def test_pixel_test_directories(self):
        host = MockHost()

        # Both tests have failing checksums. We include only the first in pixel
        # tests, so only that one should fail.
        args = ['--pixel-tests', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                'failures/unexpected/pixeldir/image_in_pixeldir.html',
                'failures/unexpected/image_not_in_pixeldir.html']
        details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)

        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"report":"REGRESSION","expected":"PASS","actual":"IMAGE"'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)

    def test_missing_and_unexpected_results_with_custom_exit_code(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        class CustomExitCodePort(test.TestPort):
            def exit_code_from_summarized_results(self, unexpected_results):
                return unexpected_results['num_regressions'] + unexpected_results['num_missing']

        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
        test_port = CustomExitCodePort(host, options=options)
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host, port_obj=test_port)
        self.assertEqual(details.exit_code, 2)

    def test_crash_with_stderr(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        actual_dictionary = json.loads(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')[len('ADD_RESULTS('):-2])
        expected_dictionary = {
            'version': 4,
            'fixable': 1,
            'skipped': 0,
            'num_passes': 0,
            'num_flaky': 0,
            'num_missing': 0,
            'num_regressions': 1,
            'uses_expectations_file': True,
            'interrupted': False,
            'layout_tests_dir': '/test.checkout/LayoutTests',
            'has_pretty_patch': False,
            'pixel_tests_enabled': True,
            'other_crashes': {},
            'date': '10:18AM on December 13, 2019',
            'tests': {
                'failures': {
                    'unexpected': {
                        'crash-with-stderr.html': {
                            'has_stderr': True,
                            'report': 'REGRESSION',
                            'expected': 'PASS',
                            'actual': 'CRASH',
                        },
                    },
                },
            },
        }
        self.assertEqual(
            sorted(list(expected_dictionary['tests']['failures']['unexpected'])),
            sorted(list(actual_dictionary['tests']['failures']['unexpected'])),
        )

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin and Windows.
        if not self._platform.is_mac() or self._platform.is_win():
            return
        mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/tmp/layout-test-results/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html', '--dump-render-tree'], tests_included=True, host=host)
        expected_crash_log = mock_crash_report
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)

    def test_web_process_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin and Windows.
        if not self._platform.is_mac() or self._platform.is_win():
            return
        mock_crash_report = make_mock_crash_report_darwin('WebProcess', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/tmp/layout-test-results/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
        logging_run(['failures/unexpected/web-process-crash-with-stderr.html'], tests_included=True, host=host)
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), mock_crash_report)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        details, regular_output, user = logging_run(
            ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
            tests_included=True, host=host)

        # By returning False, we know that the incremental results were generated and then deleted.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        # This checks that we report only the number of tests that actually failed.
        self.assertEqual(details.exit_code, 1)

        # This checks that passes/text.html is considered SKIPped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        _, _, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))

        # Now we test that --clobber-old-results does remove the old entries and the old retries,
        # and that we don't retry again.
        host = MockHost()
        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Deleting results directory' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))

    def test_retrying_force_pixel_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--no-pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = parse_full_results(json_string)
        self.assertEqual(full_results["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "image_diff_percent": 1, "report": "REGRESSION"})
        self.assertFalse(full_results["pixel_tests_enabled"])
        self.assertEqual(details.enabled_pixel_tests_in_retry, True)

    def test_failed_text_with_missing_pixel_results_on_retry(self):
        # Test what happens when pixel results are missing on retry.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            '--no-new-test-results', '--no-pixel-tests',
            'failures/unexpected/text-image-missing.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"text-image-missing.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT MISSING","is_missing_image":true}}'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_retrying_uses_retries_directory(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))

    def test_run_order__inline(self):
        # These next tests test that we run the tests in ascending alphabetical
        # order per directory. HTTP tests are sharded separately from other tests,
        # so we have to test both.
        tests_run = get_tests_run(['-i', 'passes/passes', 'passes'])
        self.assertEqual(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['http/tests/passes'])
        self.assertEqual(tests_run, sorted(tests_run))

    def test_tolerance(self):
        class ImageDiffTestPort(test.TestPort):
            def diff_image(self, expected_contents, actual_contents, tolerance=None):
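                # Record the configured tolerance at the time diff_image is
                # called so the test below can assert on it.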
                self.tolerance_used_for_diff_image = self._options.tolerance
                return (True, 1, None)

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            host = MockHost()
            test_port = ImageDiffTestPort(host, options=options)
            res = passing_run(args, port_obj=test_port, tests_included=True)
            self.assertTrue(res)
            return test_port

        base_args = ['--pixel-tests', '--no-new-test-results', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_skip_reftests_if_no_ref_tests(self):
        tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)
        tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'])
        self.assertEqual([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_test_results(['passes/reftest.html'])
        self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_test_results(['passes/mismatch.html'])
        self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
        self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                    tests_included=True, host=host))

    def test_no_http_and_force(self):
        # See test_run_force: using --force raises an exception.
        # FIXME: We would like to check the warnings generated.
        self.assertRaises(ValueError, logging_run, ['--force', '--no-http'])

    @staticmethod
    def has_test_of_type(tests, test_type):
        return [test_name for test_name in tests if test_type in test_name]

    def test_no_http_tests(self):
        batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'websocket'))

        batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'])
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'http'))
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))

        batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'websocket'))

    def test_platform_tests_are_found(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
        self.assertTrue('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Test to ensure that we don't generate -pretty.html if PrettyPatch isn't available.
        host = MockHost()
        _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertTrue(any(file_path.endswith('-diff.txt') for file_path in written_files.keys()))
        self.assertFalse(any(file_path.endswith('-pretty-diff.html') for file_path in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
        self.assertEqual(full_results['has_pretty_patch'], False)

    def test_unsupported_platform(self):
        stdout = StringIO()
        stderr = StringIO()
        res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)

        self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(), '')
        self.assertTrue('unsupported platform' in stderr.getvalue())

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure logging in the
        # child process (e.g., on win32) and we need to make sure that works and we still
        # see the verbose log output. However, we can't use logging_run() because using
        # OutputCapture to capture stdout and stderr later results in a non-picklable host.

        # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
        if not self.should_test_processes:
            return

        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO()
        run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
        self.assertTrue('text.html passed' in logging_stream.getvalue())
        self.assertTrue('image.html passed' in logging_stream.getvalue())

    def test_device_type_test_division(self):
        host = MockHost()
        port = host.port_factory.get('ios-simulator')

        host.filesystem.write_text_file('/mock-checkout/LayoutTests/test1.html', '')
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/ios/test2.html', '')
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/ipad/test3.html', '')
        host.filesystem.write_text_file('/MOCK output of child process/ImageDiff', '')

        with OutputCapture():
            logging = StringIO()
            run_webkit_tests.run(port, run_webkit_tests.parse_args(['--debug-rwt-logging', '-n', '--no-build', '--root', '/build'])[0], [], logging_stream=logging)

        for line in logging.getvalue().splitlines():
            if str(DeviceType.from_string('iPhone SE')) in line:
                self.assertTrue('Skipping 2 tests' in line)
            elif str(DeviceType.from_string('iPad (5th generation)')) in line:
                self.assertTrue('Skipping 1 test' in line)
            elif str(DeviceType.from_string('iPhone 7')) in line:
                self.assertTrue('Skipping 0 tests' in line)

    def test_device_type_specific_listing(self):
        host = MockHost()
        port = host.port_factory.get('ios-simulator')

        host.filesystem.write_text_file('/mock-checkout/LayoutTests/test1.html', '')
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/ios/test2.html', '')
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/ipad/test3.html', '')

        with OutputCapture() as captured:
            logging = StringIO()
            run_webkit_tests._print_expectations(port, run_webkit_tests.parse_args([])[0], [], logging_stream=logging)

        current_type = None
        by_type = {}
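        # Bucket the captured listing into {device_type: [test, ...]}: each
        # 'Tests to run' header line starts a new bucket (None when no device
        # type is named on the header).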
        for line in captured.stdout.getvalue().splitlines():
            if not line or 'skip' in line:
                continue
            if 'Tests to run' in line:
                current_type = DeviceType.from_string(line.split('for ')[-1].split(' running')[0]) if 'for ' in line else None
                by_type[current_type] = []
                continue
            by_type[current_type].append(line)

        self.assertEqual(3, len(by_type.keys()))
        self.assertEqual(2, len(by_type[DeviceType.from_string('iPhone SE')]))
        self.assertEqual(1, len(by_type[DeviceType.from_string('iPad (5th generation)')]))
        self.assertEqual(0, len(by_type[DeviceType.from_string('iPhone 7')]))

    def test_ipad_test_division(self):
        host = MockHost()
        port = host.port_factory.get('ipad-simulator')

        host.filesystem.write_text_file('/mock-checkout/LayoutTests/test1.html', '')
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/ios/test2.html', '')
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/ipad/test3.html', '')
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/iphone/test4.html', '')
        host.filesystem.write_text_file('/MOCK output of child process/ImageDiff', '')

        with OutputCapture():
            logging = StringIO()
            run_webkit_tests.run(port, run_webkit_tests.parse_args(['--debug-rwt-logging', '-n', '--no-build', '--root', '/build'])[0], [], logging_stream=logging)

        for line in logging.getvalue().splitlines():
            if str(DeviceType.from_string('iPad (5th generation)')) in line:
                self.assertTrue('Skipping 3 test' in line)

    def test_ipad_listing(self):
        host = MockHost()
        port = host.port_factory.get('ipad-simulator')

        host.filesystem.write_text_file('/mock-checkout/LayoutTests/test1.html', '')
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/ios/test2.html', '')
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/ipad/test3.html', '')
        host.filesystem.write_text_file('/mock-checkout/LayoutTests/platform/iphone/test4.html', '')

        with OutputCapture() as captured:
            logging = StringIO()
            run_webkit_tests._print_expectations(port, run_webkit_tests.parse_args([])[0], [], logging_stream=logging)

        current_type = None
        by_type = {}
        for line in captured.stdout.getvalue().splitlines():
            if not line or 'skip' in line:
                continue
            if 'Tests to run' in line:
                current_type = DeviceType.from_string(line.split('for ')[-1].split(' running')[0]) if 'for ' in line else None
                by_type[current_type] = []
                continue
            by_type[current_type].append(line)

        self.assertEqual(1, len(by_type.keys()))
        self.assertEqual(3, len(by_type[DeviceType.from_string('iPad (5th generation)')]))


class RebaselineTest(unittest.TestCase, StreamTestingMixin):
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that file_list contains the new baselines."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertContains(err, baseline_msg)

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 9)
        self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)

    def test_missing_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 11)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations in the version-specific directories
        # for both existing and new baselines.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 9)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)


class PortTest(unittest.TestCase):
    def assert_mock_port_works(self, port_name, args=None):
        args = args or []
        self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))

    def disabled_test_mac_lion(self):
        self.assert_mock_port_works('mac-lion')


class MainTest(unittest.TestCase):
    def test_exception_handling(self):
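        # Swap run_webkit_tests.run for stand-ins to exercise main()'s
        # exception handling; the real function is restored in the finally
        # block below.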
        orig_run_fn = run_webkit_tests.run

        # unused args pylint: disable=W0613
        def interrupting_run(port, options, args, stderr):
            raise KeyboardInterrupt

        def successful_run(port, options, args, stderr):

            class FakeRunDetails(object):
                exit_code = -1

            return FakeRunDetails()

        def exception_raising_run(port, options, args, stderr):
            assert False

        stdout = StringIO()
        stderr = StringIO()
        try:
            run_webkit_tests.run = interrupting_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, INTERRUPTED_EXIT_STATUS)

            run_webkit_tests.run = successful_run
            res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
            self.assertEqual(res, -1)

            run_webkit_tests.run = exception_raising_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        finally:
            run_webkit_tests.run = orig_run_fn