# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import json
import StringIO
import unittest

from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost

from webkitpy import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.models.test_run_results import INTERRUPTED_EXIT_STATUS
from webkitpy.port import Port
from webkitpy.port import test
from webkitpy.test.skip import skip_if


def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
    extra_args = extra_args or []
    args = []
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if not new_results:
        args.append('--no-new-test-results')

    if '--child-processes' not in extra_args:
        args.extend(['--child-processes', '1'])

    if '--world-leaks' not in extra_args:
        args.append('--world-leaks')

    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)


def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
    options, parsed_args = parse_args(extra_args, tests_included)
    if not port_obj:
        host = host or MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj

    logging_stream = StringIO.StringIO()
    run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    return run_details.exit_code == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
    options, parsed_args = parse_args(extra_args=extra_args,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = host or MockHost()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
    return (run_details, output, host.user)


def run_and_capture(port_obj, options, parsed_args, shared_port=True):
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        logging_stream = StringIO.StringIO()
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()
    return (run_details, logging_stream)


def get_tests_run(args, host=None):
    results = get_test_results(args, host)
    return [result.test_name for result in results]


def get_test_batches(args, host=None):
    results = get_test_results(args, host)

    batches = []
    batch = []
    current_pid = None
    for result in results:
        if batch and result.pid != current_pid:
            batches.append(batch)
            batch = []
        batch.append(result.test_name)
        current_pid = result.pid
    if batch:
        batches.append(batch)
    return batches


def get_test_results(args, host=None):
    options, parsed_args = parse_args(args, tests_included=True)

    host = host or MockHost()
    port_obj = host.port_factory.get(port_name=options.platform, options=options)

    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        logging_stream = StringIO.StringIO()
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()

    all_results = []
    if run_details.initial_results:
        all_results.extend(run_details.initial_results.all_results)

    if run_details.retry_results:
        all_results.extend(run_details.retry_results.all_results)
    return all_results


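# full_results.json is written as JSONP rather than bare JSON: the payload is
# wrapped in an ADD_RESULTS(...) call, e.g. (illustrative, not verbatim):
#     ADD_RESULTS({"tests": {...}, "num_regressions": 1});
# parse_full_results() below strips that wrapper and parses the JSON payload.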
def parse_full_results(full_results_text):
    json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
    compressed_results = json.loads(json_to_eval)
    return compressed_results


class StreamTestingMixin(object):
    def assertContains(self, stream, string):
        self.assertTrue(string in stream.getvalue())

    def assertEmpty(self, stream):
        self.assertFalse(stream.getvalue())

    def assertNotEmpty(self, stream):
        self.assertTrue(stream.getvalue())


class RunTest(unittest.TestCase, StreamTestingMixin):
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()

    def test_basic(self):
        options, args = parse_args(tests_included=True)
        logging_stream = StringIO.StringIO()
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options)
        details = run_webkit_tests.run(port_obj, options, args, logging_stream)

        # These numbers will need to be updated whenever we add new tests.
        self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
        self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
        self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
        self.assertEqual(details.retry_results.total, test.TOTAL_RETRIES)

        one_line_summary = "%d tests ran as expected, %d didn't:\n" % (
            details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name),
            len(details.initial_results.unexpected_results_by_name))
        self.assertTrue(one_line_summary in logging_stream.buflist)

        # Ensure the results were summarized properly.
        self.assertEqual(details.summarized_results['num_regressions'], details.exit_code)

        # Ensure the image diff percentage is in the results.
        self.assertEqual(details.summarized_results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)

        # Ensure the results were written out and displayed.
        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertEqual(parse_full_results(full_results_text), details.summarized_results)

        self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_batch_size(self):
        batch_tests_run = get_test_batches(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
            self.assertTrue(any('Running 2 ' in line for line in regular_output.buflist))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/passes', 'passes'],
                tests_included=True, shared_port=False)
            self.assertTrue(any('Running 1 ' in line for line in regular_output.buflist))

    def test_dryrun(self):
        tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(tests_run, [])

        tests_run = get_tests_run(['-n'])
        self.assertEqual(tests_run, [])

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether they are raised in-process or out. In-process exceptions
        # propagate as normal, which lets us get the full stack trace and
        # traceback from the worker. The downside is that it could be any
        # error, but that is actually useful for testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions (a subclass of BaseException), which carry a string
        # capture of the stack that can be printed, but which don't display
        # properly in the unit test exception handlers.
        self.assertRaises(BaseException, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(BaseException, logging_run,
                ['--child-processes', '2', '--force', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)

    def test_full_results_html(self):
        # FIXME: verify html?
        details, _, _ = logging_run(['--full-results-html'])
        self.assertEqual(details.exit_code, 0)

    def test_hung_thread(self):
        details, err, _ = logging_run(['--run-singly', '--time-out-ms=50', 'failures/expected/hang.html'], tests_included=True)
        # Note that hang.html is marked as WontFix and all WontFix tests are
        # expected to Pass, so actually running them generates an "unexpected" error.
        self.assertEqual(details.exit_code, 1)
        self.assertNotEmpty(err)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        details, _, _ = logging_run(['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)
        self.assertEqual(details.exit_code, INTERRUPTED_EXIT_STATUS)

        if self.should_test_processes:
            _, regular_output, _ = logging_run(['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True, shared_port=False)
            self.assertTrue(any('Interrupted, exiting' in line for line in regular_output.buflist))

    def test_no_tests_found(self):
        details, err, _ = logging_run(['resources'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        details, err, _ = logging_run(['foo'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_natural_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)

    def test_natural_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)

    def test_random_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(tests_run.count('passes/audio.html'), 2)
        self.assertEqual(tests_run.count('passes/args.html'), 2)

    def test_no_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_with_directory_entries_in_natural_order(self):
        tests_to_run = ['http/tests/ssl', 'http/tests/passes']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])

    def test_gc_between_tests(self):
        self.assertTrue(passing_run(['--gc-between-tests']))

    def test_check_for_world_leaks(self):
        self.assertTrue(passing_run(['--world-leaks']))

    def test_complex_text(self):
        self.assertTrue(passing_run(['--complex-text']))

    def test_threaded(self):
        self.assertTrue(passing_run(['--threaded']))

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'])
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
                         num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'])
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEqual(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])

        # Now check that we don't run anything.
        self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests should be:
        #   number_of_tests * repeat_each * iterations
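        # Here that is 2 tests * 4 repeats * 2 iterations = 16 runs.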
        host = MockHost()
        _, err, _ = logging_run(
            ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
            tests_included=True, host=host)
        self.assertContains(err, "All 16 tests ran as expected.\n")

    def test_run_chunk(self):
        # Test that we actually select the right chunk.
        all_tests_run = get_tests_run(['passes', 'failures'])
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
        self.assertEqual(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size.
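        # With four tests and a chunk size of 3, chunk 1 covers indices 3, 4 and 5,
        # which wrap around to indices 3, 0 and 1 of the naturally-ordered list.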
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
        self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the number of parts
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_test_batches(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')

        batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_run_singly_actually_runs_tests(self):
        details, _, _ = logging_run(['--run-singly'], tests_included=True)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 1)  # failures/expected/hang.html actually passes w/ --run-singly.

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'])
        self.assertEqual(tests_run, ['passes/text.html'])

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'])
        self.assertEqual(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
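        # Note that 'keybaord.html' appears to be deliberately misspelled;
        # no test matches the name, so nothing should run.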
        tests_run = get_tests_run(['failures/expected/keybaord.html'])
        self.assertEqual([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                         'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, -1)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_missing_and_unexpected_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"text-image-checksum.html":{"report":"REGRESSION","expected":"PASS","actual":"IMAGE+TEXT","image_diff_percent":1},"missing_text.html":{"report":"MISSING","expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_pixel_test_directories(self):
        """Both tests have failing checksums. We include only the first in pixel tests, so only that one should fail."""
        host = MockHost()
        args = ['--pixel-tests', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                'failures/unexpected/pixeldir/image_in_pixeldir.html',
                'failures/unexpected/image_not_in_pixeldir.html']
        details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)

        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"report":"REGRESSION","expected":"PASS","actual":"IMAGE"'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)

    def test_missing_and_unexpected_results_with_custom_exit_code(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        class CustomExitCodePort(test.TestPort):
            def exit_code_from_summarized_results(self, unexpected_results):
                return unexpected_results['num_regressions'] + unexpected_results['num_missing']

        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
        test_port = CustomExitCodePort(host, options=options)
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host, port_obj=test_port)
        self.assertEqual(details.exit_code, 2)

    def test_crash_with_stderr(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"report":"REGRESSION","expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin and Windows.
        if not self._platform.is_mac() or self._platform.is_win():
            return
        mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/tmp/layout-test-results/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html', '--dump-render-tree'], tests_included=True, host=host)
        expected_crash_log = mock_crash_report
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)

    def test_web_process_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin and Windows.
        if not self._platform.is_mac() or self._platform.is_win():
            return
        mock_crash_report = make_mock_crash_report_darwin('WebProcess', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/tmp/layout-test-results/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
        logging_run(['failures/unexpected/web-process-crash-with-stderr.html'], tests_included=True, host=host)
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), mock_crash_report)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        details, regular_output, user = logging_run(
            ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
            tests_included=True, host=host)

        # The incremental results should have been generated and then deleted.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        # This checks that we report only the number of tests that actually failed.
        self.assertEqual(details.exit_code, 1)

        # This checks that passes/text.html is considered SKIPped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        _, _, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))

        # Now we test that --clobber-old-results removes the old entries and the old retries,
        # and that we don't retry again.
        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Clobbering old results' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))

    def test_retrying_force_pixel_tests(self):
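        # Even with --no-pixel-tests, a text failure should trigger a retry with
        # pixel tests enabled, so the retry writes image results as well.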
        host = MockHost()
        details, err, _ = logging_run(['--no-pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json = parse_full_results(json_string)
        self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "image_diff_percent": 1, "report": "REGRESSION"})
        self.assertFalse(json["pixel_tests_enabled"])
        self.assertEqual(details.enabled_pixel_tests_in_retry, True)

    def test_failed_text_with_missing_pixel_results_on_retry(self):
        # Test what happens when pixel results are missing on retry.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            '--no-new-test-results', '--no-pixel-tests',
            'failures/unexpected/text-image-missing.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"text-image-missing.html":{"report":"REGRESSION","expected":"PASS","actual":"TEXT MISSING","is_missing_image":true}}'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_retrying_uses_retries_directory(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))

    def test_run_order__inline(self):
        # These next tests verify that we run tests in ascending alphabetical
        # order per directory. HTTP tests are sharded separately from other tests,
        # so we have to test both.
        tests_run = get_tests_run(['-i', 'passes/passes', 'passes'])
        self.assertEqual(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['http/tests/passes'])
        self.assertEqual(tests_run, sorted(tests_run))

    def test_tolerance(self):
        class ImageDiffTestPort(test.TestPort):
            def diff_image(self, expected_contents, actual_contents, tolerance=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return (True, 1, None)

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            host = MockHost()
            test_port = ImageDiffTestPort(host, options=options)
            res = passing_run(args, port_obj=test_port, tests_included=True)
            self.assertTrue(res)
            return test_port

        base_args = ['--pixel-tests', '--no-new-test-results', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # will be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_skip_reftests_if_no_ref_tests(self):
        tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)
        tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'])
        self.assertEqual([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_test_results(['passes/reftest.html'])
        self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_test_results(['passes/mismatch.html'])
        self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])

    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
        host = MockHost()
        _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"unlistedtest.html":{"report":"MISSING","expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
        self.assertTrue(json_string.find('"num_regressions":4') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
        self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                    tests_included=True, host=host))

    def test_no_http_and_force(self):
        # See test_run_force; using --force raises an exception.
        # FIXME: We would like to check the warnings generated.
        self.assertRaises(ValueError, logging_run, ['--force', '--no-http'])

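    # Helper, not a test: returns the tests whose names contain the given type.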
    @staticmethod
    def has_test_of_type(tests, type):
        return [test for test in tests if type in test]

    def test_no_http_tests(self):
        batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'websocket'))

        batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'])
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'http'))
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))

        batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'websocket'))

    def test_platform_tests_are_found(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
        self.assertTrue('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Test to ensure that we don't generate -pretty.html if PrettyPatch isn't available.
        host = MockHost()
        _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertTrue(any(f.endswith('-diff.txt') for f in written_files.keys()))
        self.assertFalse(any(f.endswith('-pretty-diff.html') for f in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = parse_full_results(full_results_text)
        self.assertEqual(full_results['has_pretty_patch'], False)

    def test_unsupported_platform(self):
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)

        self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(), '')
        self.assertTrue('unsupported platform' in stderr.getvalue())

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure logging in the
        # child process (e.g., on win32) and we need to make sure that works and we still
        # see the verbose log output. However, we can't use logging_run() because using
        # outputcapture to capture stdout and stderr later results in a nonpicklable host.

        # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
        if not self.should_test_processes:
            return

        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO.StringIO()
        run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
        self.assertTrue('text.html passed' in logging_stream.getvalue())
        self.assertTrue('image.html passed' in logging_stream.getvalue())


class EndToEndTest(unittest.TestCase):
    def test_reftest_with_two_notrefs(self):
        # Test that a reftest with multiple references (match, mismatch, and both)
        # reports its failures correctly and omits the passing variants from the results.
        host = MockHost()
        _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()

        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json = parse_full_results(json_string)
        self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "image_diff_percent": 1, "report": "REGRESSION"})
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "report": "REGRESSION"})
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "report": "REGRESSION"})


class RebaselineTest(unittest.TestCase, StreamTestingMixin):
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that file_list contains each expected baseline and that its creation was logged."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertContains(err, baseline_msg)

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)

    def test_missing_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 10)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations in the version-specific directories
        # for both existing and new baselines.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)


class PortTest(unittest.TestCase):
    def assert_mock_port_works(self, port_name, args=[]):
        self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))

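    # The "disabled_" prefix keeps the unittest runner from collecting this test;
    # it exercises a real Host rather than a MockHost.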
    def disabled_test_mac_lion(self):
        self.assert_mock_port_works('mac-lion')


class MainTest(unittest.TestCase):
    def test_exception_handling(self):
        orig_run_fn = run_webkit_tests.run

        # unused args pylint: disable=W0613
        def interrupting_run(port, options, args, stderr):
            raise KeyboardInterrupt

        def successful_run(port, options, args, stderr):

            class FakeRunDetails(object):
                exit_code = -1

            return FakeRunDetails()

        def exception_raising_run(port, options, args, stderr):
            # Any exception other than KeyboardInterrupt should map to
            # EXCEPTIONAL_EXIT_STATUS.
            raise ValueError('testing')

        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()

        run_webkit_tests.run = interrupting_run
        res = run_webkit_tests.main([], stdout, stderr)
        self.assertEqual(res, INTERRUPTED_EXIT_STATUS)

        run_webkit_tests.run = successful_run
        res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
        self.assertEqual(res, -1)

        run_webkit_tests.run = exception_raising_run
        res = run_webkit_tests.main([], stdout, stderr)
        self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)

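        # Restore the real run() so later tests in this module are unaffected.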
        run_webkit_tests.run = orig_run_fn