# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit tests for run_webkit_tests."""

from __future__ import with_statement

import itertools
import sys
import unittest

from webkitpy.common import array_stream
from webkitpy.common.system import outputcapture
from webkitpy.common.system import filesystem_mock
from webkitpy.tool import mocktool
from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.layout_package import dump_render_tree_thread
from webkitpy.layout_tests.port.test import TestPort, TestDriver
from webkitpy.python24.versioning import compare_version
from webkitpy.test.skip import skip_if

from webkitpy.thirdparty.mock import Mock


def parse_args(extra_args=None, record_results=False, tests_included=False):
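    """Build an argument list and parse it with run_webkit_tests.parse_args().

    By default the runner is kept quiet ('--print nothing'), pinned to the
    'test' port, told not to record results, and run with the old inline
    worker model so everything happens in-process.
    """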
    extra_args = extra_args or []
    args = ['--print', 'nothing']
    if not '--platform' in extra_args:
        args.extend(['--platform', 'test'])
    if not record_results:
        args.append('--no-record-results')
    if not '--child-processes' in extra_args:
        args.extend(['--worker-model', 'old-inline'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)


def passing_run(extra_args=None, port_obj=None, record_results=False,
                tests_included=False, filesystem=None):
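    """Run new-run-webkit-tests with the given arguments and return True if the run passed."""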
    options, parsed_args = parse_args(extra_args, record_results,
                                      tests_included)
    if not port_obj:
        port_obj = port.get(port_name=options.platform, options=options,
                            user=mocktool.MockUser(), filesystem=filesystem)
    res = run_webkit_tests.run(port_obj, options, parsed_args)
    return res == 0


def logging_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, filesystem=None):
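    """Like passing_run(), but captures the run's output.

    Returns a (result_code, buildbot_output, regular_output, user) tuple so
    callers can assert on what was printed and on which result URLs the
    MockUser was asked to open.
    """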
    options, parsed_args = parse_args(extra_args=extra_args,
                                      record_results=record_results,
                                      tests_included=tests_included)
    user = mocktool.MockUser()
    if not port_obj:
        port_obj = port.get(port_name=options.platform, options=options,
                            user=user, filesystem=filesystem)
    res, buildbot_output, regular_output = run_and_capture(port_obj, options,
                                                           parsed_args)
    return (res, buildbot_output, regular_output, user)


def run_and_capture(port_obj, options, parsed_args):
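    """Invoke run_webkit_tests.run() with its output routed into ArrayStreams.

    The streams are returned alongside the result code so tests can inspect
    exactly what would have been written to the buildbot and regular logs.
    """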
    oc = outputcapture.OutputCapture()
    try:
        # Swallow any stray logging the run produces so it doesn't pollute
        # the unit test output.
        oc.capture_output()
        buildbot_output = array_stream.ArrayStream()
        regular_output = array_stream.ArrayStream()
        res = run_webkit_tests.run(port_obj, options, parsed_args,
                                   buildbot_output=buildbot_output,
                                   regular_output=regular_output)
    finally:
        oc.restore_output()
    return (res, buildbot_output, regular_output)


def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False, filesystem=None):
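    """Run the given tests and return the batches of test names that were run.

    A recording driver notes each test it is asked to run; the result is a
    list of per-driver batches, or one flat list if flatten_batches is true.
    """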
    extra_args = extra_args or []
    if not tests_included:
        # Not including http tests since they get run out of order (that
        # behavior has its own test, see test_get_test_file_queue).
        extra_args = ['passes', 'failures'] + extra_args
    options, parsed_args = parse_args(extra_args, tests_included=True)
    user = mocktool.MockUser()

    test_batches = []

    class RecordingTestDriver(TestDriver):
        def __init__(self, port, worker_number):
            TestDriver.__init__(self, port, worker_number)
            self._current_test_batch = None

        def poll(self):
            # So that we don't create a new driver for every test
            return True

        def stop(self):
            self._current_test_batch = None

        def run_test(self, test_input):
            if self._current_test_batch is None:
                self._current_test_batch = []
                test_batches.append(self._current_test_batch)
            test_name = self._port.relative_test_filename(test_input.filename)
            self._current_test_batch.append(test_name)
            return TestDriver.run_test(self, test_input)

    class RecordingTestPort(TestPort):
        def create_driver(self, worker_number):
            return RecordingTestDriver(self, worker_number)

    recording_port = RecordingTestPort(options=options, user=user, filesystem=filesystem)
    run_and_capture(recording_port, options, parsed_args)

    if flatten_batches:
        return list(itertools.chain(*test_batches))
    return test_batches


class MainTest(unittest.TestCase):
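    """End-to-end tests for the new-run-webkit-tests command-line front end."""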

    def test_accelerated_compositing(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-compositing']))
        self.assertTrue(passing_run(['--no-accelerated-compositing']))

    def test_accelerated_2d_canvas(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-2d-canvas']))
        self.assertTrue(passing_run(['--no-accelerated-2d-canvas']))

    def test_basic(self):
        self.assertTrue(passing_run())

    def test_batch_size(self):
        batch_tests_run = get_tests_run(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_process_1(self):
        (res, buildbot_output, regular_output, user) = logging_run(
            ['--print', 'config', '--child-processes', '1'])
        self.assertTrue('Running one DumpRenderTree\n'
                        in regular_output.get())

    def test_child_processes_2(self):
        (res, buildbot_output, regular_output, user) = logging_run(
            ['--print', 'config', '--child-processes', '2'])
        self.assertTrue('Running 2 DumpRenderTrees in parallel\n'
                        in regular_output.get())

    def test_dryrun(self):
        batch_tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(batch_tests_run, [])

        batch_tests_run = get_tests_run(['-n'])
        self.assertEqual(batch_tests_run, [])

    def test_exception_raised(self):
        self.assertRaises(ValueError, logging_run,
            ['failures/expected/exception.html'], tests_included=True)

    def test_full_results_html(self):
        # FIXME: verify html?
        res, out, err, user = logging_run(['--full-results-html'])
        self.assertEqual(res, 0)

    def test_help_printing(self):
        res, out, err, user = logging_run(['--help-printing'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertFalse(err.empty())

    def test_hung_thread(self):
        res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
                                           'failures/expected/hang.html'],
                                          tests_included=True)
        self.assertEqual(res, 0)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run,
            ['failures/expected/keyboard.html'], tests_included=True)

    def test_last_results(self):
        fs = port.unit_test_filesystem()
        # We do a logging run here instead of a passing run in order to
        # suppress the output from the json generator.
        (res, buildbot_output, regular_output, user) = logging_run(['--clobber-old-results'], record_results=True, filesystem=fs)
        (res, buildbot_output, regular_output, user) = logging_run(
            ['--print-last-failures'], filesystem=fs)
        self.assertEqual(regular_output.get(), ['\n\n'])
        self.assertEqual(buildbot_output.get(), [])

    def test_lint_test_files(self):
        res, out, err, user = logging_run(['--lint-test-files'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertTrue(any(['Lint succeeded' in msg for msg in err.get()]))

    def test_lint_test_files__errors(self):
        options, parsed_args = parse_args(['--lint-test-files'])
        user = mocktool.MockUser()
        port_obj = port.get(options.platform, options=options, user=user)
        port_obj.test_expectations = lambda: "# syntax error"
        res, out, err = run_and_capture(port_obj, options, parsed_args)

        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue(any(['Lint failed' in msg for msg in err.get()]))

    def test_no_tests_found(self):
        res, out, err, user = logging_run(['resources'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_no_tests_found_2(self):
        res, out, err, user = logging_run(['foo'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_randomize_order(self):
        # FIXME: verify order was shuffled
        self.assertTrue(passing_run(['--randomize-order']))

    def test_run_chunk(self):
        # Test that we actually select the right chunk
        all_tests_run = get_tests_run(flatten_batches=True)
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
        self.assertEquals(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_tests_run(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        tests_run = get_tests_run(['failures/expected/keybaord.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_test_list(self):
        fs = port.unit_test_filesystem()
        filename = '/tmp/foo.txt'
        fs.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, filesystem=fs)
        self.assertEquals(['passes/text.html'], tests_run)

        # A missing test list file should be reported as an error.
        fs.remove(filename)
        res, out, err, user = logging_run(['--test-list=%s' % filename],
                                          tests_included=True, filesystem=fs)
        self.assertEqual(res, -1)
        self.assertFalse(err.empty())

    def test_test_list_with_prefix(self):
        fs = port.unit_test_filesystem()
        filename = '/tmp/foo.txt'
        fs.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, filesystem=fs)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_unexpected_failures(self):
        # Run tests including the unexpected failures.
        self._url_opened = None
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(res, 3)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())
        self.assertEqual(user.opened_urls, ['/tmp/layout-test-results/results.html'])

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/text-image-checksum.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/text.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run([
                'failures/unexpected/timeout.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        fs = port.unit_test_filesystem()
        with fs.mkdtemp() as tmpdir:
            res, out, err, user = logging_run(['--results-directory=' + str(tmpdir)],
                                              tests_included=True, filesystem=fs)
            self.assertEqual(user.opened_urls, [fs.join(tmpdir, 'results.html')])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, ['/tmp/layout-test-results/results.html'])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        res, out, err, user = logging_run(['--results-directory=foo'],
                                          tests_included=True)
        self.assertEqual(user.opened_urls, ['/tmp/foo/results.html'])

    def test_tolerance(self):
        class ImageDiffTestPort(TestPort):
            def diff_image(self, expected_contents, actual_contents,
                           diff_filename=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return True

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            test_port = ImageDiffTestPort(options=options, user=mocktool.MockUser())
            passing_run(args, port_obj=test_port, tests_included=True)
            return test_port

        base_args = ['--pixel-tests', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # will be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_worker_model__inline(self):
        self.assertTrue(passing_run(['--worker-model', 'old-inline']))

    def test_worker_model__threads(self):
        self.assertTrue(passing_run(['--worker-model', 'old-threads']))

    def test_worker_model__unknown(self):
        self.assertRaises(ValueError, logging_run,
                          ['--worker-model', 'unknown'])


MainTest = skip_if(MainTest, sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0, 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2')


class RebaselineTest(unittest.TestCase):
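    """Tests that runs which (re)generate baselines write the expected files."""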

    def assertBaselines(self, file_list, file):
        """Assert that file_list contains the baselines for the given test file."""
        for ext in [".txt", ".png", ".checksum"]:
            baseline = file + "-expected" + ext
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, a new one is written to the generic (non-platform) location.
        fs = port.unit_test_filesystem()
        passing_run(['--pixel-tests',
                     '--reset-results',
                     'passes/image.html',
                     'failures/expected/missing_image.html'],
                    tests_included=True, filesystem=fs)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run.txt')
        self.assertEqual(len(file_list), 6)
        self.assertBaselines(file_list,
                             "/passes/image")
        self.assertBaselines(file_list,
                             "/failures/expected/missing_image")

    def test_new_baseline(self):
        # Test that we update the platform expectations. If the expectation
        # is missing, then create a new expectation in the platform dir.
        fs = port.unit_test_filesystem()
        passing_run(['--pixel-tests',
                     '--new-baseline',
                     'passes/image.html',
                     'failures/expected/missing_image.html'],
                    tests_included=True, filesystem=fs)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run.txt')
        self.assertEqual(len(file_list), 6)
        self.assertBaselines(file_list,
                             "/platform/test-mac/passes/image")
        self.assertBaselines(file_list,
                             "/platform/test-mac/failures/expected/missing_image")


class DryrunTest(unittest.TestCase):
    # FIXME: it's hard to know which platforms are safe to test; the
    # chromium platforms require a chromium checkout, and the mac platform
    # requires fcntl, so it can't be tested on win32, etc. There is
    # probably a better way of handling this.
    def disabled_test_darwin(self):
        if sys.platform != "darwin":
            return

        self.assertTrue(passing_run(['--platform', 'dryrun', 'fast/html'],
                                    tests_included=True))
        self.assertTrue(passing_run(['--platform', 'dryrun-mac', 'fast/html'],
                                    tests_included=True))

    def test_test(self):
        self.assertTrue(passing_run(['--platform', 'dryrun-test',
                                     'fast/html'],
                                    tests_included=True))


if __name__ == '__main__':
    unittest.main()