2 # Copyright (C) 2010 Google Inc. All rights reserved.
3 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are
9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above
12 # copyright notice, this list of conditions and the following disclaimer
13 # in the documentation and/or other materials provided with the
15 # * Neither the name of Google Inc. nor the names of its
16 # contributors may be used to endorse or promote products derived from
17 # this software without specific prior written permission.
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 """Unit tests for run_webkit_tests."""
46 from webkitpy.common import array_stream
47 from webkitpy.common.system import outputcapture
48 from webkitpy.common.system import user
49 from webkitpy.layout_tests import port
50 from webkitpy.layout_tests import run_webkit_tests
51 from webkitpy.layout_tests.layout_package import dump_render_tree_thread
52 from webkitpy.layout_tests.port.test import TestPort, TestDriver
53 from webkitpy.python24.versioning import compare_version
54 from webkitpy.test.skip import skip_if
56 from webkitpy.thirdparty.mock import Mock
    # NOTE(review): excerpt truncated — the enclosing class header
    # (presumably a MockUser-style stub) and this method's body (which
    # would record the opened URL) are not visible here.
    def open_url(self, url):
def passing_run(extra_args=None, port_obj=None, record_results=False,
                tests_included=False):
    """Run run-webkit-tests with quiet output; intended to report success.

    Args:
        extra_args: extra command-line arguments to append.
        port_obj: optional pre-built port object to run against.
        record_results: if False, '--no-record-results' is passed.
        tests_included: if True, extra_args is assumed to already name the
            tests to run; otherwise a default set of test paths is added.
    """
    extra_args = extra_args or []
    args = ['--print', 'nothing']
    if not '--platform' in extra_args:
        args.extend(['--platform', 'test'])
    if not record_results:
        args.append('--no-record-results')
    if not '--child-processes' in extra_args:
        args.extend(['--worker-model', 'inline'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     # NOTE(review): excerpt truncated; additional test
                     # paths are missing here.
                     'failures/expected/*'])
    options, parsed_args = run_webkit_tests.parse_args(args)
    # NOTE(review): excerpt truncated; the guard that presumably only
    # builds a port when port_obj is None is missing here.
    port_obj = port.get(port_name=options.platform, options=options,
    # NOTE(review): excerpt truncated; the call's continuation (user
    # argument and closing paren) is missing.
    res = run_webkit_tests.run(port_obj, options, parsed_args)
    # NOTE(review): excerpt truncated; the return statement (presumably
    # `return res == 0`) is missing.
def logging_run(extra_args=None, port_obj=None, tests_included=False):
    """Run run-webkit-tests while capturing its output streams.

    Returns:
        A 4-tuple of (result code, buildbot output stream, regular output
        stream, user), per the return statement at the end of the block.
    """
    extra_args = extra_args or []
    args = ['--no-record-results']
    if not '--platform' in extra_args:
        args.extend(['--platform', 'test'])
    if not '--child-processes' in extra_args:
        args.extend(['--worker-model', 'inline'])
    args.extend(extra_args)
    if not tests_included:
        args.extend(['passes',
                     # NOTE(review): excerpt truncated; additional test
                     # paths are missing here.
                     'failures/expected/*'])

    oc = outputcapture.OutputCapture()
    # NOTE(review): excerpt truncated; the capture_output() call and the
    # try/finally around the run appear to be missing.
    options, parsed_args = run_webkit_tests.parse_args(args)
    # NOTE(review): excerpt truncated; user/port setup lines are missing
    # (the `user` returned below is presumably bound in the gap).
    port_obj = port.get(port_name=options.platform, options=options,
    # NOTE(review): excerpt truncated; the call's continuation (user
    # argument and closing paren) is missing.
    buildbot_output = array_stream.ArrayStream()
    regular_output = array_stream.ArrayStream()
    res = run_webkit_tests.run(port_obj, options, parsed_args,
                               buildbot_output=buildbot_output,
                               regular_output=regular_output)
    # NOTE(review): excerpt truncated; the matching restore_output() call
    # is presumably here.
    return (res, buildbot_output, regular_output, user)
def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False):
    """Run the tests and return the batches of test names actually run.

    Uses a recording port/driver pair so each driver instance records the
    batch of tests it was handed.  If flatten_batches is True, a single
    flat list of test names is returned instead of a list of batches.
    """
    extra_args = extra_args or []
    # NOTE(review): excerpt truncated; the `args = [` opening of this
    # list literal is missing.
        '--print', 'nothing',
        '--platform', 'test',
        '--no-record-results',
        '--worker-model', 'inline']
    args.extend(extra_args)
    if not tests_included:
        # Not including http tests since they get run out of order (that
        # behavior has its own test, see test_get_test_file_queue)
        args.extend(['passes', 'failures'])
    options, parsed_args = run_webkit_tests.parse_args(args)

    # NOTE(review): excerpt truncated; the `user` and `test_batches = []`
    # bindings referenced below are presumably defined in the gap here.

    class RecordingTestDriver(TestDriver):
        # Driver that records each batch of tests it is asked to run.
        def __init__(self, port, worker_number):
            TestDriver.__init__(self, port, worker_number)
            self._current_test_batch = None

        # NOTE(review): excerpt truncated; a method header (and part of
        # its body) is missing above the next two lines.
            # So that we don't create a new driver for every test
            self._current_test_batch = None

        def run_test(self, test_input):
            # Lazily start a new batch so each driver (re)start shows up
            # as a new list in test_batches.
            if self._current_test_batch is None:
                self._current_test_batch = []
                test_batches.append(self._current_test_batch)
            test_name = self._port.relative_test_filename(test_input.filename)
            self._current_test_batch.append(test_name)
            return TestDriver.run_test(self, test_input)

    class RecordingTestPort(TestPort):
        # Port whose drivers record the test batches they run.
        def create_driver(self, worker_number):
            return RecordingTestDriver(self, worker_number)

    recording_port = RecordingTestPort(options=options, user=user)
    logging_run(extra_args=args, port_obj=recording_port, tests_included=True)

    # NOTE(review): excerpt truncated; the non-flattened return path
    # (presumably `return test_batches` behind a flatten_batches check)
    # is missing.
    return list(itertools.chain(*test_batches))
class MainTest(unittest.TestCase):
    """End-to-end tests driving run_webkit_tests through its command line.

    NOTE(review): this excerpt is missing several continuation lines;
    the broken call sites are flagged inline below.
    """

    def test_accelerated_compositing(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-compositing']))
        self.assertTrue(passing_run(['--no-accelerated-compositing']))

    def test_accelerated_2d_canvas(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-2d-canvas']))
        self.assertTrue(passing_run(['--no-accelerated-2d-canvas']))

    def test_basic(self):
        self.assertTrue(passing_run())

    def test_batch_size(self):
        batch_tests_run = get_tests_run(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_process_1(self):
        (res, buildbot_output, regular_output, user) = logging_run(
            ['--print', 'config', '--child-processes', '1'])
        self.assertTrue('Running one DumpRenderTree\n'
                        in regular_output.get())

    def test_child_processes_2(self):
        (res, buildbot_output, regular_output, user) = logging_run(
            ['--print', 'config', '--child-processes', '2'])
        self.assertTrue('Running 2 DumpRenderTrees in parallel\n'
                        in regular_output.get())

    def test_dryrun(self):
        batch_tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(batch_tests_run, [])

        batch_tests_run = get_tests_run(['-n'])
        self.assertEqual(batch_tests_run, [])

    def test_exception_raised(self):
        self.assertRaises(ValueError, logging_run,
                          ['failures/expected/exception.html'], tests_included=True)

    def test_full_results_html(self):
        # FIXME: verify html?
        self.assertTrue(passing_run(['--full-results-html']))

    def test_help_printing(self):
        res, out, err, user = logging_run(['--help-printing'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertFalse(err.empty())

    def test_hung_thread(self):
        res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
                                           'failures/expected/hang.html'],
        # NOTE(review): excerpt truncated; the call's closing argument
        # (presumably tests_included=True) is missing.
        self.assertEqual(res, 0)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run,
                          ['failures/expected/keyboard.html'], tests_included=True)

    def test_last_results(self):
        passing_run(['--clobber-old-results'], record_results=True)
        (res, buildbot_output, regular_output, user) = logging_run(
            ['--print-last-failures'])
        self.assertEqual(regular_output.get(), ['\n\n'])
        self.assertEqual(buildbot_output.get(), [])

    def test_lint_test_files(self):
        # NOTE(review): excerpt truncated; the call's continuation and
        # closing paren are missing on the next line.
        res, out, err, user = logging_run(['--lint-test-files'],
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertTrue(any(['lint succeeded' in msg for msg in err.get()]))

    def test_no_tests_found(self):
        res, out, err, user = logging_run(['resources'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_no_tests_found_2(self):
        res, out, err, user = logging_run(['foo'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_randomize_order(self):
        # FIXME: verify order was shuffled
        self.assertTrue(passing_run(['--randomize-order']))

    def test_run_chunk(self):
        # Test that we actually select the right chunk
        all_tests_run = get_tests_run(flatten_batches=True)
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
        self.assertEquals(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_tests_run(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_test_list(self):
        # NOTE(review): tempfile.mktemp() is race-prone (mkstemp would be
        # safer); left unchanged here.
        filename = tempfile.mktemp()
        tmpfile = file(filename, mode='w+')
        tmpfile.write('passes/text.html')
        # NOTE(review): excerpt truncated; tmpfile.close() is presumably
        # here.
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)
        # NOTE(review): excerpt truncated; the file removal and the call's
        # continuation (presumably tests_included=True) are missing.
        res, out, err, user = logging_run(['--test-list=%s' % filename],
        self.assertEqual(res, -1)
        self.assertFalse(err.empty())

    def test_unexpected_failures(self):
        # Run tests including the unexpected failures.
        self._url_opened = None
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(res, 1)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())
        self.assertEqual(user.url, '/tmp/layout-test-results/results.html')

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        tmpdir = tempfile.mkdtemp()
        # NOTE(review): excerpt truncated; the call's continuation
        # (presumably tests_included=True) is missing on the next line.
        res, out, err, user = logging_run(['--results-directory=' + tmpdir],
        self.assertEqual(user.url, os.path.join(tmpdir, 'results.html'))
        shutil.rmtree(tmpdir, ignore_errors=True)

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(user.url, '/tmp/layout-test-results/results.html')

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        # NOTE(review): excerpt truncated; the call's continuation
        # (presumably tests_included=True) is missing on the next line.
        res, out, err, user = logging_run(['--results-directory=foo'],
        self.assertEqual(user.url, '/tmp/foo/results.html')

    def test_tolerance(self):
        class ImageDiffTestPort(TestPort):
            # Records the tolerance that was passed down to diff_image.
            def diff_image(self, expected_contents, actual_contents,
            # NOTE(review): excerpt truncated; the rest of the signature
            # is missing.
                self.tolerance_used_for_diff_image = self._options.tolerance
                # NOTE(review): excerpt truncated; the method's return is
                # missing.

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            test_port = ImageDiffTestPort(options=options, user=MockUser())
            passing_run(args, port_obj=test_port, tests_included=True)
            # NOTE(review): excerpt truncated; `return test_port` is
            # presumably missing here (callers use the return value).

        base_args = ['--pixel-tests', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # applies.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_worker_model__inline(self):
        self.assertTrue(passing_run(['--worker-model', 'inline']))

    def test_worker_model__threads(self):
        self.assertTrue(passing_run(['--worker-model', 'threads']))

    def test_worker_model__processes(self):
        self.assertRaises(ValueError, logging_run,
                          ['--worker-model', 'processes'])

    def test_worker_model__unknown(self):
        self.assertRaises(ValueError, logging_run,
                          ['--worker-model', 'unknown'])
# Skip the whole suite on Cygwin when running under Python older than 2.6,
# where these tests are known to hang.
MainTest = skip_if(MainTest, sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0, 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2')
def _mocked_open(original_open, file_list):
    """Return a replacement for codecs.open that intercepts baseline writes.

    When a "-expected.*" file is opened for writing, the (slash-normalized)
    file name is appended to file_list and the write is redirected to
    os.devnull so no real baseline file is created.  All other opens are
    passed through to original_open unchanged.

    Args:
        original_open: the real open function to delegate to.
        file_list: list that collects the names of intercepted baselines.

    Returns:
        The wrapper callable, suitable for assignment to codecs.open.
    """
    def _wrapper(name, mode, encoding):
        if name.find("-expected.") != -1 and mode.find("w") != -1:
            # We don't want to actually write new baselines, so stub these
            # out.  str.replace() returns a new string rather than mutating
            # in place, so the result must be reassigned — the original
            # code discarded it, leaving Windows-style separators in the
            # recorded names.
            name = name.replace('\\', '/')
            file_list.append(name)
            return original_open(os.devnull, mode, encoding)
        return original_open(name, mode, encoding)
    # The wrapper must be returned so callers can install it (the caller
    # below does `codecs.open = _mocked_open(original_open, file_list)`).
    return _wrapper
class RebaselineTest(unittest.TestCase):
    """Tests for the baseline-writing (rebaseline/reset-results) paths."""

    def assertBaselines(self, file_list, file):
        """Assert that the file_list contains the baselines."""
        for ext in [".txt", ".png", ".checksum"]:
            baseline = file + "-expected" + ext
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def disabled_test_reset_results(self):
        # FIXME: This test is disabled until we can rewrite it to use a
        # NOTE(review): excerpt truncated; the rest of this FIXME and the
        # file_list/codecs.open setup are missing here.
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        passing_run(['--pixel-tests',
        # NOTE(review): excerpt truncated; intermediate arguments are
        # missing above the next line, as is the call's closing
        # (presumably tests_included=True).
                     'failures/expected/missing_image.html'],
        self.assertEqual(len(file_list), 6)
        self.assertBaselines(file_list,
        # NOTE(review): excerpt truncated; the first assertBaselines
        # argument (presumably "data/passes/image") is missing.
        self.assertBaselines(file_list,
                             "data/failures/expected/missing_image")

    def disabled_test_new_baseline(self):
        # FIXME: This test is disabled until we can rewrite it to use a
        # NOTE(review): excerpt truncated; the rest of this FIXME is
        # missing.
        # Test that we update the platform expectations. If the expectation
        # is missing, then create a new expectation in the platform dir.
        original_open = codecs.open
        # NOTE(review): excerpt truncated; `file_list = []` is presumably
        # bound in the gap here.
        # Test that we update the platform expectations. If the expectation
        # is missing, then create a new expectation in the platform dir.
        codecs.open = _mocked_open(original_open, file_list)
        passing_run(['--pixel-tests',
        # NOTE(review): excerpt truncated; intermediate arguments are
        # missing above the next line, as is the call's closing
        # (presumably tests_included=True).
                     'failures/expected/missing_image.html'],
        self.assertEqual(len(file_list), 6)
        self.assertBaselines(file_list,
                             "data/platform/test/passes/image")
        self.assertBaselines(file_list,
                             "data/platform/test/failures/expected/missing_image")
        codecs.open = original_open
class TestRunnerWrapper(run_webkit_tests.TestRunner):
    """TestRunner subclass overriding per-file test-input creation.

    NOTE(review): the override's body is missing from this excerpt
    (presumably a stub such as `return None`).
    """

    def _get_test_input_for_file(self, test_file):
class TestRunnerTest(unittest.TestCase):
    """Unit tests for run_webkit_tests.TestRunner internals."""

    def test_results_html(self):
        # NOTE(review): excerpt truncated; `mock_port = Mock()` is
        # presumably bound in the gap here.
        mock_port.relative_test_filename = lambda name: name
        mock_port.filename_to_uri = lambda name: name

        runner = run_webkit_tests.TestRunner(port=mock_port, options=Mock(),
                                             printer=Mock(), message_broker=Mock())
        # NOTE(review): excerpt truncated; several lines of this expected
        # HTML literal, including its closing quotes, are missing below.
        expected_html = u"""<html>
<title>Layout Test Results (time)</title>
<h2>Title (time)</h2>
<p><a href='test_path'>test_path</a><br />
        html = runner._results_html(["test_path"], {}, "Title", override_time="time")
        self.assertEqual(html, expected_html)

    def test_shard_tests(self):
        # Test that _shard_tests in run_webkit_tests.TestRunner really
        # put the http tests first in the queue.
        runner = TestRunnerWrapper(port=Mock(), options=Mock(),
                                   printer=Mock(), message_broker=Mock())

        # NOTE(review): excerpt truncated; the `test_list = [` opening of
        # this list literal is missing above these entries.
            "LayoutTests/websocket/tests/unicode.htm",
            "LayoutTests/animations/keyframes.html",
            "LayoutTests/http/tests/security/view-source-no-refresh.html",
            "LayoutTests/websocket/tests/websocket-protocol-ignored.html",
            "LayoutTests/fast/css/display-none-inline-style-change-crash.html",
            "LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html",
            "LayoutTests/dom/html/level2/html/HTMLAnchorElement03.html",
            "LayoutTests/ietestcenter/Javascript/11.1.5_4-4-c-1.html",
            "LayoutTests/dom/html/level2/html/HTMLAnchorElement06.html",
        # NOTE(review): excerpt truncated; the list's closing bracket is
        # missing.

        expected_tests_to_http_lock = set([
            'LayoutTests/websocket/tests/unicode.htm',
            'LayoutTests/http/tests/security/view-source-no-refresh.html',
            'LayoutTests/websocket/tests/websocket-protocol-ignored.html',
            'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html',
        # NOTE(review): excerpt truncated; the set literal's closing `])`
        # is missing.

        # FIXME: Ideally the HTTP tests don't have to all be in one shard.
        single_thread_results = runner._shard_tests(test_list, False)
        multi_thread_results = runner._shard_tests(test_list, True)

        self.assertEqual("tests_to_http_lock", single_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock, set(single_thread_results[0][1]))
        self.assertEqual("tests_to_http_lock", multi_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock, set(multi_thread_results[0][1]))
class DryrunTest(unittest.TestCase):
    """Smoke tests for the dryrun ports."""

    # FIXME: it's hard to know which platforms are safe to test; the
    # chromium platforms require a chromium checkout, and the mac platform
    # requires fcntl, so it can't be tested on win32, etc. There is
    # probably a better way of handling this.
    def test_darwin(self):
        if sys.platform != "darwin":
            # NOTE(review): excerpt truncated; the early `return` guard
            # body is missing here.
        self.assertTrue(passing_run(['--platform', 'test']))
        # NOTE(review): excerpt truncated; the continuations of the three
        # dryrun calls below (test names and closing parens) are missing.
        self.assertTrue(passing_run(['--platform', 'dryrun',
        self.assertTrue(passing_run(['--platform', 'dryrun-mac',
        self.assertTrue(passing_run(['--platform', 'dryrun-test',
546 if __name__ == '__main__':