#!/usr/bin/python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit tests for run_webkit_tests."""

import codecs
import itertools
import logging
import os
import Queue
import shutil
import sys
import tempfile
import thread
import threading
import time
import unittest

from webkitpy.common import array_stream
from webkitpy.common.system import outputcapture
from webkitpy.common.system import user
from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.layout_package import dump_render_tree_thread
from webkitpy.layout_tests.port.test import TestPort, TestDriver
from webkitpy.python24.versioning import compare_version
from webkitpy.test.skip import skip_if

from webkitpy.thirdparty.mock import Mock


class MockUser():
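    """Minimal user stub; records the last URL passed to open_url() so
    tests can check which results page would have been opened."""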
    def __init__(self):
        self.url = None

    def open_url(self, url):
        self.url = url


def passing_run(extra_args=None, port_obj=None, record_results=False,
                tests_included=False):
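    """Run run_webkit_tests with the given arguments against the test port
    and return True if the run exited with 0 (i.e., every test passed)."""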
    extra_args = extra_args or []
    args = ['--print', 'nothing']
    if not '--platform' in extra_args:
        args.extend(['--platform', 'test'])
    if not record_results:
        args.append('--no-record-results')
    if not '--child-processes' in extra_args:
        args.extend(['--worker-model', 'inline'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    options, parsed_args = run_webkit_tests.parse_args(args)
    if not port_obj:
        port_obj = port.get(port_name=options.platform, options=options,
                            user=MockUser())
    res = run_webkit_tests.run(port_obj, options, parsed_args)
    return res == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False):
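    """Run run_webkit_tests with its output captured and return the tuple
    (exit_code, buildbot_output, regular_output, user)."""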
    extra_args = extra_args or []
    args = ['--no-record-results']
    if not '--platform' in extra_args:
        args.extend(['--platform', 'test'])
    if not '--child-processes' in extra_args:
        args.extend(['--worker-model', 'inline'])
    args.extend(extra_args)
    if not tests_included:
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])

    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        options, parsed_args = run_webkit_tests.parse_args(args)
        user = MockUser()
        if not port_obj:
            port_obj = port.get(port_name=options.platform, options=options,
                                user=user)
        buildbot_output = array_stream.ArrayStream()
        regular_output = array_stream.ArrayStream()
        res = run_webkit_tests.run(port_obj, options, parsed_args,
                                   buildbot_output=buildbot_output,
                                   regular_output=regular_output)
    finally:
        oc.restore_output()
    return (res, buildbot_output, regular_output, user)


def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False):
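    """Run a set of tests with a driver that records what it runs, and return
    the test names grouped into per-driver batches (or a single flat list if
    flatten_batches is True)."""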
    extra_args = extra_args or []
    args = [
        '--print', 'nothing',
        '--platform', 'test',
        '--no-record-results',
        '--worker-model', 'inline']
    args.extend(extra_args)
    if not tests_included:
        # Not including http tests since they get run out of order (that
        # behavior has its own test, see test_get_test_file_queue)
        args.extend(['passes', 'failures'])
    options, parsed_args = run_webkit_tests.parse_args(args)
    user = MockUser()

    test_batches = []

    class RecordingTestDriver(TestDriver):
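        """TestDriver that records the name of every test it runs; a new
        batch is started each time the driver is stopped and restarted."""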
        def __init__(self, port, worker_number):
            TestDriver.__init__(self, port, worker_number)
            self._current_test_batch = None

        def poll(self):
            # So that we don't create a new driver for every test
            return None

        def stop(self):
            self._current_test_batch = None

        def run_test(self, test_input):
            if self._current_test_batch is None:
                self._current_test_batch = []
                test_batches.append(self._current_test_batch)
            test_name = self._port.relative_test_filename(test_input.filename)
            self._current_test_batch.append(test_name)
            return TestDriver.run_test(self, test_input)

    class RecordingTestPort(TestPort):
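        """TestPort whose drivers record the tests they are asked to run."""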
        def create_driver(self, worker_number):
            return RecordingTestDriver(self, worker_number)

    recording_port = RecordingTestPort(options=options, user=user)
    logging_run(extra_args=args, port_obj=recording_port, tests_included=True)

    if flatten_batches:
        return list(itertools.chain(*test_batches))

    return test_batches


class MainTest(unittest.TestCase):
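    """Tests for the top-level run() entry point and its command-line options."""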
    def test_accelerated_compositing(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-compositing']))
        self.assertTrue(passing_run(['--no-accelerated-compositing']))

    def test_accelerated_2d_canvas(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-2d-canvas']))
        self.assertTrue(passing_run(['--no-accelerated-2d-canvas']))

    def test_basic(self):
        self.assertTrue(passing_run())

    def test_batch_size(self):
        batch_tests_run = get_tests_run(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_process_1(self):
        (res, buildbot_output, regular_output, user) = logging_run(
             ['--print', 'config', '--child-processes', '1'])
        self.assertTrue('Running one DumpRenderTree\n'
                        in regular_output.get())

    def test_child_processes_2(self):
        (res, buildbot_output, regular_output, user) = logging_run(
             ['--print', 'config', '--child-processes', '2'])
        self.assertTrue('Running 2 DumpRenderTrees in parallel\n'
                        in regular_output.get())

    def test_dryrun(self):
        batch_tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(batch_tests_run, [])

        batch_tests_run = get_tests_run(['-n'])
        self.assertEqual(batch_tests_run, [])

    def test_exception_raised(self):
        self.assertRaises(ValueError, logging_run,
            ['failures/expected/exception.html'], tests_included=True)

    def test_full_results_html(self):
        # FIXME: verify html?
        self.assertTrue(passing_run(['--full-results-html']))

    def test_help_printing(self):
        res, out, err, user = logging_run(['--help-printing'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertFalse(err.empty())

    def test_hung_thread(self):
        res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
                                          'failures/expected/hang.html'],
                                          tests_included=True)
        self.assertEqual(res, 0)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run,
            ['failures/expected/keyboard.html'], tests_included=True)

    def test_last_results(self):
        passing_run(['--clobber-old-results'], record_results=True)
        (res, buildbot_output, regular_output, user) = logging_run(
            ['--print-last-failures'])
        self.assertEqual(regular_output.get(), ['\n\n'])
        self.assertEqual(buildbot_output.get(), [])

    def test_lint_test_files(self):
        # FIXME: add errors?
        res, out, err, user = logging_run(['--lint-test-files'],
                                          tests_included=True)
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertTrue(any(['lint succeeded' in msg for msg in err.get()]))

    def test_no_tests_found(self):
        res, out, err, user = logging_run(['resources'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_no_tests_found_2(self):
        res, out, err, user = logging_run(['foo'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_randomize_order(self):
        # FIXME: verify order was shuffled
        self.assertTrue(passing_run(['--randomize-order']))

    def test_run_chunk(self):
        # Test that we actually select the right chunk
        all_tests_run = get_tests_run(flatten_batches=True)
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
        self.assertEquals(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_tests_run(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_test_list(self):
        filename = tempfile.mktemp()
        tmpfile = file(filename, mode='w+')
        tmpfile.write('passes/text.html')
        tmpfile.close()
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)
        os.remove(filename)
        res, out, err, user = logging_run(['--test-list=%s' % filename],
                                          tests_included=True)
        self.assertEqual(res, -1)
        self.assertFalse(err.empty())

    def test_unexpected_failures(self):
        # Run tests including the unexpected failures.
        self._url_opened = None
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(res, 1)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())
        self.assertEqual(user.url, '/tmp/layout-test-results/results.html')

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        tmpdir = tempfile.mkdtemp()
        res, out, err, user = logging_run(['--results-directory=' + tmpdir],
                                          tests_included=True)
        self.assertEqual(user.url, os.path.join(tmpdir, 'results.html'))
        shutil.rmtree(tmpdir, ignore_errors=True)

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(user.url, '/tmp/layout-test-results/results.html')

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        res, out, err, user = logging_run(['--results-directory=foo'],
                                          tests_included=True)
        self.assertEqual(user.url, '/tmp/foo/results.html')

    def test_tolerance(self):
        class ImageDiffTestPort(TestPort):
            def diff_image(self, expected_contents, actual_contents,
                   diff_filename=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return True

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            test_port = ImageDiffTestPort(options=options, user=MockUser())
            passing_run(args, port_obj=test_port, tests_included=True)
            return test_port

        base_args = ['--pixel-tests', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_worker_model__inline(self):
        self.assertTrue(passing_run(['--worker-model', 'inline']))

    def test_worker_model__threads(self):
        self.assertTrue(passing_run(['--worker-model', 'threads']))

    def test_worker_model__processes(self):
        self.assertRaises(ValueError, logging_run,
                          ['--worker-model', 'processes'])

    def test_worker_model__unknown(self):
        self.assertRaises(ValueError, logging_run,
                          ['--worker-model', 'unknown'])


MainTest = skip_if(MainTest, sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0, 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2')


def _mocked_open(original_open, file_list):
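    """Return a replacement for codecs.open that redirects writes to
    '-expected.*' baseline files to os.devnull and records their names in
    file_list; all other files are opened normally."""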
    def _wrapper(name, mode, encoding):
        if name.find("-expected.") != -1 and mode.find("w") != -1:
            # we don't want to actually write new baselines, so stub these out
            name = name.replace('\\', '/')
            file_list.append(name)
            return original_open(os.devnull, mode, encoding)
        return original_open(name, mode, encoding)
    return _wrapper


class RebaselineTest(unittest.TestCase):
    def assertBaselines(self, file_list, file):
        """Assert that file_list contains the baselines for the given file."""
        for ext in [".txt", ".png", ".checksum"]:
            baseline = file + "-expected" + ext
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def disabled_test_reset_results(self):
        # FIXME: This test is disabled until we can rewrite it to use a
        # mock filesystem.
        #
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        file_list = []
        passing_run(['--pixel-tests',
                        '--reset-results',
                        'passes/image.html',
                        'failures/expected/missing_image.html'],
                        tests_included=True)
        self.assertEqual(len(file_list), 6)
        self.assertBaselines(file_list,
            "data/passes/image")
        self.assertBaselines(file_list,
            "data/failures/expected/missing_image")

    def disabled_test_new_baseline(self):
        # FIXME: This test is disabled until we can rewrite it to use a
        # mock filesystem.
        #
        # Test that we update the platform expectations. If the expectation
        # is missing, then create a new expectation in the platform dir.
        file_list = []
        original_open = codecs.open
        try:
            codecs.open = _mocked_open(original_open, file_list)
            passing_run(['--pixel-tests',
                         '--new-baseline',
                         'passes/image.html',
                         'failures/expected/missing_image.html'],
                        tests_included=True)
            self.assertEqual(len(file_list), 6)
            self.assertBaselines(file_list,
                "data/platform/test/passes/image")
            self.assertBaselines(file_list,
                "data/platform/test/failures/expected/missing_image")
        finally:
            codecs.open = original_open


class TestRunnerWrapper(run_webkit_tests.TestRunner):
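    """TestRunner that returns test file names unchanged instead of building
    TestInput objects, so _shard_tests() can be driven with plain strings."""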
    def _get_test_input_for_file(self, test_file):
        return test_file


class TestRunnerTest(unittest.TestCase):
    def test_results_html(self):
        mock_port = Mock()
        mock_port.relative_test_filename = lambda name: name
        mock_port.filename_to_uri = lambda name: name

        runner = run_webkit_tests.TestRunner(port=mock_port, options=Mock(),
            printer=Mock(), message_broker=Mock())
        expected_html = u"""<html>
  <head>
    <title>Layout Test Results (time)</title>
  </head>
  <body>
    <h2>Title (time)</h2>
        <p><a href='test_path'>test_path</a><br />
</p>
</body></html>
"""
        html = runner._results_html(["test_path"], {}, "Title", override_time="time")
        self.assertEqual(html, expected_html)

    def test_shard_tests(self):
        # Test that _shard_tests in run_webkit_tests.TestRunner really
        # puts the http tests first in the queue.
        runner = TestRunnerWrapper(port=Mock(), options=Mock(),
            printer=Mock(), message_broker=Mock())

        test_list = [
          "LayoutTests/websocket/tests/unicode.htm",
          "LayoutTests/animations/keyframes.html",
          "LayoutTests/http/tests/security/view-source-no-refresh.html",
          "LayoutTests/websocket/tests/websocket-protocol-ignored.html",
          "LayoutTests/fast/css/display-none-inline-style-change-crash.html",
          "LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html",
          "LayoutTests/dom/html/level2/html/HTMLAnchorElement03.html",
          "LayoutTests/ietestcenter/Javascript/11.1.5_4-4-c-1.html",
          "LayoutTests/dom/html/level2/html/HTMLAnchorElement06.html",
        ]

        expected_tests_to_http_lock = set([
          'LayoutTests/websocket/tests/unicode.htm',
          'LayoutTests/http/tests/security/view-source-no-refresh.html',
          'LayoutTests/websocket/tests/websocket-protocol-ignored.html',
          'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html',
        ])

        # FIXME: Ideally the HTTP tests don't have to all be in one shard.
        single_thread_results = runner._shard_tests(test_list, False)
        multi_thread_results = runner._shard_tests(test_list, True)

        self.assertEqual("tests_to_http_lock", single_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock, set(single_thread_results[0][1]))
        self.assertEqual("tests_to_http_lock", multi_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock, set(multi_thread_results[0][1]))


class DryrunTest(unittest.TestCase):
    # FIXME: it's hard to know which platforms are safe to test; the
    # chromium platforms require a chromium checkout, and the mac platform
    # requires fcntl, so it can't be tested on win32, etc. There is
    # probably a better way of handling this.
    def test_darwin(self):
        if sys.platform != "darwin":
            return

        self.assertTrue(passing_run(['--platform', 'test']))
        self.assertTrue(passing_run(['--platform', 'dryrun',
                                     'fast/html']))
        self.assertTrue(passing_run(['--platform', 'dryrun-mac',
                                     'fast/html']))

    def test_test(self):
        self.assertTrue(passing_run(['--platform', 'dryrun-test',
                                           '--pixel-tests']))


if __name__ == '__main__':
    unittest.main()