#!/usr/bin/python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

31 """Unit tests for run_webkit_tests."""
32
33 import codecs
34 import itertools
35 import logging
36 import os
37 import Queue
38 import shutil
39 import sys
40 import tempfile
41 import thread
42 import time
43 import threading
44 import unittest
45
46 from webkitpy.common import array_stream
47 from webkitpy.common.system import outputcapture
48 from webkitpy.common.system import user
49 from webkitpy.layout_tests import port
50 from webkitpy.layout_tests import run_webkit_tests
51 from webkitpy.layout_tests.layout_package import dump_render_tree_thread
52 from webkitpy.layout_tests.port.test import TestPort, TestDriver
53 from webkitpy.python24.versioning import compare_version
54 from webkitpy.test.skip import skip_if
55
56 from webkitpy.thirdparty.mock import Mock
57
58
class MockUser(object):
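    """Test double for a User object; records the last URL passed to
    open_url() instead of opening a real browser."""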
    def __init__(self):
        self.url = None

    def open_url(self, url):
        self.url = url


def passing_run(extra_args=None, port_obj=None, record_results=False,
                tests_included=False):
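    """Run run_webkit_tests with the given arguments (defaulting to the
    'test' platform and the inline worker model) and return True if the
    run exited 0."""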
    extra_args = extra_args or []
    args = ['--print', 'nothing']
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if not record_results:
        args.append('--no-record-results')
    if '--child-processes' not in extra_args:
        args.extend(['--worker-model', 'inline'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    options, parsed_args = run_webkit_tests.parse_args(args)
    if not port_obj:
        port_obj = port.get(port_name=options.platform, options=options,
                            user=MockUser())
    res = run_webkit_tests.run(port_obj, options, parsed_args)
    return res == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False):
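    """Like passing_run(), but capture the output streams and return the
    tuple (exit_code, buildbot_output, regular_output, user)."""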
    extra_args = extra_args or []
    args = ['--no-record-results']
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if '--child-processes' not in extra_args:
        args.extend(['--worker-model', 'inline'])
    args.extend(extra_args)
    if not tests_included:
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])

    try:
        oc = outputcapture.OutputCapture()
        oc.capture_output()
        options, parsed_args = run_webkit_tests.parse_args(args)
        user = MockUser()
        if not port_obj:
            port_obj = port.get(port_name=options.platform, options=options,
                                user=user)
        buildbot_output = array_stream.ArrayStream()
        regular_output = array_stream.ArrayStream()
        res = run_webkit_tests.run(port_obj, options, parsed_args,
                                   buildbot_output=buildbot_output,
                                   regular_output=regular_output)
    finally:
        oc.restore_output()
    return (res, buildbot_output, regular_output, user)


def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False):
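    """Run the given tests through a driver that records each test it is
    asked to run; return the recorded batches, or one flat list of test
    names if flatten_batches is True."""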
    extra_args = extra_args or []
    args = [
        '--print', 'nothing',
        '--platform', 'test',
        '--no-record-results',
        '--worker-model', 'inline']
    args.extend(extra_args)
    if not tests_included:
        # Not including http tests since they get run out of order (that
        # behavior has its own test, see test_get_test_file_queue)
        args.extend(['passes', 'failures'])
    options, parsed_args = run_webkit_tests.parse_args(args)
    user = MockUser()

    test_batches = []

    class RecordingTestDriver(TestDriver):
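        """TestDriver that appends each test name it runs to the enclosing
        test_batches list, starting a new batch whenever it is restarted."""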
        def __init__(self, port, image_path, options):
            TestDriver.__init__(self, port, image_path, options, executive=None)
            self._current_test_batch = None

        def poll(self):
            # So that we don't create a new driver for every test
            return None

        def stop(self):
            self._current_test_batch = None

        def run_test(self, test_input):
            if self._current_test_batch is None:
                self._current_test_batch = []
                test_batches.append(self._current_test_batch)
            test_name = self._port.relative_test_filename(test_input.filename)
            self._current_test_batch.append(test_name)
            return TestDriver.run_test(self, test_input)

    class RecordingTestPort(TestPort):
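        """TestPort that hands out RecordingTestDrivers so we can observe
        which tests were run, and in what order."""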
        def create_driver(self, image_path, options):
            return RecordingTestDriver(self, image_path, options)

    recording_port = RecordingTestPort(options=options, user=user)
    logging_run(extra_args=args, port_obj=recording_port, tests_included=True)

    if flatten_batches:
        return list(itertools.chain(*test_batches))

    return test_batches


class MainTest(unittest.TestCase):
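    """End-to-end tests for run_webkit_tests.run(), driven entirely through
    command-line arguments via the helpers above."""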
    def test_accelerated_compositing(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-compositing']))
        self.assertTrue(passing_run(['--no-accelerated-compositing']))

    def test_accelerated_2d_canvas(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-2d-canvas']))
        self.assertTrue(passing_run(['--no-accelerated-2d-canvas']))

    def test_basic(self):
        self.assertTrue(passing_run())

    def test_batch_size(self):
        batch_tests_run = get_tests_run(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_process_1(self):
        (res, buildbot_output, regular_output, user) = logging_run(
             ['--print', 'config', '--child-processes', '1'])
        self.assertTrue('Running one DumpRenderTree\n'
                        in regular_output.get())

    def test_child_processes_2(self):
        (res, buildbot_output, regular_output, user) = logging_run(
             ['--print', 'config', '--child-processes', '2'])
        self.assertTrue('Running 2 DumpRenderTrees in parallel\n'
                        in regular_output.get())

    def test_exception_raised(self):
        self.assertRaises(ValueError, logging_run,
            ['failures/expected/exception.html'], tests_included=True)

    def test_full_results_html(self):
        # FIXME: verify html?
        self.assertTrue(passing_run(['--full-results-html']))

    def test_help_printing(self):
        res, out, err, user = logging_run(['--help-printing'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertFalse(err.empty())

    def test_hung_thread(self):
        res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
                                          'failures/expected/hang.html'],
                                          tests_included=True)
        self.assertEqual(res, 0)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, passing_run,
            ['failures/expected/keyboard.html'], tests_included=True)

    def test_last_results(self):
        passing_run(['--clobber-old-results'], record_results=True)
        (res, buildbot_output, regular_output, user) = logging_run(
            ['--print-last-failures'])
        self.assertEqual(regular_output.get(), ['\n\n'])
        self.assertEqual(buildbot_output.get(), [])

    def test_lint_test_files(self):
        # FIXME: add errors?
        res, out, err, user = logging_run(['--lint-test-files'],
                                          tests_included=True)
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertTrue(any(['lint succeeded' in msg for msg in err.get()]))

    def test_no_tests_found(self):
        res, out, err, user = logging_run(['resources'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_no_tests_found_2(self):
        res, out, err, user = logging_run(['foo'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_randomize_order(self):
        # FIXME: verify order was shuffled
        self.assertTrue(passing_run(['--randomize-order']))

    def test_run_chunk(self):
        # Test that we actually select the right chunk
        all_tests_run = get_tests_run(flatten_batches=True)
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
        self.assertEqual(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_tests_run(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_test_list(self):
        fd, filename = tempfile.mkstemp()
        tmpfile = os.fdopen(fd, 'w')
        tmpfile.write('passes/text.html')
        tmpfile.close()
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True)
        self.assertEqual(['passes/text.html'], tests_run)
        os.remove(filename)
        # Now that the list file is gone, the same run should find no tests.
        res, out, err, user = logging_run(['--test-list=%s' % filename],
                                          tests_included=True)
        self.assertEqual(res, -1)
        self.assertFalse(err.empty())

    def test_unexpected_failures(self):
        # Run tests including the unexpected failures.
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(res, 1)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())
        self.assertEqual(user.url, '/tmp/layout-test-results/results.html')

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        tmpdir = tempfile.mkdtemp()
        res, out, err, user = logging_run(['--results-directory=' + tmpdir],
                                          tests_included=True)
        self.assertEqual(user.url, os.path.join(tmpdir, 'results.html'))
        shutil.rmtree(tmpdir, ignore_errors=True)

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(user.url, '/tmp/layout-test-results/results.html')

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        res, out, err, user = logging_run(['--results-directory=foo'],
                                          tests_included=True)
        self.assertEqual(user.url, '/tmp/foo/results.html')

    def test_tolerance(self):
        class ImageDiffTestPort(TestPort):
            def diff_image(self, expected_contents, actual_contents,
                           diff_filename=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return True

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            test_port = ImageDiffTestPort(options=options, user=MockUser())
            passing_run(args, port_obj=test_port, tests_included=True)
            return test_port

        base_args = ['--pixel-tests', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_worker_model__inline(self):
        self.assertTrue(passing_run(['--worker-model', 'inline']))

    def test_worker_model__threads(self):
        self.assertTrue(passing_run(['--worker-model', 'threads']))

    def test_worker_model__processes(self):
        self.assertRaises(SystemExit, logging_run,
                          ['--worker-model', 'processes'])

    def test_worker_model__unknown(self):
        self.assertRaises(SystemExit, logging_run,
                          ['--worker-model', 'unknown'])


MainTest = skip_if(MainTest, sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0, 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2')


def _mocked_open(original_open, file_list):
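    """Return a replacement for codecs.open() that diverts writes of
    '-expected.*' baseline files to os.devnull, recording the intended
    file names in file_list."""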
    def _wrapper(name, mode, encoding):
        if name.find("-expected.") != -1 and mode.find("w") != -1:
            # We don't want to actually write new baselines, so stub these out.
            name = name.replace('\\', '/')
            file_list.append(name)
            return original_open(os.devnull, mode, encoding)
        return original_open(name, mode, encoding)
    return _wrapper


class RebaselineTest(unittest.TestCase):
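    """Tests for the --reset-results and --new-baseline code paths, using a
    mocked codecs.open() so no baselines are actually written to disk.
    (Both tests are currently disabled pending a mock filesystem.)"""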
    def assertBaselines(self, file_list, file_base):
        """Assert that file_list contains each baseline for file_base."""
        for ext in [".txt", ".png", ".checksum"]:
            baseline = file_base + "-expected" + ext
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def disabled_test_reset_results(self):
        # FIXME: This test is disabled until we can rewrite it to use a
        # mock filesystem.
        #
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        file_list = []
        original_open = codecs.open
        try:
            codecs.open = _mocked_open(original_open, file_list)
            passing_run(['--pixel-tests',
                         '--reset-results',
                         'passes/image.html',
                         'failures/expected/missing_image.html'],
                        tests_included=True)
            self.assertEqual(len(file_list), 6)
            self.assertBaselines(file_list,
                "data/passes/image")
            self.assertBaselines(file_list,
                "data/failures/expected/missing_image")
        finally:
            codecs.open = original_open

    def disabled_test_new_baseline(self):
        # FIXME: This test is disabled until we can rewrite it to use a
        # mock filesystem.
        #
        # Test that we update the platform expectations. If the expectation
        # is missing, then create a new expectation in the platform dir.
        file_list = []
        original_open = codecs.open
        try:
            codecs.open = _mocked_open(original_open, file_list)
            passing_run(['--pixel-tests',
                         '--new-baseline',
                         'passes/image.html',
                         'failures/expected/missing_image.html'],
                        tests_included=True)
            self.assertEqual(len(file_list), 6)
            self.assertBaselines(file_list,
                "data/platform/test/passes/image")
            self.assertBaselines(file_list,
                "data/platform/test/failures/expected/missing_image")
        finally:
            codecs.open = original_open


class TestRunnerWrapper(run_webkit_tests.TestRunner):
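    """TestRunner that skips test-input conversion so _shard_tests() can be
    exercised with plain filename strings in the tests below."""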
    def _get_test_input_for_file(self, test_file):
        return test_file


class TestRunnerTest(unittest.TestCase):
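    """Unit tests for individual TestRunner methods, using Mock objects in
    place of a real port and printer."""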
    def test_results_html(self):
        mock_port = Mock()
        mock_port.relative_test_filename = lambda name: name
        mock_port.filename_to_uri = lambda name: name

        runner = run_webkit_tests.TestRunner(port=mock_port, options=Mock(), printer=Mock())
        expected_html = u"""<html>
  <head>
    <title>Layout Test Results (time)</title>
  </head>
  <body>
    <h2>Title (time)</h2>
        <p><a href='test_path'>test_path</a><br />
</p>
</body></html>
"""
        html = runner._results_html(["test_path"], {}, "Title", override_time="time")
        self.assertEqual(html, expected_html)

    def test_shard_tests(self):
        # Test that _shard_tests in run_webkit_tests.TestRunner really
        # puts the http tests first in the queue.
        runner = TestRunnerWrapper(port=Mock(), options=Mock(), printer=Mock())

        test_list = [
          "LayoutTests/websocket/tests/unicode.htm",
          "LayoutTests/animations/keyframes.html",
          "LayoutTests/http/tests/security/view-source-no-refresh.html",
          "LayoutTests/websocket/tests/websocket-protocol-ignored.html",
          "LayoutTests/fast/css/display-none-inline-style-change-crash.html",
          "LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html",
          "LayoutTests/dom/html/level2/html/HTMLAnchorElement03.html",
          "LayoutTests/ietestcenter/Javascript/11.1.5_4-4-c-1.html",
          "LayoutTests/dom/html/level2/html/HTMLAnchorElement06.html",
        ]

        expected_tests_to_http_lock = set([
          'LayoutTests/websocket/tests/unicode.htm',
          'LayoutTests/http/tests/security/view-source-no-refresh.html',
          'LayoutTests/websocket/tests/websocket-protocol-ignored.html',
          'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html',
        ])

        # FIXME: Ideally the HTTP tests don't have to all be in one shard.
        single_thread_results = runner._shard_tests(test_list, False)
        multi_thread_results = runner._shard_tests(test_list, True)

        self.assertEqual("tests_to_http_lock", single_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock, set(single_thread_results[0][1]))
        self.assertEqual("tests_to_http_lock", multi_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock, set(multi_thread_results[0][1]))


class DryrunTest(unittest.TestCase):
    # FIXME: it's hard to know which platforms are safe to test; the
    # chromium platforms require a chromium checkout, and the mac platform
    # requires fcntl, so it can't be tested on win32, etc. There is
    # probably a better way of handling this.
    def test_darwin(self):
        if sys.platform != "darwin":
            return

        self.assertTrue(passing_run(['--platform', 'test']))
        self.assertTrue(passing_run(['--platform', 'dryrun',
                                     'fast/html']))
        self.assertTrue(passing_run(['--platform', 'dryrun-mac',
                                     'fast/html']))

    def test_test(self):
        self.assertTrue(passing_run(['--platform', 'dryrun-test',
                                     '--pixel-tests']))


if __name__ == '__main__':
    unittest.main()