#!/usr/bin/python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit tests for run_webkit_tests."""

from __future__ import with_statement

import codecs
import itertools
import logging
import Queue
import sys
import thread
import time
import threading
import unittest

from webkitpy.common import array_stream
from webkitpy.common.system import outputcapture
from webkitpy.common.system import filesystem_mock
from webkitpy.tool import mocktool
from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.layout_package import dump_render_tree_thread
from webkitpy.layout_tests.port.test import TestPort, TestDriver
from webkitpy.python24.versioning import compare_version
from webkitpy.test.skip import skip_if

from webkitpy.thirdparty.mock import Mock


def parse_args(extra_args=None, record_results=False, tests_included=False,
               print_nothing=True):
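    """Build an argument list for new-run-webkit-tests around extra_args.

    Returns the (options, args) tuple produced by run_webkit_tests.parse_args().
    """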
    extra_args = extra_args or []
    if print_nothing:
        args = ['--print', 'nothing']
    else:
        args = []
    if not '--platform' in extra_args:
        args.extend(['--platform', 'test'])
    if not record_results:
        args.append('--no-record-results')
    if not '--child-processes' in extra_args:
        args.extend(['--worker-model', 'old-inline'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)


def passing_run(extra_args=None, port_obj=None, record_results=False,
                tests_included=False, filesystem=None):
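    """Run the tests and return True if the run exited with code 0."""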
    options, parsed_args = parse_args(extra_args, record_results,
                                      tests_included)
    if not port_obj:
        port_obj = port.get(port_name=options.platform, options=options,
                            user=mocktool.MockUser(), filesystem=filesystem)
    res = run_webkit_tests.run(port_obj, options, parsed_args)
    return res == 0


def logging_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, filesystem=None):
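    """Run the tests and return (exit code, buildbot output, regular output, MockUser)."""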
    options, parsed_args = parse_args(extra_args=extra_args,
                                      record_results=record_results,
                                      tests_included=tests_included,
                                      print_nothing=False)
    user = mocktool.MockUser()
    if not port_obj:
        port_obj = port.get(port_name=options.platform, options=options,
                            user=user, filesystem=filesystem)

    res, buildbot_output, regular_output = run_and_capture(port_obj, options,
                                                           parsed_args)
    return (res, buildbot_output, regular_output, user)


def run_and_capture(port_obj, options, parsed_args):
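    """Call run_webkit_tests.run() with ArrayStream outputs, capturing stdout/stderr."""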
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        buildbot_output = array_stream.ArrayStream()
        regular_output = array_stream.ArrayStream()
        res = run_webkit_tests.run(port_obj, options, parsed_args,
                                   buildbot_output=buildbot_output,
                                   regular_output=regular_output)
    finally:
        oc.restore_output()
    return (res, buildbot_output, regular_output)


def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False, filesystem=None):
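    """Run the given tests with a recording driver and return the names of the tests run.

    Returns a list of per-driver batches of test names, or a single flattened
    list if flatten_batches is True.
    """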
    extra_args = extra_args or []
    if not tests_included:
        # Not including http tests since they get run out of order (that
        # behavior has its own test, see test_get_test_file_queue)
        extra_args = ['passes', 'failures'] + extra_args
    options, parsed_args = parse_args(extra_args, tests_included=True)

    user = mocktool.MockUser()

    test_batches = []

    class RecordingTestDriver(TestDriver):
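        """TestDriver that records the name of each test it runs into test_batches."""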
        def __init__(self, port, worker_number):
            TestDriver.__init__(self, port, worker_number)
            self._current_test_batch = None

        def poll(self):
            # So that we don't create a new driver for every test
            return None

        def stop(self):
            self._current_test_batch = None

        def run_test(self, test_input):
            if self._current_test_batch is None:
                self._current_test_batch = []
                test_batches.append(self._current_test_batch)
            test_name = self._port.relative_test_filename(test_input.filename)
            self._current_test_batch.append(test_name)
            return TestDriver.run_test(self, test_input)

    class RecordingTestPort(TestPort):
        def create_driver(self, worker_number):
            return RecordingTestDriver(self, worker_number)

    recording_port = RecordingTestPort(options=options, user=user, filesystem=filesystem)
    run_and_capture(recording_port, options, parsed_args)

    if flatten_batches:
        return list(itertools.chain(*test_batches))

    return test_batches


class MainTest(unittest.TestCase):
    def test_accelerated_compositing(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-compositing']))
        self.assertTrue(passing_run(['--no-accelerated-compositing']))

    def test_accelerated_2d_canvas(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-2d-canvas']))
        self.assertTrue(passing_run(['--no-accelerated-2d-canvas']))

    def test_basic(self):
        self.assertTrue(passing_run())

    def test_batch_size(self):
        batch_tests_run = get_tests_run(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_process_1(self):
        (res, buildbot_output, regular_output, user) = logging_run(
             ['--print', 'config', '--child-processes', '1'])
        self.assertTrue('Running one DumpRenderTree\n'
                        in regular_output.get())

    def test_child_processes_2(self):
        (res, buildbot_output, regular_output, user) = logging_run(
             ['--print', 'config', '--child-processes', '2'])
        self.assertTrue('Running 2 DumpRenderTrees in parallel\n'
                        in regular_output.get())

    def test_dryrun(self):
        batch_tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(batch_tests_run, [])

        batch_tests_run = get_tests_run(['-n'])
        self.assertEqual(batch_tests_run, [])

    def test_exception_raised(self):
        self.assertRaises(ValueError, logging_run,
            ['failures/expected/exception.html'], tests_included=True)

    def test_full_results_html(self):
        # FIXME: verify html?
        res, out, err, user = logging_run(['--full-results-html'])
        self.assertEqual(res, 0)

    def test_help_printing(self):
        res, out, err, user = logging_run(['--help-printing'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertFalse(err.empty())

    def test_hung_thread(self):
        res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
                                          'failures/expected/hang.html'],
                                          tests_included=True)
        self.assertEqual(res, 0)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run,
            ['failures/expected/keyboard.html'], tests_included=True)

    def test_last_results(self):
        fs = port.unit_test_filesystem()
        # We do a logging run here instead of a passing run in order to
        # suppress the output from the json generator.
        (res, buildbot_output, regular_output, user) = logging_run(['--clobber-old-results'], record_results=True, filesystem=fs)
        (res, buildbot_output, regular_output, user) = logging_run(
            ['--print-last-failures'], filesystem=fs)
        self.assertEqual(regular_output.get(), ['\n\n'])
        self.assertEqual(buildbot_output.get(), [])

    def test_lint_test_files(self):
        res, out, err, user = logging_run(['--lint-test-files'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertTrue(any(['Lint succeeded' in msg for msg in err.get()]))

    def test_lint_test_files__errors(self):
        options, parsed_args = parse_args(['--lint-test-files'])
        user = mocktool.MockUser()
        port_obj = port.get(options.platform, options=options, user=user)
        port_obj.test_expectations = lambda: "# syntax error"
        res, out, err = run_and_capture(port_obj, options, parsed_args)

        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue(any(['Lint failed' in msg for msg in err.get()]))

    def test_no_tests_found(self):
        res, out, err, user = logging_run(['resources'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_no_tests_found_2(self):
        res, out, err, user = logging_run(['foo'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_randomize_order(self):
        # FIXME: verify order was shuffled
        self.assertTrue(passing_run(['--randomize-order']))

    def test_run_chunk(self):
        # Test that we actually select the right chunk
        all_tests_run = get_tests_run(flatten_batches=True)
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
        self.assertEquals(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_tests_run(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        tests_run = get_tests_run(['failures/expected/keybaord.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_test_list(self):
        fs = port.unit_test_filesystem()
        filename = '/tmp/foo.txt'
        fs.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, filesystem=fs)
        self.assertEquals(['passes/text.html'], tests_run)
        fs.remove(filename)
        res, out, err, user = logging_run(['--test-list=%s' % filename],
                                          tests_included=True, filesystem=fs)
        self.assertEqual(res, -1)
        self.assertFalse(err.empty())

    def test_test_list_with_prefix(self):
        fs = port.unit_test_filesystem()
        filename = '/tmp/foo.txt'
        fs.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, filesystem=fs)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_unexpected_failures(self):
        # Run tests including the unexpected failures.
        self._url_opened = None
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(res, 3)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())
        self.assertEqual(user.opened_urls, ['/tmp/layout-test-results/results.html'])

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/text-image-checksum.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/text.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run([
                'failures/unexpected/timeout.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        fs = port.unit_test_filesystem()
        with fs.mkdtemp() as tmpdir:
            res, out, err, user = logging_run(['--results-directory=' + str(tmpdir)],
                                              tests_included=True, filesystem=fs)
            self.assertEqual(user.opened_urls, [fs.join(tmpdir, 'results.html')])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, ['/tmp/layout-test-results/results.html'])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        res, out, err, user = logging_run(['--results-directory=foo'],
                                          tests_included=True)
        self.assertEqual(user.opened_urls, ['/tmp/foo/results.html'])

    def test_tolerance(self):
        class ImageDiffTestPort(TestPort):
            def diff_image(self, expected_contents, actual_contents,
                   diff_filename=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return True

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            test_port = ImageDiffTestPort(options=options, user=mocktool.MockUser())
            passing_run(args, port_obj=test_port, tests_included=True)
            return test_port

        base_args = ['--pixel-tests', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_worker_model__inline(self):
        self.assertTrue(passing_run(['--worker-model', 'inline']))

    def test_worker_model__old_inline_with_child_processes(self):
        res, out, err, user = logging_run(['--worker-model', 'old-inline',
                                           '--child-processes', '2'])
        self.assertEqual(res, 0)
        self.assertTrue('--worker-model=old-inline overrides --child-processes\n' in err.get())

    def test_worker_model__old_inline(self):
        self.assertTrue(passing_run(['--worker-model', 'old-inline']))

    def test_worker_model__old_threads(self):
        self.assertTrue(passing_run(['--worker-model', 'old-threads']))

    def test_worker_model__processes(self):
        self.assertRaises(ValueError, logging_run, ['--worker-model', 'processes'])

    def test_worker_model__threads(self):
        self.assertRaises(ValueError, logging_run, ['--worker-model', 'threads'])

    def test_worker_model__unknown(self):
        self.assertRaises(ValueError, logging_run,
                          ['--worker-model', 'unknown'])

MainTest = skip_if(MainTest, sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0, 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2')


class RebaselineTest(unittest.TestCase):
    def assertBaselines(self, file_list, file):
        """Assert that the file_list contains the baselines."""
        for ext in [".txt", ".png", ".checksum"]:
            baseline = file + "-expected" + ext
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        fs = port.unit_test_filesystem()
        passing_run(['--pixel-tests',
                        '--reset-results',
                        'passes/image.html',
                        'failures/expected/missing_image.html'],
                        tests_included=True, filesystem=fs)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEqual(len(file_list), 6)
        self.assertBaselines(file_list,
            "/passes/image")
        self.assertBaselines(file_list,
            "/failures/expected/missing_image")

    def test_new_baseline(self):
        # Test that we update the platform expectations. If the expectation
        # is missing, then create a new expectation in the platform dir.
        fs = port.unit_test_filesystem()
        passing_run(['--pixel-tests',
                        '--new-baseline',
                        'passes/image.html',
                        'failures/expected/missing_image.html'],
                    tests_included=True, filesystem=fs)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEqual(len(file_list), 6)
        self.assertBaselines(file_list,
            "/platform/test-mac/passes/image")
        self.assertBaselines(file_list,
            "/platform/test-mac/failures/expected/missing_image")


class DryrunTest(unittest.TestCase):
    # FIXME: it's hard to know which platforms are safe to test; the
    # chromium platforms require a chromium checkout, and the mac platform
    # requires fcntl, so it can't be tested on win32, etc. There is
    # probably a better way of handling this.
    def disabled_test_darwin(self):
        if sys.platform != "darwin":
            return

        self.assertTrue(passing_run(['--platform', 'dryrun', 'fast/html'],
                        tests_included=True))
        self.assertTrue(passing_run(['--platform', 'dryrun-mac', 'fast/html'],
                        tests_included=True))

    def test_test(self):
        self.assertTrue(passing_run(['--platform', 'dryrun-test',
                                           '--pixel-tests']))


if __name__ == '__main__':
    unittest.main()