#!/usr/bin/python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit tests for run_webkit_tests."""

import codecs
import itertools
import logging
import os
import Queue
import shutil
import sys
import tempfile
import thread
import time
import threading
import unittest

from webkitpy.common import array_stream
from webkitpy.common.system import outputcapture
from webkitpy.common.system import user
from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.layout_package import dump_render_tree_thread
from webkitpy.layout_tests.port.test import TestPort, TestDriver
from webkitpy.python24.versioning import compare_version
from webkitpy.test.skip import skip_if

from webkitpy.thirdparty.mock import Mock


class MockUser():
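    """Stand-in for webkitpy's User object that just records the last URL passed to open_url()."""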
    def __init__(self):
        self.url = None

    def open_url(self, url):
        self.url = url


def passing_run(extra_args=None, port_obj=None, record_results=False,
                tests_included=False):
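    """Run run_webkit_tests.run() against the test port with the given arguments.

    Returns True if the run exited with a return code of 0."""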
    extra_args = extra_args or []
    args = ['--print', 'nothing']
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if not record_results:
        args.append('--no-record-results')
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    options, parsed_args = run_webkit_tests.parse_args(args)
    if not port_obj:
        port_obj = port.get(port_name=options.platform, options=options,
                            user=MockUser())
    res = run_webkit_tests.run(port_obj, options, parsed_args)
    return res == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False):
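    """Run run_webkit_tests.run() and capture its output streams.

    Returns a (return_code, buildbot_output, regular_output, user) tuple."""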
    extra_args = extra_args or []
    args = ['--no-record-results']
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    args.extend(extra_args)
    if not tests_included:
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    options, parsed_args = run_webkit_tests.parse_args(args)
    user = MockUser()
    if not port_obj:
        port_obj = port.get(port_name=options.platform, options=options, user=user)
    buildbot_output = array_stream.ArrayStream()
    regular_output = array_stream.ArrayStream()
    res = run_webkit_tests.run(port_obj, options, parsed_args,
                               buildbot_output=buildbot_output,
                               regular_output=regular_output)
    return (res, buildbot_output, regular_output, user)


def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False):
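    """Run the given tests and return the batches of tests passed to the driver.

    Each batch is the list of test names handled by one driver instance. If
    flatten_batches is True, the batches are flattened into a single list."""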
    extra_args = extra_args or []
    args = [
        '--print', 'nothing',
        '--platform', 'test',
        '--no-record-results',
        '--child-processes', '1']
    args.extend(extra_args)
    if not tests_included:
        # Not including http tests since they get run out of order (that
        # behavior has its own test, see test_get_test_file_queue)
        args.extend(['passes', 'failures'])
    options, parsed_args = run_webkit_tests.parse_args(args)
    user = MockUser()

    test_batches = []

    class RecordingTestDriver(TestDriver):
        def __init__(self, port, image_path, options):
            TestDriver.__init__(self, port, image_path, options, executive=None)
            self._current_test_batch = None

        def poll(self):
            # So that we don't create a new driver for every test
            return None

        def stop(self):
            self._current_test_batch = None

        def run_test(self, test_input):
            if self._current_test_batch is None:
                self._current_test_batch = []
                test_batches.append(self._current_test_batch)
            test_name = self._port.relative_test_filename(test_input.filename)
            self._current_test_batch.append(test_name)
            return TestDriver.run_test(self, test_input)

    class RecordingTestPort(TestPort):
        def create_driver(self, image_path, options):
            return RecordingTestDriver(self, image_path, options)

    recording_port = RecordingTestPort(options=options, user=user)
    logging_run(extra_args=args, port_obj=recording_port, tests_included=True)

    if flatten_batches:
        return list(itertools.chain(*test_batches))

    return test_batches


class MainTest(unittest.TestCase):
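    """End-to-end tests that drive run_webkit_tests.run() with various command-line options."""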
    def test_accelerated_compositing(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-compositing']))
        self.assertTrue(passing_run(['--no-accelerated-compositing']))

    def test_accelerated_2d_canvas(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-2d-canvas']))
        self.assertTrue(passing_run(['--no-accelerated-2d-canvas']))

    def test_basic(self):
        self.assertTrue(passing_run())

    def test_batch_size(self):
        batch_tests_run = get_tests_run(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_process_1(self):
        (res, buildbot_output, regular_output, user) = logging_run(
             ['--print', 'config', '--child-processes', '1'])
        self.assertTrue('Running one DumpRenderTree\n'
                        in regular_output.get())

    def test_child_processes_2(self):
        (res, buildbot_output, regular_output, user) = logging_run(
             ['--print', 'config', '--child-processes', '2'])
        self.assertTrue('Running 2 DumpRenderTrees in parallel\n'
                        in regular_output.get())

    def test_exception_raised(self):
        self.assertRaises(ValueError, logging_run,
            ['failures/expected/exception.html'], tests_included=True)

    def test_full_results_html(self):
        # FIXME: verify html?
        self.assertTrue(passing_run(['--full-results-html']))

    def test_help_printing(self):
        res, out, err, user = logging_run(['--help-printing'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertFalse(err.empty())

    def test_hung_thread(self):
        res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
                                          'failures/expected/hang.html'],
                                          tests_included=True)
        self.assertEqual(res, 0)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, passing_run,
            ['failures/expected/keyboard.html'], tests_included=True)

    def test_last_results(self):
        passing_run(['--clobber-old-results'], record_results=True)
        (res, buildbot_output, regular_output, user) = logging_run(
            ['--print-last-failures'])
        self.assertEqual(regular_output.get(), ['\n\n'])
        self.assertEqual(buildbot_output.get(), [])

    def test_lint_test_files(self):
        # FIXME: add errors?
        res, out, err, user = logging_run(['--lint-test-files'],
                                          tests_included=True)
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertTrue(any(['lint succeeded' in msg for msg in err.get()]))

    def test_no_tests_found(self):
        res, out, err, user = logging_run(['resources'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_no_tests_found_2(self):
        res, out, err, user = logging_run(['foo'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_randomize_order(self):
        # FIXME: verify order was shuffled
        self.assertTrue(passing_run(['--randomize-order']))

    def test_run_chunk(self):
        # Test that we actually select the right chunk
        all_tests_run = get_tests_run(flatten_batches=True)
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
        self.assertEquals(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_tests_run(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_test_list(self):
        filename = tempfile.mktemp()
        tmpfile = file(filename, mode='w+')
        tmpfile.write('passes/text.html')
        tmpfile.close()
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)
        os.remove(filename)
        res, out, err, user = logging_run(['--test-list=%s' % filename],
                                          tests_included=True)
        self.assertEqual(res, -1)
        self.assertFalse(err.empty())

    def test_unexpected_failures(self):
        # Run tests including the unexpected failures.
        self._url_opened = None
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(res, 1)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())
        self.assertEqual(user.url, '/tmp/layout-test-results/results.html')

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        tmpdir = tempfile.mkdtemp()
        res, out, err, user = logging_run(['--results-directory=' + tmpdir],
                                          tests_included=True)
        self.assertEqual(user.url, os.path.join(tmpdir, 'results.html'))
        shutil.rmtree(tmpdir, ignore_errors=True)

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(user.url, '/tmp/layout-test-results/results.html')

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        res, out, err, user = logging_run(['--results-directory=foo'],
                                          tests_included=True)
        self.assertEqual(user.url, '/tmp/foo/results.html')

    def test_tolerance(self):
        class ImageDiffTestPort(TestPort):
            def diff_image(self, expected_contents, actual_contents,
                           diff_filename=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return True

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            test_port = ImageDiffTestPort(options=options, user=MockUser())
            passing_run(args, port_obj=test_port, tests_included=True)
            return test_port

        base_args = ['--pixel-tests', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)


MainTest = skip_if(MainTest,
                   sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0,
                   'new-run-webkit-tests tests hang on Cygwin Python 2.5.2')


def _mocked_open(original_open, file_list):
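    """Return a wrapper around original_open that diverts writes to baseline files.

    Writes to '-expected.*' files go to os.devnull and the file names are
    recorded in file_list, so no new baselines hit the disk."""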
    def _wrapper(name, mode, encoding):
        if name.find("-expected.") != -1 and mode.find("w") != -1:
            # We don't want to actually write new baselines, so stub these out.
            name = name.replace('\\', '/')
            file_list.append(name)
            return original_open(os.devnull, mode, encoding)
        return original_open(name, mode, encoding)
    return _wrapper


class RebaselineTest(unittest.TestCase):
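    """Tests for the baseline-writing modes (--reset-results and --new-baseline)."""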
    def assertBaselines(self, file_list, file):
        """Assert that file_list contains the baselines for the given test file."""
        for ext in [".txt", ".png", ".checksum"]:
            baseline = file + "-expected" + ext
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def disabled_test_reset_results(self):
        # FIXME: This test is disabled until we can rewrite it to use a
        # mock filesystem.
        #
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        file_list = []
        passing_run(['--pixel-tests',
                     '--reset-results',
                     'passes/image.html',
                     'failures/expected/missing_image.html'],
                    tests_included=True)
        self.assertEqual(len(file_list), 6)
        self.assertBaselines(file_list,
            "data/passes/image")
        self.assertBaselines(file_list,
            "data/failures/expected/missing_image")

    def disabled_test_new_baseline(self):
        # FIXME: This test is disabled until we can rewrite it to use a
        # mock filesystem.
        #
        # Test that we update the platform expectations. If the expectation
        # is missing, then create a new expectation in the platform dir.
        file_list = []
        original_open = codecs.open
        try:
            codecs.open = _mocked_open(original_open, file_list)
            passing_run(['--pixel-tests',
                         '--new-baseline',
                         'passes/image.html',
                         'failures/expected/missing_image.html'],
                        tests_included=True)
            self.assertEqual(len(file_list), 6)
            self.assertBaselines(file_list,
                "data/platform/test/passes/image")
            self.assertBaselines(file_list,
                "data/platform/test/failures/expected/missing_image")
        finally:
            codecs.open = original_open


class TestRunnerWrapper(run_webkit_tests.TestRunner):
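    """TestRunner that returns raw file names from _get_test_input_for_file() so the scheduling queues are easy to inspect."""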
    def _get_test_input_for_file(self, test_file):
        return test_file


class TestRunnerTest(unittest.TestCase):
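    """Unit tests for run_webkit_tests.TestRunner."""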
    def test_results_html(self):
        mock_port = Mock()
        mock_port.relative_test_filename = lambda name: name
        mock_port.filename_to_uri = lambda name: name

        runner = run_webkit_tests.TestRunner(port=mock_port, options=Mock(), printer=Mock())
        expected_html = u"""<html>
  <head>
    <title>Layout Test Results (time)</title>
  </head>
  <body>
    <h2>Title (time)</h2>
        <p><a href='test_path'>test_path</a><br />
</p>
</body></html>
"""
        html = runner._results_html(["test_path"], {}, "Title", override_time="time")
        self.assertEqual(html, expected_html)

    def queue_to_list(self, queue):
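        """Drain a Queue.Queue into a list without blocking."""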
        queue_list = []
        while True:
            try:
                queue_list.append(queue.get_nowait())
            except Queue.Empty:
                break
        return queue_list

    def test_get_test_file_queue(self):
        # Test that _get_test_file_queue in run_webkit_tests.TestRunner really
        # puts the http tests first in the queue.
        runner = TestRunnerWrapper(port=Mock(), options=Mock(), printer=Mock())
        runner._options.experimental_fully_parallel = False

        test_list = [
          "LayoutTests/websocket/tests/unicode.htm",
          "LayoutTests/animations/keyframes.html",
          "LayoutTests/http/tests/security/view-source-no-refresh.html",
          "LayoutTests/websocket/tests/websocket-protocol-ignored.html",
          "LayoutTests/fast/css/display-none-inline-style-change-crash.html",
          "LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html",
          "LayoutTests/dom/html/level2/html/HTMLAnchorElement03.html",
          "LayoutTests/ietestcenter/Javascript/11.1.5_4-4-c-1.html",
          "LayoutTests/dom/html/level2/html/HTMLAnchorElement06.html",
        ]

        expected_tests_to_http_lock = set([
          'LayoutTests/websocket/tests/unicode.htm',
          'LayoutTests/http/tests/security/view-source-no-refresh.html',
          'LayoutTests/websocket/tests/websocket-protocol-ignored.html',
          'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html',
        ])

        runner._options.child_processes = 1
        test_queue_for_single_thread = runner._get_test_file_queue(test_list)
        runner._options.child_processes = 2
        test_queue_for_multi_thread = runner._get_test_file_queue(test_list)

        single_thread_results = self.queue_to_list(test_queue_for_single_thread)
        multi_thread_results = self.queue_to_list(test_queue_for_multi_thread)

        self.assertEqual("tests_to_http_lock", single_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock, set(single_thread_results[0][1]))
        self.assertEqual("tests_to_http_lock", multi_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock, set(multi_thread_results[0][1]))


class DryrunTest(unittest.TestCase):
    # FIXME: it's hard to know which platforms are safe to test; the
    # chromium platforms require a chromium checkout, and the mac platform
    # requires fcntl, so it can't be tested on win32, etc. There is
    # probably a better way of handling this.
    def test_darwin(self):
        if sys.platform != "darwin":
            return

        self.assertTrue(passing_run(['--platform', 'test']))
        self.assertTrue(passing_run(['--platform', 'dryrun',
                                     'fast/html']))
        self.assertTrue(passing_run(['--platform', 'dryrun-mac',
                                     'fast/html']))

    def test_test(self):
        self.assertTrue(passing_run(['--platform', 'dryrun-test',
                                     '--pixel-tests']))


if __name__ == '__main__':
    unittest.main()