#!/usr/bin/python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit tests for run_webkit_tests."""

from __future__ import with_statement

import codecs
import itertools
import logging
import Queue
import re
import sys
import thread
import time
import threading
import unittest

try:
    import multiprocessing
except ImportError:
    multiprocessing = None

try:
    import json
except ImportError:
    # python 2.5 compatibility
    import webkitpy.thirdparty.simplejson as json

# FIXME: remove this when we fix test-webkitpy to work properly on cygwin
# (bug 63846).
SHOULD_TEST_PROCESSES = multiprocessing and sys.platform not in ('cygwin', 'win32')

from webkitpy.common import array_stream
from webkitpy.common.system import outputcapture
from webkitpy.common.host_mock import MockHost

from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.port.test import TestPort, TestDriver, unit_test_filesystem
from webkitpy.layout_tests.port.base import is_reference_html_file
from webkitpy.python24.versioning import compare_version
from webkitpy.test.skip import skip_if


def parse_args(extra_args=None, record_results=False, tests_included=False, new_results=False, print_nothing=True):
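    """Build the argument list for run_webkit_tests.parse_args() with test-friendly defaults.

    Unless overridden via extra_args, this selects the 'test' platform and the
    inline worker model, disables result recording and new baselines, and, when
    tests_included is False, appends a default set of test directories (using a
    glob to exercise glob expansion).
    """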
    extra_args = extra_args or []
    if print_nothing:
        args = ['--print', 'nothing']
    else:
        args = []
    if not '--platform' in extra_args:
        args.extend(['--platform', 'test'])
    if not record_results:
        args.append('--no-record-results')
    if not new_results:
        args.append('--no-new-test-results')

    if not '--child-processes' in extra_args and not '--worker-model' in extra_args:
        args.extend(['--worker-model', 'inline'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)


def passing_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, filesystem=None):
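    """Run the tests and return True if the run passed and produced no output.

    A run counts as passing when run_webkit_tests.run() returns 0 and neither
    the regular nor the buildbot output stream received any messages.
    """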
    options, parsed_args = parse_args(extra_args, record_results, tests_included)
    filesystem = filesystem or unit_test_filesystem()
    if not port_obj:
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options, filesystem=filesystem)
    buildbot_output = array_stream.ArrayStream()
    regular_output = array_stream.ArrayStream()
    res = run_webkit_tests.run(port_obj, options, parsed_args, buildbot_output=buildbot_output, regular_output=regular_output)
    return res == 0 and regular_output.empty() and buildbot_output.empty()


def logging_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, filesystem=None, new_results=False):
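    """Run the tests with printing enabled and capture the output.

    Returns a tuple of (exit_code, buildbot_output, regular_output, user) so
    callers can assert on both the result code and the messages produced.
    """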
    options, parsed_args = parse_args(extra_args=extra_args,
                                      record_results=record_results,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = MockHost()
    filesystem = filesystem or unit_test_filesystem()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options, filesystem=filesystem)

    res, buildbot_output, regular_output = run_and_capture(port_obj, options, parsed_args)
    return (res, buildbot_output, regular_output, host.user)


def run_and_capture(port_obj, options, parsed_args):
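    """Run run_webkit_tests.run() while capturing stdout and stderr.

    Returns (exit_code, buildbot_output, regular_output); the output streams
    are ArrayStreams so tests can inspect individual messages.
    """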
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        buildbot_output = array_stream.ArrayStream()
        regular_output = array_stream.ArrayStream()
        res = run_webkit_tests.run(port_obj, options, parsed_args,
                                   buildbot_output=buildbot_output,
                                   regular_output=regular_output)
    finally:
        oc.restore_output()
    return (res, buildbot_output, regular_output)


def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False,
                  filesystem=None, include_reference_html=False):
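    """Run the tests and return the names of the tests that were actually run.

    The run uses a RecordingTestDriver that appends each test name to a batch
    (one batch per driver instance). Returns the list of batches, or a single
    flattened list when flatten_batches is True. Reference HTML files used by
    reftests are omitted unless include_reference_html is True.
    """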
    extra_args = extra_args or []
    if not tests_included:
        # Not including http tests since they get run out of order (that
        # behavior has its own test, see test_get_test_file_queue)
        extra_args = ['passes', 'failures'] + extra_args
    options, parsed_args = parse_args(extra_args, tests_included=True)

    host = MockHost()

    test_batches = []


    class RecordingTestDriver(TestDriver):
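        """A TestDriver that records the name of each test it runs into test_batches."""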
        def __init__(self, port, worker_number):
            TestDriver.__init__(self, port, worker_number, pixel_tests=port.get_option('pixel_test'))
            self._current_test_batch = None

        def stop(self):
            self._current_test_batch = None

        def run_test(self, test_input):
            if self._current_test_batch is None:
                self._current_test_batch = []
                test_batches.append(self._current_test_batch)
            test_name = test_input.test_name
            # A reftest invokes the driver's run_test() twice (once for the test,
            # once for the reference). Don't record the reference HTML file unless
            # the include_reference_html parameter is explicitly set.
            filesystem = self._port.host.filesystem
            dirname, filename = filesystem.split(test_name)
            if include_reference_html or not is_reference_html_file(filesystem, dirname, filename):
                self._current_test_batch.append(test_name)
            return TestDriver.run_test(self, test_input)

    class RecordingTestPort(TestPort):
        def create_driver(self, worker_number):
            return RecordingTestDriver(self, worker_number)

    recording_port = RecordingTestPort(options=options, host=host, filesystem=filesystem)
    run_and_capture(recording_port, options, parsed_args)

    if flatten_batches:
        return list(itertools.chain(*test_batches))

    return test_batches


class MainTest(unittest.TestCase):
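    """Tests of run_webkit_tests behavior, run against the mock 'test' port."""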
    def test_accelerated_compositing(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-video']))
        self.assertTrue(passing_run(['--no-accelerated-video']))

    def test_accelerated_2d_canvas(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-2d-canvas']))
        self.assertTrue(passing_run(['--no-accelerated-2d-canvas']))

    def test_basic(self):
        self.assertTrue(passing_run())

    def test_batch_size(self):
        batch_tests_run = get_tests_run(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_process_1(self):
        if SHOULD_TEST_PROCESSES:
            _, _, regular_output, _ = logging_run(
                ['--print', 'config', '--worker-model', 'processes', '--child-processes', '1'])
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.get()]))

    def test_child_processes_2(self):
        # This test seems to fail on win32.
        if sys.platform == 'win32':
            return
        if SHOULD_TEST_PROCESSES:
            _, _, regular_output, _ = logging_run(
                ['--print', 'config', '--worker-model', 'processes', '--child-processes', '2'])
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.get()]))

    def test_child_processes_min(self):
        if SHOULD_TEST_PROCESSES:
            _, _, regular_output, _ = logging_run(
                ['--print', 'config', '--worker-model', 'processes', '--child-processes', '2', 'passes'],
                tests_included=True)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.get()]))

    def test_dryrun(self):
        batch_tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(batch_tests_run, [])

        batch_tests_run = get_tests_run(['-n'])
        self.assertEqual(batch_tests_run, [])

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether the worker runs in-process or in a separate process. Inline
        # exceptions propagate normally, which gives us the full stack trace
        # and traceback from the worker. The downside is that the exception
        # could be anything, but that is actually useful for testing, which is
        # what --worker-model=inline is usually used for.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions, which carry a string capture of the stack that can
        # be printed but does not display properly in the unit test exception handlers.
        self.assertRaises(ValueError, logging_run,
            ['failures/expected/exception.html'], tests_included=True)

        if SHOULD_TEST_PROCESSES:
            self.assertRaises(run_webkit_tests.WorkerException, logging_run,
                ['--worker-model', 'processes', 'failures/expected/exception.html'], tests_included=True)

    def test_full_results_html(self):
        # FIXME: verify html?
        res, out, err, user = logging_run(['--full-results-html'])
        self.assertEqual(res, 0)

    def test_help_printing(self):
        res, out, err, user = logging_run(['--help-printing'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertFalse(err.empty())

    def test_hung_thread(self):
        res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
                                           'failures/expected/hang.html'],
                                           tests_included=True)
        self.assertEqual(res, 0)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run,
            ['failures/expected/keyboard.html'], tests_included=True)

    def test_keyboard_interrupt_inline_worker_model(self):
        self.assertRaises(KeyboardInterrupt, logging_run,
            ['failures/expected/keyboard.html', '--worker-model', 'inline'],
            tests_included=True)

    def test_lint_test_files(self):
        res, out, err, user = logging_run(['--lint-test-files'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertTrue(any(['Lint succeeded' in msg for msg in err.get()]))

    def test_lint_test_files__errors(self):
        options, parsed_args = parse_args(['--lint-test-files'])
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options=options)
        port_obj.test_expectations = lambda: "# syntax error"
        res, out, err = run_and_capture(port_obj, options, parsed_args)

        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue(any(['Lint failed' in msg for msg in err.get()]))

    def test_no_tests_found(self):
        res, out, err, user = logging_run(['resources'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_no_tests_found_2(self):
        res, out, err, user = logging_run(['foo'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_randomize_order(self):
        # FIXME: verify order was shuffled
        self.assertTrue(passing_run(['--randomize-order']))

    def test_gc_between_tests(self):
        self.assertTrue(passing_run(['--gc-between-tests']))

    def test_complex_text(self):
        self.assertTrue(passing_run(['--complex-text']))

    def test_threaded(self):
        self.assertTrue(passing_run(['--threaded']))

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_run_chunk(self):
        # Test that we actually select the right chunk
        all_tests_run = get_tests_run(flatten_batches=True)
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
        self.assertEquals(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.

        # See also the comments in test_exception_raised() about ValueError vs. WorkerException.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_tests_run(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        batches = get_tests_run(['--skip-failing-tests'])
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_run_singly_actually_runs_tests(self):
        res, _, _, _ = logging_run(['--run-singly', 'failures/unexpected'])
        self.assertEquals(res, 7)

    def test_single_file(self):
        # FIXME: We should consider replacing more of the get_tests_run()-style tests
        # with tests that read the tests_run* files, like this one.
        fs = unit_test_filesystem()
        tests_run = passing_run(['passes/text.html'], tests_included=True, filesystem=fs)
        self.assertEquals(fs.read_text_file('/tmp/layout-test-results/tests_run0.txt'),
                          'passes/text.html\n')

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        tests_run = get_tests_run(['failures/expected/keybaord.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_stderr_is_saved(self):
        fs = unit_test_filesystem()
        self.assertTrue(passing_run(filesystem=fs))
        self.assertEquals(fs.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                          'stuff going to stderr')

    def test_test_list(self):
        fs = unit_test_filesystem()
        filename = '/tmp/foo.txt'
        fs.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, filesystem=fs)
        self.assertEquals(['passes/text.html'], tests_run)
        fs.remove(filename)
        res, out, err, user = logging_run(['--test-list=%s' % filename],
                                          tests_included=True, filesystem=fs)
        self.assertEqual(res, -1)
        self.assertFalse(err.empty())

    def test_test_list_with_prefix(self):
        fs = unit_test_filesystem()
        filename = '/tmp/foo.txt'
        fs.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, filesystem=fs)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_unexpected_failures(self):
        # Run tests including the unexpected failures.
        self._url_opened = None
        res, out, err, user = logging_run(tests_included=True)

        # Update this magic number if you add an unexpected test to webkitpy.layout_tests.port.test
        # FIXME: It would be nice to have a routine in port/test.py that returns this number.
        unexpected_tests_count = 8

        self.assertEqual(res, unexpected_tests_count)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())
        self.assertEqual(user.opened_urls, ['/tmp/layout-test-results/results.html'])

    def test_missing_and_unexpected_results(self):
        # Test that unexpected results and missing baselines are reported
        # correctly in full_results.json.
        fs = unit_test_filesystem()
        res, out, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, filesystem=fs, record_results=True)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEquals(res, 1)
        expected_token = '"unexpected":{"text-image-checksum.html":{"expected":"PASS","actual":"TEXT"},"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
        json_string = fs.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_missing_and_unexpected_results_with_reftests(self):
        # Test that an unlisted file in a reftest directory is reported as
        # missing in full_results.json.
        fs = unit_test_filesystem()
        res, out, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, filesystem=fs, record_results=True)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEquals(res, 1)
        json_string = fs.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"unlistedtest.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_missing_and_unexpected_results_with_custom_exit_code(self):
        # Test that a port can customize the exit code based on the summarized
        # results (here, regressions plus missing results).
        fs = unit_test_filesystem()

        class CustomExitCodePort(TestPort):
            def exit_code_from_summarized_results(self, unexpected_results):
                return unexpected_results['num_regressions'] + unexpected_results['num_missing']

        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
        test_port = CustomExitCodePort(options=options)
        res, out, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, filesystem=fs, record_results=True, port_obj=test_port)
        self.assertEquals(res, 2)

    def test_crash_with_stderr(self):
        fs = unit_test_filesystem()
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/crash-with-stderr.html',
            ],
            tests_included=True,
            record_results=True,
            filesystem=fs)
        self.assertTrue(fs.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)

    def test_no_image_failure_with_image_diff(self):
        fs = unit_test_filesystem()
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/checksum-with-matching-image.html',
            ],
            tests_included=True,
            record_results=True,
            filesystem=fs)
        self.assertTrue(fs.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_crash_log(self):
        mock_crash_report = 'mock-crash-report'
        fs = unit_test_filesystem()
        fs.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/crash-with-stderr.html',
            ],
            tests_included=True,
            record_results=True,
            filesystem=fs)
        expected_crash_log = mock_crash_report
        # Currently CrashLog uploading only works on Darwin.
        if sys.platform != "darwin":
            expected_crash_log = "mock-std-error-output"
        self.assertEquals(fs.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)

    def test_web_process_crash_log(self):
        mock_crash_report = 'mock-crash-report'
        fs = unit_test_filesystem()
        fs.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/web-process-crash-with-stderr.html',
            ],
            tests_included=True,
            record_results=True,
            filesystem=fs)
        expected_crash_log = mock_crash_report
        # Currently CrashLog uploading only works on Darwin.
        if sys.platform != "darwin":
            expected_crash_log = "mock-std-error-output"
        self.assertEquals(fs.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), expected_crash_log)

    def test_exit_after_n_failures_upload(self):
        fs = unit_test_filesystem()
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/text-image-checksum.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            record_results=True,
            filesystem=fs)
        self.assertTrue('/tmp/layout-test-results/incremental_results.json' in fs.files)

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/text-image-checksum.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/text.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run([
                'failures/unexpected/timeout.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes_inline_worker_model(self):
        tests_run = get_tests_run([
                'failures/unexpected/timeout.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
                '--worker-model', 'inline',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/timeout.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        fs = unit_test_filesystem()
        with fs.mkdtemp() as tmpdir:
            res, out, err, user = logging_run(['--results-directory=' + str(tmpdir)],
                                              tests_included=True, filesystem=fs)
            self.assertEqual(user.opened_urls, [fs.join(tmpdir, 'results.html')])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, ['/tmp/layout-test-results/results.html'])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        fs = unit_test_filesystem()
        fs.maybe_make_directory('/tmp/cwd')
        fs.chdir('/tmp/cwd')
        res, out, err, user = logging_run(['--results-directory=foo'],
                                          tests_included=True, filesystem=fs)
        self.assertEqual(user.opened_urls, ['/tmp/cwd/foo/results.html'])

    def test_retries_directory(self):
        fs = unit_test_filesystem()
        res, out, err, user = logging_run(tests_included=True, filesystem=fs)
        self.assertTrue('/tmp/layout-test-results/retries/tests_run0.txt' in fs.files)

    # The next tests check that we run the tests in ascending alphabetical
    # order per directory. HTTP tests are sharded separately from other tests,
    # so we have to test both.
    def assert_run_order(self, worker_model, child_processes='1'):
        tests_run = get_tests_run(['--worker-model', worker_model,
            '--child-processes', child_processes, 'passes'],
            tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['--worker-model', worker_model,
            '--child-processes', child_processes, 'http/tests/passes'],
            tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, sorted(tests_run))

    def test_run_order__inline(self):
        self.assert_run_order('inline')

    def test_tolerance(self):
        class ImageDiffTestPort(TestPort):
            def diff_image(self, expected_contents, actual_contents, tolerance=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return (True, 1)

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            test_port = ImageDiffTestPort(options=options)
            res = passing_run(args, port_obj=test_port, tests_included=True)
            self.assertTrue(res)
            return test_port

        base_args = ['--pixel-tests', '--no-new-test-results', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_worker_model__inline(self):
        self.assertTrue(passing_run(['--worker-model', 'inline']))

    def test_worker_model__inline_with_child_processes(self):
        res, out, err, user = logging_run(['--worker-model', 'inline',
                                           '--child-processes', '2'])
        self.assertEqual(res, 0)
        self.assertTrue('--worker-model=inline overrides --child-processes\n' in err.get())

    def test_worker_model__processes(self):
        if SHOULD_TEST_PROCESSES:
            self.assertTrue(passing_run(['--worker-model', 'processes']))

    def test_worker_model__processes_and_dry_run(self):
        if SHOULD_TEST_PROCESSES:
            self.assertTrue(passing_run(['--worker-model', 'processes', '--dry-run']))

    def test_worker_model__unknown(self):
        self.assertRaises(ValueError, logging_run, ['--worker-model', 'unknown'])

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/reftest.html'], tests_run)

    def test_reftest_skip_reftests_if_no_ref_tests(self):
        tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)
        tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True, include_reference_html=True)
        self.assertEquals(['passes/reftest.html', 'passes/reftest-expected.html'], tests_run)

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_tests_run(['passes/mismatch.html'], tests_included=True, flatten_batches=True, include_reference_html=True)
        self.assertEquals(['passes/mismatch.html', 'passes/mismatch-expected-mismatch.html'], tests_run)

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))

        res, buildbot_output, regular_output, user = logging_run(['--additional-platform-directory', 'foo'])
        self.assertTrue('--additional-platform-directory=foo is ignored since it is not absolute\n' in regular_output.get())

    def test_no_http_and_force(self):
        # See test_run_force; using --force raises an exception.
        # FIXME: We would like to check the warnings generated.
        self.assertRaises(ValueError, logging_run, ['--force', '--no-http'])

    @staticmethod
    def has_test_of_type(tests, type):
        return [test for test in tests if type in test]

    def test_no_http_tests(self):
        batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'], flatten_batches=True)
        self.assertTrue(MainTest.has_test_of_type(batch_tests_dryrun, 'http'))
        self.assertTrue(MainTest.has_test_of_type(batch_tests_dryrun, 'websocket'))

        batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'], flatten_batches=True)
        self.assertFalse(MainTest.has_test_of_type(batch_tests_run_no_http, 'http'))
        self.assertFalse(MainTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))

        batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'], flatten_batches=True)
        self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'http'))
        self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'websocket'))

MainTest = skip_if(MainTest, sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0, 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2')


class EndToEndTest(unittest.TestCase):
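    """Tests that examine the full_results.json output of a complete run."""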
    def parse_full_results(self, full_results_text):
        json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        compressed_results = json.loads(json_to_eval)
        return compressed_results

    def test_end_to_end(self):
        fs = unit_test_filesystem()
        res, out, err, user = logging_run(record_results=True, tests_included=True, filesystem=fs)

        # Update this magic number if you add an unexpected test to webkitpy.layout_tests.port.test
        # FIXME: It would be nice to have a routine in port/test.py that returns this number.
        unexpected_tests_count = 8

        self.assertEquals(res, unexpected_tests_count)
        results = self.parse_full_results(fs.files['/tmp/layout-test-results/full_results.json'])

        # Check that we pass back the image diff percentage correctly.
        self.assertEquals(results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)

        # Check that we attempted to display the results page in a browser.
        self.assertTrue(user.opened_urls)

class RebaselineTest(unittest.TestCase):
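    """Tests for the --reset-results, --new-baseline, and missing-result rebaselining paths."""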
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that file_list contains the expected baseline files."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline[1:]
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertTrue(baseline_msg in err.get())

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        fs = unit_test_filesystem()
        res, out, err, _ = logging_run(['--pixel-tests',
                        '--reset-results',
                        'passes/image.html',
                        'failures/expected/missing_image.html'],
                        tests_included=True, filesystem=fs, new_results=True)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEquals(res, 0)
        self.assertTrue(out.empty())
        self.assertEqual(len(file_list), 4)
        self.assertBaselines(file_list, "/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "/failures/expected/missing_image", [".txt", ".png"], err)

    def test_missing_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        fs = unit_test_filesystem()
        res, out, err, _ = logging_run(['--no-show-results',
                     'failures/unexpected/missing_text.html',
                     'failures/unexpected/missing_image.html',
                     'failures/unexpected/missing_audio.html',
                     'failures/unexpected/missing_render_tree_dump.html'],
                     tests_included=True, filesystem=fs, new_results=True)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEquals(res, 0)
        self.assertFalse(out.empty())
        self.assertEqual(len(file_list), 6)
        self.assertBaselines(file_list, "/failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "/platform/test-mac-leopard/failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "/platform/test-mac-leopard/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations. If the expectation
        # is missing, then create a new expectation in the platform dir.
        fs = unit_test_filesystem()
        res, out, err, _ = logging_run(['--pixel-tests',
                        '--new-baseline',
                        'passes/image.html',
                        'failures/expected/missing_image.html'],
                    tests_included=True, filesystem=fs, new_results=True)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEquals(res, 0)
        self.assertTrue(out.empty())
        self.assertEqual(len(file_list), 4)
        self.assertBaselines(file_list,
            "/platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "/platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)


class DryrunTest(unittest.TestCase):
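    """Tests for the 'dryrun' family of ports."""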
    # FIXME: it's hard to know which platforms are safe to test; the
    # chromium platforms require a chromium checkout, and the mac platform
    # requires fcntl, so it can't be tested on win32, etc. There is
    # probably a better way of handling this.
    def disabled_test_darwin(self):
        if sys.platform != "darwin":
            return

        self.assertTrue(passing_run(['--platform', 'dryrun', 'fast/html'], tests_included=True))
        self.assertTrue(passing_run(['--platform', 'dryrun-mac', 'fast/html'], tests_included=True))

    def test_test(self):
        self.assertTrue(passing_run(['--platform', 'dryrun-test', '--pixel-tests']))


if __name__ == '__main__':
    unittest.main()