#!/usr/bin/python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit tests for run_webkit_tests."""

import codecs
import itertools
import logging
import os
import Queue
import shutil
import sys
import tempfile
import thread
import threading
import time
import unittest

from webkitpy.common import array_stream
from webkitpy.common.system import outputcapture
from webkitpy.common.system import user
from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.layout_package import dump_render_tree_thread
from webkitpy.layout_tests.port.test import TestPort, TestDriver
from webkitpy.python24.versioning import compare_version
from webkitpy.test.skip import skip_if

from webkitpy.thirdparty.mock import Mock


class MockUser():
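    """Stand-in for webkitpy's User object; records the last URL it was asked to open."""
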
    def __init__(self):
        self.url = None

    def open_url(self, url):
        self.url = url


def parse_args(extra_args=None, record_results=False, tests_included=False,
               print_nothing=True):
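    """Build an argument list with test-friendly defaults (the 'test' platform,
    no result recording, the old-inline worker model, and a default set of
    tests) unless overridden, then run it through run_webkit_tests.parse_args()."""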
    extra_args = extra_args or []
    if print_nothing:
        args = ['--print', 'nothing']
    else:
        args = []
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if not record_results:
        args.append('--no-record-results')
    if '--child-processes' not in extra_args:
        args.extend(['--worker-model', 'old-inline'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)


def passing_run(extra_args=None, port_obj=None, record_results=False,
                tests_included=False):
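    """Run run_webkit_tests with the given arguments against the test port
    and return True if it exited with a return code of 0."""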
    options, parsed_args = parse_args(extra_args, record_results,
                                      tests_included)
    if not port_obj:
        port_obj = port.get(port_name=options.platform, options=options,
                            user=MockUser())
    res = run_webkit_tests.run(port_obj, options, parsed_args)
    return res == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False):
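    """Like passing_run(), but captures the output and returns
    (return code, buildbot output, regular output, the MockUser for the run)."""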
    options, parsed_args = parse_args(extra_args=extra_args,
                                      record_results=False,
                                      tests_included=tests_included,
                                      print_nothing=False)
    user = MockUser()
    if not port_obj:
        port_obj = port.get(port_name=options.platform, options=options,
                            user=user)

    res, buildbot_output, regular_output = run_and_capture(port_obj, options,
                                                           parsed_args)
    return (res, buildbot_output, regular_output, user)


def run_and_capture(port_obj, options, parsed_args):
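    """Run run_webkit_tests.run() with its output captured and return
    (return code, buildbot output, regular output) as ArrayStreams."""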
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        buildbot_output = array_stream.ArrayStream()
        regular_output = array_stream.ArrayStream()
        res = run_webkit_tests.run(port_obj, options, parsed_args,
                                   buildbot_output=buildbot_output,
                                   regular_output=regular_output)
    finally:
        oc.restore_output()
    return (res, buildbot_output, regular_output)


def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False):
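    """Run the given tests against a recording port and return the batches of
    test names that were run (or one flat list when flatten_batches is True)."""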
    extra_args = extra_args or []
    if not tests_included:
        # Not including http tests since they get run out of order (that
        # behavior has its own test, see test_shard_tests)
        extra_args = ['passes', 'failures'] + extra_args
    options, parsed_args = parse_args(extra_args, tests_included=True)

    user = MockUser()

    test_batches = []

    class RecordingTestDriver(TestDriver):
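        """TestDriver that records the name of each test it runs in
        test_batches, starting a new batch whenever the driver is restarted."""
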
        def __init__(self, port, worker_number):
            TestDriver.__init__(self, port, worker_number)
            self._current_test_batch = None

        def poll(self):
            # So that we don't create a new driver for every test
            return None

        def stop(self):
            self._current_test_batch = None

        def run_test(self, test_input):
            if self._current_test_batch is None:
                self._current_test_batch = []
                test_batches.append(self._current_test_batch)
            test_name = self._port.relative_test_filename(test_input.filename)
            self._current_test_batch.append(test_name)
            return TestDriver.run_test(self, test_input)

    class RecordingTestPort(TestPort):
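        """TestPort that hands out RecordingTestDriver instances."""
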
        def create_driver(self, worker_number):
            return RecordingTestDriver(self, worker_number)

    recording_port = RecordingTestPort(options=options, user=user)
    run_and_capture(recording_port, options, parsed_args)

    if flatten_batches:
        return list(itertools.chain(*test_batches))

    return test_batches


class MainTest(unittest.TestCase):
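    """End-to-end tests of run_webkit_tests.run() using the 'test' port."""
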
    def test_accelerated_compositing(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-compositing']))
        self.assertTrue(passing_run(['--no-accelerated-compositing']))

    def test_accelerated_2d_canvas(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-2d-canvas']))
        self.assertTrue(passing_run(['--no-accelerated-2d-canvas']))

    def test_basic(self):
        self.assertTrue(passing_run())

    def test_batch_size(self):
        batch_tests_run = get_tests_run(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_process_1(self):
        (res, buildbot_output, regular_output, user) = logging_run(
             ['--print', 'config', '--child-processes', '1'])
        self.assertTrue('Running one DumpRenderTree\n'
                        in regular_output.get())

    def test_child_processes_2(self):
        (res, buildbot_output, regular_output, user) = logging_run(
             ['--print', 'config', '--child-processes', '2'])
        self.assertTrue('Running 2 DumpRenderTrees in parallel\n'
                        in regular_output.get())

    def test_dryrun(self):
        batch_tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(batch_tests_run, [])

        batch_tests_run = get_tests_run(['-n'])
        self.assertEqual(batch_tests_run, [])

    def test_exception_raised(self):
        self.assertRaises(ValueError, logging_run,
            ['failures/expected/exception.html'], tests_included=True)

    def test_full_results_html(self):
        # FIXME: verify html?
        self.assertTrue(passing_run(['--full-results-html']))

    def test_help_printing(self):
        res, out, err, user = logging_run(['--help-printing'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertFalse(err.empty())

    def test_hung_thread(self):
        res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
                                           'failures/expected/hang.html'],
                                          tests_included=True)
        self.assertEqual(res, 0)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run,
            ['failures/expected/keyboard.html'], tests_included=True)

    def test_last_results(self):
        passing_run(['--clobber-old-results'], record_results=True)
        (res, buildbot_output, regular_output, user) = logging_run(
            ['--print-last-failures'])
        self.assertEqual(regular_output.get(), ['\n\n'])
        self.assertEqual(buildbot_output.get(), [])

    def test_lint_test_files(self):
        res, out, err, user = logging_run(['--lint-test-files'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertTrue(any(['Lint succeeded' in msg for msg in err.get()]))

    def test_lint_test_files__errors(self):
        options, parsed_args = parse_args(['--lint-test-files'])
        user = MockUser()
        port_obj = port.get(options.platform, options=options, user=user)
        port_obj.test_expectations = lambda: "# syntax error"
        res, out, err = run_and_capture(port_obj, options, parsed_args)

        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue(any(['Lint failed' in msg for msg in err.get()]))

    def test_no_tests_found(self):
        res, out, err, user = logging_run(['resources'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_no_tests_found_2(self):
        res, out, err, user = logging_run(['foo'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_randomize_order(self):
        # FIXME: verify order was shuffled
        self.assertTrue(passing_run(['--randomize-order']))

    def test_run_chunk(self):
        # Test that we actually select the right chunk
        all_tests_run = get_tests_run(flatten_batches=True)
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
        self.assertEquals(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_tests_run(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_test_list(self):
        filename = tempfile.mktemp()
        tmpfile = open(filename, mode='w+')
        tmpfile.write('passes/text.html')
        tmpfile.close()
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)
        os.remove(filename)
        res, out, err, user = logging_run(['--test-list=%s' % filename],
                                          tests_included=True)
        self.assertEqual(res, -1)
        self.assertFalse(err.empty())

    def test_unexpected_failures(self):
        # Run tests including the unexpected failures.
        self._url_opened = None
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(res, 3)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())
        self.assertEqual(user.url, '/tmp/layout-test-results/results.html')

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/text-image-checksum.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/text.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run([
                'failures/unexpected/timeout.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        tmpdir = tempfile.mkdtemp()
        res, out, err, user = logging_run(['--results-directory=' + tmpdir],
                                          tests_included=True)
        self.assertEqual(user.url, os.path.join(tmpdir, 'results.html'))
        shutil.rmtree(tmpdir, ignore_errors=True)

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(user.url, '/tmp/layout-test-results/results.html')

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        res, out, err, user = logging_run(['--results-directory=foo'],
                                          tests_included=True)
        self.assertEqual(user.url, '/tmp/foo/results.html')

    def test_tolerance(self):
        class ImageDiffTestPort(TestPort):
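            """TestPort whose diff_image() just records the tolerance option it was run with."""
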
            def diff_image(self, expected_contents, actual_contents,
                           diff_filename=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return True

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            test_port = ImageDiffTestPort(options=options, user=MockUser())
            passing_run(args, port_obj=test_port, tests_included=True)
            return test_port

        base_args = ['--pixel-tests', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_worker_model__inline(self):
        self.assertTrue(passing_run(['--worker-model', 'old-inline']))

    def test_worker_model__threads(self):
        self.assertTrue(passing_run(['--worker-model', 'old-threads']))

    def test_worker_model__unknown(self):
        self.assertRaises(ValueError, logging_run,
                          ['--worker-model', 'unknown'])


MainTest = skip_if(MainTest, sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0, 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2')


def _mocked_open(original_open, file_list):
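    """Return a replacement for codecs.open() that diverts writes to
    '-expected.*' baseline files to os.devnull, recording the requested
    file names in file_list."""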
    def _wrapper(name, mode, encoding):
        if name.find("-expected.") != -1 and mode.find("w") != -1:
            # we don't want to actually write new baselines, so stub these out
            name = name.replace('\\', '/')
            file_list.append(name)
            return original_open(os.devnull, mode, encoding)
        return original_open(name, mode, encoding)
    return _wrapper


class RebaselineTest(unittest.TestCase):
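    """Tests for the --reset-results and --new-baseline code paths."""
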
    def assertBaselines(self, file_list, file):
        """Assert that file_list contains the baselines for the given file."""
        for ext in [".txt", ".png", ".checksum"]:
            baseline = file + "-expected" + ext
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def disabled_test_reset_results(self):
        # FIXME: This test is disabled until we can rewrite it to use a
        # mock filesystem.
        #
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        file_list = []
        passing_run(['--pixel-tests',
                     '--reset-results',
                     'passes/image.html',
                     'failures/expected/missing_image.html'],
                    tests_included=True)
        self.assertEqual(len(file_list), 6)
        self.assertBaselines(file_list,
            "data/passes/image")
        self.assertBaselines(file_list,
            "data/failures/expected/missing_image")

    def disabled_test_new_baseline(self):
        # FIXME: This test is disabled until we can rewrite it to use a
        # mock filesystem.
        #
        # Test that we update the platform expectations. If the expectation
        # is missing, then create a new expectation in the platform dir.
        file_list = []
        original_open = codecs.open
        try:
            # Test that we update the platform expectations. If the expectation
            # is missing, then create a new expectation in the platform dir.
            file_list = []
            codecs.open = _mocked_open(original_open, file_list)
            passing_run(['--pixel-tests',
                         '--new-baseline',
                         'passes/image.html',
                         'failures/expected/missing_image.html'],
                        tests_included=True)
            self.assertEqual(len(file_list), 6)
            self.assertBaselines(file_list,
                "data/platform/test/passes/image")
            self.assertBaselines(file_list,
                "data/platform/test/failures/expected/missing_image")
        finally:
            codecs.open = original_open


class TestRunnerWrapper(run_webkit_tests.TestRunner):
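    """TestRunner that treats a test filename as its own 'test input', so
    _shard_tests() can be exercised directly on lists of filenames."""
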
    def _get_test_input_for_file(self, test_file):
        return test_file


class TestRunnerTest(unittest.TestCase):
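    """Unit tests for individual run_webkit_tests.TestRunner methods."""
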
    def test_results_html(self):
        mock_port = Mock()
        mock_port.relative_test_filename = lambda name: name
        mock_port.filename_to_uri = lambda name: name

        runner = run_webkit_tests.TestRunner(port=mock_port, options=Mock(),
            printer=Mock())
        expected_html = u"""<html>
  <head>
    <title>Layout Test Results (time)</title>
  </head>
  <body>
    <h2>Title (time)</h2>
        <p><a href='test_path'>test_path</a><br />
</p>
</body></html>
"""
        html = runner._results_html(["test_path"], {}, "Title", override_time="time")
        self.assertEqual(html, expected_html)

    def test_shard_tests(self):
        # Test that _shard_tests in run_webkit_tests.TestRunner really
        # puts the http tests first in the queue.
        runner = TestRunnerWrapper(port=Mock(), options=Mock(),
            printer=Mock())

        test_list = [
          "LayoutTests/websocket/tests/unicode.htm",
          "LayoutTests/animations/keyframes.html",
          "LayoutTests/http/tests/security/view-source-no-refresh.html",
          "LayoutTests/websocket/tests/websocket-protocol-ignored.html",
          "LayoutTests/fast/css/display-none-inline-style-change-crash.html",
          "LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html",
          "LayoutTests/dom/html/level2/html/HTMLAnchorElement03.html",
          "LayoutTests/ietestcenter/Javascript/11.1.5_4-4-c-1.html",
          "LayoutTests/dom/html/level2/html/HTMLAnchorElement06.html",
        ]

        expected_tests_to_http_lock = set([
          'LayoutTests/websocket/tests/unicode.htm',
          'LayoutTests/http/tests/security/view-source-no-refresh.html',
          'LayoutTests/websocket/tests/websocket-protocol-ignored.html',
          'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html',
        ])

        # FIXME: Ideally the HTTP tests don't have to all be in one shard.
        single_thread_results = runner._shard_tests(test_list, False)
        multi_thread_results = runner._shard_tests(test_list, True)

        self.assertEqual("tests_to_http_lock", single_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock, set(single_thread_results[0][1]))
        self.assertEqual("tests_to_http_lock", multi_thread_results[0][0])
        self.assertEqual(expected_tests_to_http_lock, set(multi_thread_results[0][1]))


class DryrunTest(unittest.TestCase):
    # FIXME: it's hard to know which platforms are safe to test; the
    # chromium platforms require a chromium checkout, and the mac platform
    # requires fcntl, so it can't be tested on win32, etc. There is
    # probably a better way of handling this.
    def test_darwin(self):
        if sys.platform != "darwin":
            return

        self.assertTrue(passing_run(['--platform', 'test']))
        self.assertTrue(passing_run(['--platform', 'dryrun',
                                     'fast/html']))
        self.assertTrue(passing_run(['--platform', 'dryrun-mac',
                                     'fast/html']))

    def test_test(self):
        self.assertTrue(passing_run(['--platform', 'dryrun-test',
                                     '--pixel-tests']))


if __name__ == '__main__':
    unittest.main()