#!/usr/bin/python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit tests for run_perf_tests."""

import StringIO
import json
import unittest

from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.port.driver import DriverOutput
from webkitpy.layout_tests.port.test import TestPort
from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
from webkitpy.performance_tests.perftest import PerfTest
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner


class MainTest(unittest.TestCase):
    def assertWritten(self, stream, contents):
        self.assertEqual(stream.buflist, contents)

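    # Stub driver that stands in for DumpRenderTree: run_test() returns canned
    # DriverOutput for each well-known test file name.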
    class TestDriver:
        def run_test(self, driver_input):
            text = ''
            timeout = False
            crash = False
            if driver_input.test_name.endswith('pass.html'):
                text = 'RESULT group_name: test_name= 42 ms'
            elif driver_input.test_name.endswith('timeout.html'):
                timeout = True
            elif driver_input.test_name.endswith('failed.html'):
                text = None
            elif driver_input.test_name.endswith('tonguey.html'):
                text = 'we are not expecting an output from perf tests but RESULT blablabla'
            elif driver_input.test_name.endswith('crash.html'):
                crash = True
            elif driver_input.test_name.endswith('event-target-wrapper.html'):
                text = """Running 20 times
Ignoring warm-up run (1502)
1504
1505
1510
1504
1507
1509
1510
1487
1488
1472
1472
1488
1473
1472
1475
1487
1486
1486
1475
1471

avg 1489.05
median 1487
stdev 14.46
min 1471
max 1510
"""
            elif driver_input.test_name.endswith('some-parser.html'):
                text = """Running 20 times
Ignoring warm-up run (1115)

avg 1100
median 1101
stdev 11
min 1080
max 1120
"""
            return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)

        def start(self):
            """do nothing"""

        def stop(self):
            """do nothing"""

    def create_runner(self, args=[], driver_class=TestDriver):
        options, parsed_args = PerfTestsRunner._parse_args(args)
        test_port = TestPort(host=MockHost(), options=options)
        test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()

        runner = PerfTestsRunner(args=args, port=test_port)
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
        return runner, test_port

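    # Runs one Chromium-style test through the stub driver and returns
    # whether it produced an expected result.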
    def run_test(self, test_name):
        runner, port = self.create_runner()
        driver = MainTest.TestDriver()
        return runner._run_single_test(ChromiumStylePerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name)), driver)

    def test_run_passing_test(self):
        self.assertTrue(self.run_test('pass.html'))

    def test_run_silent_test(self):
        self.assertFalse(self.run_test('silent.html'))

    def test_run_failed_test(self):
        self.assertFalse(self.run_test('failed.html'))

    def test_run_tonguey_test(self):
        self.assertFalse(self.run_test('tonguey.html'))

    def test_run_timeout_test(self):
        self.assertFalse(self.run_test('timeout.html'))

    def test_run_crash_test(self):
        self.assertFalse(self.run_test('crash.html'))

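    # Builds PerfTest objects for the given relative test names; names under
    # inspector/ get the Chromium-style result parser.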
    def _tests_for_runner(self, runner, test_names):
        filesystem = runner._host.filesystem
        tests = []
        for test in test_names:
            path = filesystem.join(runner._base_path, test)
            if test.startswith('inspector/'):
                tests.append(ChromiumStylePerfTest(runner._port, test, path))
            else:
                tests.append(PerfTest(runner._port, test, path))
        return tests

    def test_run_test_set(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, len(tests) - 1)
        self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)

    def test_run_test_set_kills_drt_per_run(self):
        class TestDriverWithStopCount(MainTest.TestDriver):
            stop_count = 0

            def stop(self):
                TestDriverWithStopCount.stop_count += 1

        runner, port = self.create_runner(driver_class=TestDriverWithStopCount)

        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        unexpected_result_count = runner._run_tests_set(tests, port)

        self.assertEqual(TestDriverWithStopCount.stop_count, 6)

    def test_run_test_pause_before_testing(self):
        class TestDriverWithStartCount(MainTest.TestDriver):
            start_count = 0

            def start(self):
                TestDriverWithStartCount.start_count += 1

        runner, port = self.create_runner(args=["--pause-before-testing"], driver_class=TestDriverWithStartCount)
        tests = self._tests_for_runner(runner, ['inspector/pass.html'])

        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
            self.assertEqual(TestDriverWithStartCount.start_count, 1)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(stderr, "Ready to run test?\n")
        self.assertEqual(log, "Running inspector/pass.html (1 of 1)\nRESULT group_name: test_name= 42 ms\n\n")

    def test_run_test_set_for_parser_tests(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(log, '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
        'RESULT Bindings: event-target-wrapper= 1489.05 ms',
        'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
        '',
        'Running Parser/some-parser.html (2 of 2)',
        'RESULT Parser: some-parser= 1100.0 ms',
        'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
        '', '']))

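    # Shared helper: seeds the mock filesystem with two tests, stubs out the
    # JSON upload, runs the runner, and checks the exit code and log output.
    # Returns whether an upload was attempted.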
    def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=True, expected_exit_code=0):
        filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
        filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')

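        # A one-element list so that the nested function below can record the
        # upload; Python 2 closures cannot rebind an outer local.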
        uploaded = [False]

        def mock_upload_json(hostname, json_path):
            self.assertEqual(hostname, 'some.host')
            self.assertEqual(json_path, '/mock-checkout/output.json')
            uploaded[0] = True
            return upload_succeeds

        runner._upload_json = mock_upload_json
        runner._timestamp = 123456789
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(runner.run(), expected_exit_code)
        finally:
            stdout, stderr, logs = output_capture.restore_output()

        if not expected_exit_code:
            self.assertEqual(logs, '\n'.join([
                'Running 2 tests',
                'Running Bindings/event-target-wrapper.html (1 of 2)',
                'RESULT Bindings: event-target-wrapper= 1489.05 ms',
                'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
                '',
                'Running inspector/pass.html (2 of 2)',
                'RESULT group_name: test_name= 42 ms',
                '',
                '']))

        return uploaded[0]

    def test_run_with_json_output(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host'])
        self._test_run_with_json_output(runner, port.host.filesystem)
        self.assertEqual(json.loads(port.host.filesystem.read_text_file('/mock-checkout/output.json')), {
            "timestamp": 123456789, "results":
            {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
            "inspector/pass.html:group_name:test_name": 42},
            "webkit-revision": 5678, "branch": "webkit-trunk"})

    def test_run_with_description(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host', '--description', 'some description'])
        self._test_run_with_json_output(runner, port.host.filesystem)
        self.assertEqual(json.loads(port.host.filesystem.read_text_file('/mock-checkout/output.json')), {
            "timestamp": 123456789, "description": "some description", "results":
            {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
            "inspector/pass.html:group_name:test_name": 42},
            "webkit-revision": 5678, "branch": "webkit-trunk"})

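    # Like create_runner, but also seeds the mock filesystem with the results
    # template and the jQuery resource needed to generate the results page.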
    def create_runner_and_setup_results_template(self, args=[]):
        runner, port = self.create_runner(args)
        filesystem = port.host.filesystem
        filesystem.write_text_file(runner._base_path + '/resources/results-template.html', 'BEGIN<?WebKitPerfTestRunnerInsertionPoint?>END')
        filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
        return runner, port

    def test_run_respects_no_results(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host', '--no-results'])
        self.assertFalse(self._test_run_with_json_output(runner, port.host.filesystem))
        self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))

    def test_run_generates_json_by_default(self):
        runner, port = self.create_runner_and_setup_results_template()
        filesystem = port.host.filesystem
        output_json_path = filesystem.join(port.perf_results_directory(), runner._DEFAULT_JSON_FILENAME)
        results_page_path = filesystem.splitext(output_json_path)[0] + '.html'

        self.assertFalse(filesystem.isfile(output_json_path))
        self.assertFalse(filesystem.isfile(results_page_path))

        self._test_run_with_json_output(runner, port.host.filesystem)

        self.assertEqual(json.loads(port.host.filesystem.read_text_file(output_json_path)), [{
            "timestamp": 123456789, "results":
            {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
            "inspector/pass.html:group_name:test_name": 42},
            "webkit-revision": 5678, "branch": "webkit-trunk"}])

        self.assertTrue(filesystem.isfile(output_json_path))
        self.assertTrue(filesystem.isfile(results_page_path))

    def test_run_generates_and_shows_results_page(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        page_shown = []
        port.show_results_html_file = lambda path: page_shown.append(path)
        filesystem = port.host.filesystem
        self._test_run_with_json_output(runner, filesystem)

        expected_entry = {"timestamp": 123456789, "results": {"Bindings/event-target-wrapper":
            {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
            "inspector/pass.html:group_name:test_name": 42}, "webkit-revision": 5678, "branch": "webkit-trunk"}

        self.maxDiff = None
        json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
        self.assertEqual(json.loads(json_output), [expected_entry])
        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
            'BEGIN<script>jquery content</script><script id="json">' + json_output + '</script>END')
        self.assertEqual(page_shown[0], '/mock-checkout/output.html')

        self._test_run_with_json_output(runner, filesystem)
        json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
        self.assertEqual(json.loads(json_output), [expected_entry, expected_entry])
        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
            'BEGIN<script>jquery content</script><script id="json">' + json_output + '</script>END')

    def test_run_with_bad_output_json(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
        port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)

    def test_run_with_json_source(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--source-json-path=/mock-checkout/source.json', '--test-results-server=some.host'])
        port.host.filesystem.write_text_file('/mock-checkout/source.json', '{"key": "value"}')
        self._test_run_with_json_output(runner, port.host.filesystem)
        self.assertEqual(json.loads(port.host.filesystem.files['/mock-checkout/output.json']), {
            "timestamp": 123456789, "results":
            {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
            "inspector/pass.html:group_name:test_name": 42},
            "webkit-revision": 5678, "branch": "webkit-trunk",
            "key": "value"})

    def test_run_with_bad_json_source(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--source-json-path=/mock-checkout/source.json', '--test-results-server=some.host'])
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
        port.host.filesystem.write_text_file('/mock-checkout/source.json', 'bad json')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
        port.host.filesystem.write_text_file('/mock-checkout/source.json', '["another bad json"]')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)

    def test_run_with_multiple_repositories(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host'])
        port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
        self._test_run_with_json_output(runner, port.host.filesystem)
        self.assertEqual(json.loads(port.host.filesystem.files['/mock-checkout/output.json']), {
            "timestamp": 123456789, "results":
            {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
            "inspector/pass.html:group_name:test_name": 42.0},
            "webkit-revision": 5678, "some-revision": 5678, "branch": "webkit-trunk"})

    def test_run_with_upload_json(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])

        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
        self.assertEqual(generated_json['platform'], 'platform1')
        self.assertEqual(generated_json['builder-name'], 'builder1')
        self.assertEqual(generated_json['build-number'], 123)

        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)

    def test_upload_json(self):
        runner, port = self.create_runner()
        port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'

        called = []
        upload_single_text_file_throws = False
        upload_single_text_file_return_value = StringIO.StringIO('OK')

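        # The uploader methods take "mock" as their first argument so that
        # "self" inside them still refers to this test case.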
        class MockFileUploader:
            def __init__(mock, url, timeout):
                self.assertEqual(url, 'https://some.host/api/test/report')
                self.assertTrue(isinstance(timeout, int) and timeout)
                called.append('FileUploader')

            def upload_single_text_file(mock, filesystem, content_type, filename):
                self.assertEqual(filesystem, port.host.filesystem)
                self.assertEqual(content_type, 'application/json')
                self.assertEqual(filename, 'some.json')
                called.append('upload_single_text_file')
                if upload_single_text_file_throws:
                    raise Exception("Some exception")
                return upload_single_text_file_return_value

        runner._upload_json('some.host', 'some.json', MockFileUploader)
        self.assertEqual(called, ['FileUploader', 'upload_single_text_file'])

        output = OutputCapture()
        output.capture_output()
        upload_single_text_file_return_value = StringIO.StringIO('Some error')
        runner._upload_json('some.host', 'some.json', MockFileUploader)
        _, _, logs = output.restore_output()
        self.assertEqual(logs, 'Uploaded JSON but got a bad response:\nSome error\n')

        # An exception raised by upload_single_text_file shouldn't blow up _upload_json.
        called = []
        upload_single_text_file_throws = True
        runner._upload_json('some.host', 'some.json', MockFileUploader)
        self.assertEqual(called, ['FileUploader', 'upload_single_text_file'])

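    # Adds a file with the given content to the mock filesystem, under
    # dirname relative to the performance tests directory.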
    def _add_file(self, runner, dirname, filename, content=True):
        dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path
        runner._host.filesystem.maybe_make_directory(dirname)
        runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content

    def test_collect_tests(self):
        runner, port = self.create_runner()
        self._add_file(runner, 'inspector', 'a_file.html', 'a content')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)

    def _collect_tests_and_sort_test_name(self, runner):
        return sorted([test.test_name() for test in runner._collect_tests()])

    def test_collect_tests_with_multiple_files(self):
        runner, port = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])

        def add_file(filename):
            port.host.filesystem.files[runner._host.filesystem.join(runner._base_path, filename)] = 'some content'

        add_file('test1.html')
        add_file('test2.html')
        add_file('test3.html')
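        # chdir to the parent of the perf tests directory so that the
        # relative paths passed on the command line can be resolved.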
        port.host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])

    def test_collect_tests_with_skipped_list(self):
        runner, port = self.create_runner()

        self._add_file(runner, 'inspector', 'test1.html')
        self._add_file(runner, 'inspector', 'unsupported_test1.html')
        self._add_file(runner, 'inspector', 'test2.html')
        self._add_file(runner, 'inspector/resources', 'resource_file.html')
        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])

    def test_collect_tests_with_skipped_list_and_force(self):
        runner, port = self.create_runner(args=['--force'])

        self._add_file(runner, 'inspector', 'test1.html')
        self._add_file(runner, 'inspector', 'unsupported_test1.html')
        self._add_file(runner, 'inspector', 'test2.html')
        self._add_file(runner, 'inspector/resources', 'resource_file.html')
        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html'])

    def test_collect_tests_with_page_load_svg(self):
        runner, port = self.create_runner()
        self._add_file(runner, 'PageLoad', 'some-svg-test.svg')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)
        self.assertEqual(tests[0].__class__.__name__, 'PageLoadingPerfTest')

    def test_collect_tests_should_ignore_replay_tests_by_default(self):
        runner, port = self.create_runner()
        self._add_file(runner, 'Replay', 'www.webkit.org.replay')
        self.assertEqual(runner._collect_tests(), [])

    def test_collect_tests_with_replay_tests(self):
        runner, port = self.create_runner(args=['--replay'])
        self._add_file(runner, 'Replay', 'www.webkit.org.replay')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)
        self.assertEqual(tests[0].__class__.__name__, 'ReplayPerfTest')

    def test_parse_args(self):
        runner, port = self.create_runner()
        options, args = PerfTestsRunner._parse_args([
                '--build-directory=folder42',
                '--platform=platform42',
                '--builder-name', 'webkit-mac-1',
                '--build-number=56',
                '--time-out-ms=42',
                '--output-json-path=a/output.json',
                '--source-json-path=a/source.json',
                '--test-results-server=somehost',
                '--debug'])
        self.assertEqual(options.build, True)
        self.assertEqual(options.build_directory, 'folder42')
        self.assertEqual(options.platform, 'platform42')
        self.assertEqual(options.builder_name, 'webkit-mac-1')
        self.assertEqual(options.build_number, '56')
        self.assertEqual(options.time_out_ms, '42')
        self.assertEqual(options.configuration, 'Debug')
        self.assertEqual(options.output_json_path, 'a/output.json')
        self.assertEqual(options.source_json_path, 'a/source.json')
        self.assertEqual(options.test_results_server, 'somehost')


if __name__ == '__main__':
    unittest.main()