REGRESSION: run-perf-tests no longer reports the total test time
[WebKit-https.git] / Tools / Scripts / webkitpy / performance_tests / perftestsrunner_unittest.py
1 #!/usr/bin/python
2 # Copyright (C) 2012 Google Inc. All rights reserved.
3 #
4 # Redistribution and use in source and binary forms, with or without
5 # modification, are permitted provided that the following conditions are
6 # met:
7 #
8 #     * Redistributions of source code must retain the above copyright
9 # notice, this list of conditions and the following disclaimer.
10 #     * Redistributions in binary form must reproduce the above
11 # copyright notice, this list of conditions and the following disclaimer
12 # in the documentation and/or other materials provided with the
13 # distribution.
14 #     * Neither the name of Google Inc. nor the names of its
15 # contributors may be used to endorse or promote products derived from
16 # this software without specific prior written permission.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30 """Unit tests for run_perf_tests."""
31
32 import StringIO
33 import json
34 import re
35 import unittest
36
37 from webkitpy.common.host_mock import MockHost
38 from webkitpy.common.system.filesystem_mock import MockFileSystem
39 from webkitpy.common.system.outputcapture import OutputCapture
40 from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
41 from webkitpy.layout_tests.port.test import TestPort
42 from webkitpy.layout_tests.views import printing
43 from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
44 from webkitpy.performance_tests.perftest import PerfTest
45 from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
46
47
class MainTest(unittest.TestCase):
    def assertWritten(self, stream, contents):
        """Assert that the captured stream's buffered lines equal contents.

        Uses assertEqual (assertEquals is a deprecated alias; the rest of
        this file already uses assertEqual).
        """
        self.assertEqual(stream.buflist, contents)
51
52     def normalizeFinishedTime(self, log):
53         return re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log)
54
    class TestDriver:
        """Fake driver returning canned DriverOutput keyed off the test file name."""

        def run_test(self, driver_input, stop_when_done):
            # Dispatch on the test name's suffix to produce one of the canned
            # outcomes the tests below rely on.
            text = ''
            timeout = False
            crash = False
            if driver_input.test_name.endswith('pass.html'):
                # Well-formed Chromium-style RESULT line.
                text = 'RESULT group_name: test_name= 42 ms'
            elif driver_input.test_name.endswith('timeout.html'):
                timeout = True
            elif driver_input.test_name.endswith('failed.html'):
                # No output text at all (distinct from the empty string).
                text = None
            elif driver_input.test_name.endswith('tonguey.html'):
                text = 'we are not expecting an output from perf tests but RESULT blablabla'
            elif driver_input.test_name.endswith('crash.html'):
                crash = True
            elif driver_input.test_name.endswith('event-target-wrapper.html'):
                # Full WebKit-style run: 20 samples plus summary statistics.
                text = """Running 20 times
Ignoring warm-up run (1502)
1504
1505
1510
1504
1507
1509
1510
1487
1488
1472
1472
1488
1473
1472
1475
1487
1486
1486
1475
1471

Time:
avg 1489.05 ms
median 1487 ms
stdev 14.46 ms
min 1471 ms
max 1510 ms
"""
            elif driver_input.test_name.endswith('some-parser.html'):
                # Summary statistics only (no per-run samples).
                text = """Running 20 times
Ignoring warm-up run (1115)

Time:
avg 1100 ms
median 1101 ms
stdev 11 ms
min 1080 ms
max 1120 ms
"""
            elif driver_input.test_name.endswith('memory-test.html'):
                # Time statistics plus JS Heap and Malloc memory statistics.
                text = """Running 20 times
Ignoring warm-up run (1115)

Time:
avg 1100 ms
median 1101 ms
stdev 11 ms
min 1080 ms
max 1120 ms

JS Heap:
avg 832000 bytes
median 829000 bytes
stdev 15000 bytes
min 811000 bytes
max 848000 bytes

Malloc:
avg 532000 bytes
median 529000 bytes
stdev 13000 bytes
min 511000 bytes
max 548000 bytes
"""
            return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)

        def start(self):
            """do nothing"""

        def stop(self):
            """do nothing"""
144
145     def create_runner(self, args=[], driver_class=TestDriver):
146         options, parsed_args = PerfTestsRunner._parse_args(args)
147         test_port = TestPort(host=MockHost(), options=options)
148         test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()
149
150         runner = PerfTestsRunner(args=args, port=test_port)
151         runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
152         runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
153         runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
154
155         filesystem = runner._host.filesystem
156         runner.load_output_json = lambda: json.loads(filesystem.read_text_file(runner._output_json_path()))
157         return runner, test_port
158
159     def run_test(self, test_name):
160         runner, port = self.create_runner()
161         driver = MainTest.TestDriver()
162         return runner._run_single_test(ChromiumStylePerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name)), driver)
163
    def test_run_passing_test(self):
        """A test emitting a well-formed RESULT line passes."""
        self.assertTrue(self.run_test('pass.html'))

    def test_run_silent_test(self):
        """A test producing no output fails."""
        self.assertFalse(self.run_test('silent.html'))

    def test_run_failed_test(self):
        """A test whose driver returns no text (None) fails."""
        self.assertFalse(self.run_test('failed.html'))

    def test_run_tonguey_test(self):
        """Unexpected chatter around a RESULT keyword fails."""
        self.assertFalse(self.run_test('tonguey.html'))

    def test_run_timeout_test(self):
        """A timed-out driver run fails."""
        self.assertFalse(self.run_test('timeout.html'))

    def test_run_crash_test(self):
        """A crashed driver run fails."""
        self.assertFalse(self.run_test('crash.html'))
181
182     def _tests_for_runner(self, runner, test_names):
183         filesystem = runner._host.filesystem
184         tests = []
185         for test in test_names:
186             path = filesystem.join(runner._base_path, test)
187             dirname = filesystem.dirname(path)
188             if test.startswith('inspector/'):
189                 tests.append(ChromiumStylePerfTest(runner._port, test, path))
190             else:
191                 tests.append(PerfTest(runner._port, test, path))
192         return tests
193
    def test_run_test_set(self):
        """_run_tests_set counts every non-passing test as an unexpected result."""
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
        finally:
            stdout, stderr, log = output.restore_output()
        # Only pass.html passes, so all but one of the tests is unexpected.
        self.assertEqual(unexpected_result_count, len(tests) - 1)
        self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)
206
    def test_run_test_set_kills_drt_per_run(self):
        """The driver is stopped after every test, passing or not."""

        class TestDriverWithStopCount(MainTest.TestDriver):
            # Class-level counter shared across instances; each run_test's
            # driver teardown bumps it.
            stop_count = 0

            def stop(self):
                TestDriverWithStopCount.stop_count += 1

        runner, port = self.create_runner(driver_class=TestDriverWithStopCount)

        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        unexpected_result_count = runner._run_tests_set(tests, port)

        # One stop() per test run.
        self.assertEqual(TestDriverWithStopCount.stop_count, 6)
222
    def test_run_test_pause_before_testing(self):
        """--pause-before-testing prompts on stderr and starts the driver once."""
        class TestDriverWithStartCount(MainTest.TestDriver):
            start_count = 0

            def start(self):
                TestDriverWithStartCount.start_count += 1

        runner, port = self.create_runner(args=["--pause-before-testing"], driver_class=TestDriverWithStartCount)
        tests = self._tests_for_runner(runner, ['inspector/pass.html'])

        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
            self.assertEqual(TestDriverWithStartCount.start_count, 1)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(stderr, "Ready to run test?\n")
        # normalizeFinishedTime pins the variable per-test timing so the whole
        # log (including the 'Finished:' line) can be compared verbatim.
        self.assertEqual(self.normalizeFinishedTime(log),
            "Running inspector/pass.html (1 of 1)\nRESULT group_name: test_name= 42 ms\nFinished: 0.1 s\n\n")
243
    def test_run_test_set_for_parser_tests(self):
        """WebKit-style tests log RESULT plus median/stdev/min/max and a Finished time."""
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        # Expected values come straight from TestDriver's canned outputs.
        self.assertEqual(self.normalizeFinishedTime(log), '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
        'RESULT Bindings: event-target-wrapper= 1489.05 ms',
        'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
        'Finished: 0.1 s',
        '',
        'Running Parser/some-parser.html (2 of 2)',
        'RESULT Parser: some-parser= 1100.0 ms',
        'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
        'Finished: 0.1 s',
        '', '']))
264
    def test_run_memory_test(self):
        """memory-test.html reports JSHeap and Malloc results alongside Time,
        both in the log and in the output JSON."""
        runner, port = self.create_runner_and_setup_results_template()
        runner._timestamp = 123456789
        port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')

        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner.run()
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(self.normalizeFinishedTime(log), '\n'.join([
            'Running 1 tests',
            'Running Parser/memory-test.html (1 of 1)',
            'RESULT Parser: memory-test= 1100.0 ms',
            'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
            'RESULT Parser: memory-test: JSHeap= 832000.0 bytes',
            'median= 829000.0 bytes, stdev= 15000.0 bytes, min= 811000.0 bytes, max= 848000.0 bytes',
            'RESULT Parser: memory-test: Malloc= 532000.0 bytes',
            'median= 529000.0 bytes, stdev= 13000.0 bytes, min= 511000.0 bytes, max= 548000.0 bytes',
            'Finished: 0.1 s',
            '', '']))
        results = runner.load_output_json()[0]['results']
        self.assertEqual(results['Parser/memory-test'], {'min': 1080.0, 'max': 1120.0, 'median': 1101.0, 'stdev': 11.0, 'avg': 1100.0, 'unit': 'ms'})
        self.assertEqual(results['Parser/memory-test:JSHeap'], {'min': 811000.0, 'max': 848000.0, 'median': 829000.0, 'stdev': 15000.0, 'avg': 832000.0, 'unit': 'bytes'})
        self.assertEqual(results['Parser/memory-test:Malloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes'})
292
    def _test_run_with_json_output(self, runner, filesystem, upload_suceeds=False, expected_exit_code=0):
        """Run the runner end-to-end with two tests and a mocked JSON uploader.

        upload_suceeds: value the mocked _upload_json reports back.
            (NOTE(review): spelling is a long-standing typo for 'succeeds';
            callers pass it by keyword, so renaming would break them.)
        expected_exit_code: asserted against runner.run()'s return value; the
            log is only compared for the success (0) case.
        Returns the captured log text.
        """
        filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
        filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')

        # One-element list so the nested function can mutate it (py2 has no
        # 'nonlocal').
        uploaded = [False]

        def mock_upload_json(hostname, json_path):
            self.assertEqual(hostname, 'some.host')
            self.assertEqual(json_path, '/mock-checkout/output.json')
            uploaded[0] = upload_suceeds
            return upload_suceeds

        runner._upload_json = mock_upload_json
        runner._timestamp = 123456789
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(runner.run(), expected_exit_code)
        finally:
            stdout, stderr, logs = output_capture.restore_output()

        if not expected_exit_code:
            self.assertEqual(self.normalizeFinishedTime(logs),
                '\n'.join(['Running 2 tests',
                'Running Bindings/event-target-wrapper.html (1 of 2)',
                'RESULT Bindings: event-target-wrapper= 1489.05 ms',
                'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
                'Finished: 0.1 s',
                '',
                'Running inspector/pass.html (2 of 2)',
                'RESULT group_name: test_name= 42 ms',
                'Finished: 0.1 s',
                '',
                '']))

        self.assertEqual(uploaded[0], upload_suceeds)

        return logs
331
    # Expected 'results' dictionary shared by the JSON-output tests below;
    # values match TestDriver's canned outputs.
    _event_target_wrapper_and_inspector_results = {
        "Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
        "inspector/pass.html:group_name:test_name": 42}
335
    def test_run_with_json_output(self):
        """A successful run writes timestamp, results, revision and branch to JSON."""
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
        self.assertEqual(runner.load_output_json(), {
            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
            "webkit-revision": "5678", "branch": "webkit-trunk"})
343
    def test_run_with_description(self):
        """--description text is carried through into the output JSON."""
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host', '--description', 'some description'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
        self.assertEqual(runner.load_output_json(), {
            "timestamp": 123456789, "description": "some description",
            "results": self._event_target_wrapper_and_inspector_results,
            "webkit-revision": "5678", "branch": "webkit-trunk"})
352
353     def create_runner_and_setup_results_template(self, args=[]):
354         runner, port = self.create_runner(args)
355         filesystem = port.host.filesystem
356         filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
357             'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
358             '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END')
359         filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
360         return runner, port
361
    def test_run_respects_no_results(self):
        """--no-results skips both the upload and writing output.json."""
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host', '--no-results'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False)
        self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))
367
    def test_run_generates_json_by_default(self):
        """Without --output-json-path, JSON plus an HTML results page are
        written into the port's perf results directory."""
        runner, port = self.create_runner_and_setup_results_template()
        filesystem = port.host.filesystem
        output_json_path = filesystem.join(port.perf_results_directory(), runner._DEFAULT_JSON_FILENAME)
        results_page_path = filesystem.splitext(output_json_path)[0] + '.html'

        # Neither file exists before the run.
        self.assertFalse(filesystem.isfile(output_json_path))
        self.assertFalse(filesystem.isfile(results_page_path))

        self._test_run_with_json_output(runner, port.host.filesystem)

        self.assertEqual(json.loads(port.host.filesystem.read_text_file(output_json_path)), [{
            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
            "webkit-revision": "5678", "branch": "webkit-trunk"}])

        self.assertTrue(filesystem.isfile(output_json_path))
        self.assertTrue(filesystem.isfile(results_page_path))
385
    def test_run_generates_and_show_results_page(self):
        """Repeated runs append entries to output.json, regenerate output.html,
        and show the page via the port."""
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        page_shown = []
        port.show_results_html_file = lambda path: page_shown.append(path)
        filesystem = port.host.filesystem
        self._test_run_with_json_output(runner, filesystem)

        expected_entry = {"timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
            "webkit-revision": "5678", "branch": "webkit-trunk"}

        self.maxDiff = None
        json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
        self.assertEqual(json.loads(json_output), [expected_entry])
        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
            'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
            '<script>%s</script>END' % json_output)
        self.assertEqual(page_shown[0], '/mock-checkout/output.html')

        # A second run appends a second identical entry and rewrites the page.
        self._test_run_with_json_output(runner, filesystem)
        json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
        self.assertEqual(json.loads(json_output), [expected_entry, expected_entry])
        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
            'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
            '<script>%s</script>END' % json_output)
410
    def test_run_respects_no_show_results(self):
        """--no-show-results suppresses opening the generated results page."""
        # The lambda late-binds page_shown, so rebinding the list below
        # redirects where paths are recorded.
        show_results_html_file = lambda path: page_shown.append(path)

        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        page_shown = []
        port.show_results_html_file = show_results_html_file
        self._test_run_with_json_output(runner, port.host.filesystem)
        self.assertEqual(page_shown[0], '/mock-checkout/output.html')

        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--no-show-results'])
        page_shown = []
        port.show_results_html_file = show_results_html_file
        self._test_run_with_json_output(runner, port.host.filesystem)
        self.assertEqual(page_shown, [])
426
    def test_run_with_bad_output_json(self):
        """Unparseable or wrongly-shaped existing output JSON yields EXIT_CODE_BAD_MERGE."""
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
        port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
433
    def test_run_with_slave_config_json(self):
        """Keys from the slave configuration JSON are merged into the output JSON."""
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--source-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
        self.assertEqual(runner.load_output_json(), {
            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
            "webkit-revision": "5678", "branch": "webkit-trunk", "key": "value"})
442
    def test_run_with_bad_slave_config_json(self):
        """Missing, unparseable, or non-dict slave config JSON yields
        EXIT_CODE_BAD_SOURCE_JSON."""
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--source-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
        logs = self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
        self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
452
    def test_run_with_multiple_repositories(self):
        """Each configured repository contributes its own '<name>-revision' entry."""
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host'])
        port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
        self.assertEqual(runner.load_output_json(), {
            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
            "webkit-revision": "5678", "some-revision": "5678", "branch": "webkit-trunk"})
461
    def test_run_with_upload_json(self):
        """platform/builder-name/build-number flags land in the JSON; a failed
        upload returns EXIT_CODE_FAILED_UPLOADING."""
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])

        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
        self.assertEqual(generated_json['platform'], 'platform1')
        self.assertEqual(generated_json['builder-name'], 'builder1')
        # build-number is serialized as an int even though parsed as a string.
        self.assertEqual(generated_json['build-number'], 123)

        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
473
474     def test_upload_json(self):
475         runner, port = self.create_runner()
476         port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'
477
478         called = []
479         upload_single_text_file_throws = False
480         upload_single_text_file_return_value = StringIO.StringIO('OK')
481
482         class MockFileUploader:
483             def __init__(mock, url, timeout):
484                 self.assertEqual(url, 'https://some.host/api/test/report')
485                 self.assertTrue(isinstance(timeout, int) and timeout)
486                 called.append('FileUploader')
487
488             def upload_single_text_file(mock, filesystem, content_type, filename):
489                 self.assertEqual(filesystem, port.host.filesystem)
490                 self.assertEqual(content_type, 'application/json')
491                 self.assertEqual(filename, 'some.json')
492                 called.append('upload_single_text_file')
493                 if upload_single_text_file_throws:
494                     raise "Some exception"
495                 return upload_single_text_file_return_value
496
497         runner._upload_json('some.host', 'some.json', MockFileUploader)
498         self.assertEqual(called, ['FileUploader', 'upload_single_text_file'])
499
500         output = OutputCapture()
501         output.capture_output()
502         upload_single_text_file_return_value = StringIO.StringIO('Some error')
503         runner._upload_json('some.host', 'some.json', MockFileUploader)
504         _, _, logs = output.restore_output()
505         self.assertEqual(logs, 'Uploaded JSON but got a bad response:\nSome error\n')
506
507         # Throwing an exception upload_single_text_file shouldn't blow up _upload_json
508         called = []
509         upload_single_text_file_throws = True
510         runner._upload_json('some.host', 'some.json', MockFileUploader)
511         self.assertEqual(called, ['FileUploader', 'upload_single_text_file'])
512
513     def _add_file(self, runner, dirname, filename, content=True):
514         dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path
515         runner._host.filesystem.maybe_make_directory(dirname)
516         runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content
517
    def test_collect_tests(self):
        """_collect_tests finds test files placed under the perf-tests base path."""
        runner, port = self.create_runner()
        self._add_file(runner, 'inspector', 'a_file.html', 'a content')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)
523
524     def _collect_tests_and_sort_test_name(self, runner):
525         return sorted([test.test_name() for test in runner._collect_tests()])
526
    def test_collect_tests_with_multile_files(self):
        """Only tests named on the command line are collected.

        NOTE(review): 'multile' is a long-standing typo for 'multiple' in
        this test's name.
        """
        runner, port = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])

        def add_file(filename):
            port.host.filesystem.files[runner._host.filesystem.join(runner._base_path, filename)] = 'some content'

        add_file('test1.html')
        add_file('test2.html')
        add_file('test3.html')
        # chdir to the parent of the perf-tests directory so the relative
        # command-line paths resolve.
        port.host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])
538
    def test_collect_tests_with_skipped_list(self):
        """Tests on the port's skipped list are excluded by default."""
        runner, port = self.create_runner()

        self._add_file(runner, 'inspector', 'test1.html')
        self._add_file(runner, 'inspector', 'unsupported_test1.html')
        self._add_file(runner, 'inspector', 'test2.html')
        self._add_file(runner, 'inspector/resources', 'resource_file.html')
        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])
549
550     def test_collect_tests_with_skipped_list(self):
551         runner, port = self.create_runner(args=['--force'])
552
553         self._add_file(runner, 'inspector', 'test1.html')
554         self._add_file(runner, 'inspector', 'unsupported_test1.html')
555         self._add_file(runner, 'inspector', 'test2.html')
556         self._add_file(runner, 'inspector/resources', 'resource_file.html')
557         self._add_file(runner, 'unsupported', 'unsupported_test2.html')
558         port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
559         self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html'])
560
    def test_collect_tests_with_page_load_svg(self):
        """.svg files under PageLoad are collected as PageLoadingPerfTest."""
        runner, port = self.create_runner()
        self._add_file(runner, 'PageLoad', 'some-svg-test.svg')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)
        self.assertEqual(tests[0].__class__.__name__, 'PageLoadingPerfTest')
567
    def test_collect_tests_should_ignore_replay_tests_by_default(self):
        """.replay files are skipped unless --replay is given."""
        runner, port = self.create_runner()
        self._add_file(runner, 'Replay', 'www.webkit.org.replay')
        self.assertEqual(runner._collect_tests(), [])
572
    def test_collect_tests_with_replay_tests(self):
        """--replay collects .replay files as ReplayPerfTest instances."""
        runner, port = self.create_runner(args=['--replay'])
        self._add_file(runner, 'Replay', 'www.webkit.org.replay')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)
        self.assertEqual(tests[0].__class__.__name__, 'ReplayPerfTest')
579
    def test_parse_args(self):
        """Every supported command-line flag parses into the expected option value."""
        runner, port = self.create_runner()
        options, args = PerfTestsRunner._parse_args([
                '--build-directory=folder42',
                '--platform=platform42',
                '--builder-name', 'webkit-mac-1',
                '--build-number=56',
                '--time-out-ms=42',
                '--output-json-path=a/output.json',
                '--source-json-path=a/source.json',
                '--test-results-server=somehost',
                '--debug'])
        self.assertEqual(options.build, True)
        self.assertEqual(options.build_directory, 'folder42')
        self.assertEqual(options.platform, 'platform42')
        self.assertEqual(options.builder_name, 'webkit-mac-1')
        # build-number and time-out-ms are kept as strings by the parser.
        self.assertEqual(options.build_number, '56')
        self.assertEqual(options.time_out_ms, '42')
        # --debug maps to the 'Debug' configuration name.
        self.assertEqual(options.configuration, 'Debug')
        self.assertEqual(options.output_json_path, 'a/output.json')
        self.assertEqual(options.source_json_path, 'a/source.json')
        self.assertEqual(options.test_results_server, 'somehost')
602
603
# Allow running this test file directly from the command line.
if __name__ == '__main__':
    unittest.main()