run-perf-tests should record individual values instead of statistics
Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
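The tests in this file exercise PerfTestsRunner and, in particular, assert that the results JSON now carries the raw per-run measurements in a "values" array alongside the aggregate statistics. As a rough, abridged sketch of the per-test entry shape the assertions below expect (taken from _event_target_wrapper_and_inspector_results and test_run_memory_test; the trailing ellipsis stands in for the remaining measurements):

    "Bindings/event-target-wrapper": {
        "unit": "ms", "avg": 1489.05, "median": 1487, "stdev": 14.46, "min": 1471, "max": 1510,
        "values": [1504, 1505, 1510, ...]
    }

A second fixture without "values" is kept for the upload path until perf-o-matic supports the new field (see the FIXME below).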
#!/usr/bin/python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit tests for run_perf_tests."""

import StringIO
import json
import re
import unittest

from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
from webkitpy.layout_tests.port.test import TestPort
from webkitpy.layout_tests.views import printing
from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
from webkitpy.performance_tests.perftest import PerfTest
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner


class MainTest(unittest.TestCase):
    def assertWritten(self, stream, contents):
        self.assertEquals(stream.buflist, contents)

    def normalizeFinishedTime(self, log):
        return re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log)

    class TestDriver:
        def run_test(self, driver_input, stop_when_done):
            text = ''
            timeout = False
            crash = False
            if driver_input.test_name.endswith('pass.html'):
                text = 'RESULT group_name: test_name= 42 ms'
            elif driver_input.test_name.endswith('timeout.html'):
                timeout = True
            elif driver_input.test_name.endswith('failed.html'):
                text = None
            elif driver_input.test_name.endswith('tonguey.html'):
                text = 'we are not expecting an output from perf tests but RESULT blablabla'
            elif driver_input.test_name.endswith('crash.html'):
                crash = True
            elif driver_input.test_name.endswith('event-target-wrapper.html'):
                text = """Running 20 times
Ignoring warm-up run (1502)
1504
1505
1510
1504
1507
1509
1510
1487
1488
1472
1472
1488
1473
1472
1475
1487
1486
1486
1475
1471

Time:
values 1504, 1505, 1510, 1504, 1507, 1509, 1510, 1487, 1488, 1472, 1472, 1488, 1473, 1472, 1475, 1487, 1486, 1486, 1475, 1471 ms
avg 1489.05 ms
median 1487 ms
stdev 14.46 ms
min 1471 ms
max 1510 ms
"""
            elif driver_input.test_name.endswith('some-parser.html'):
                text = """Running 20 times
Ignoring warm-up run (1115)

Time:
values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms
avg 1100 ms
median 1101 ms
stdev 11 ms
min 1080 ms
max 1120 ms
"""
            elif driver_input.test_name.endswith('memory-test.html'):
                text = """Running 20 times
Ignoring warm-up run (1115)

Time:
values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms
avg 1100 ms
median 1101 ms
stdev 11 ms
min 1080 ms
max 1120 ms

JS Heap:
values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 bytes
avg 832000 bytes
median 829000 bytes
stdev 15000 bytes
min 811000 bytes
max 848000 bytes

Malloc:
values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 bytes
avg 532000 bytes
median 529000 bytes
stdev 13000 bytes
min 511000 bytes
max 548000 bytes
"""
            return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)

        def start(self):
            """do nothing"""

        def stop(self):
            """do nothing"""

    def create_runner(self, args=[], driver_class=TestDriver):
        options, parsed_args = PerfTestsRunner._parse_args(args)
        test_port = TestPort(host=MockHost(), options=options)
        test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()

        runner = PerfTestsRunner(args=args, port=test_port)
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')

        filesystem = runner._host.filesystem
        runner.load_output_json = lambda: json.loads(filesystem.read_text_file(runner._output_json_path()))
        return runner, test_port

    def run_test(self, test_name):
        runner, port = self.create_runner()
        driver = MainTest.TestDriver()
        return runner._run_single_test(ChromiumStylePerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name)), driver)

    def test_run_passing_test(self):
        self.assertTrue(self.run_test('pass.html'))

    def test_run_silent_test(self):
        self.assertFalse(self.run_test('silent.html'))

    def test_run_failed_test(self):
        self.assertFalse(self.run_test('failed.html'))

    def test_run_tonguey_test(self):
        self.assertFalse(self.run_test('tonguey.html'))

    def test_run_timeout_test(self):
        self.assertFalse(self.run_test('timeout.html'))

    def test_run_crash_test(self):
        self.assertFalse(self.run_test('crash.html'))

    def _tests_for_runner(self, runner, test_names):
        filesystem = runner._host.filesystem
        tests = []
        for test in test_names:
            path = filesystem.join(runner._base_path, test)
            dirname = filesystem.dirname(path)
            if test.startswith('inspector/'):
                tests.append(ChromiumStylePerfTest(runner._port, test, path))
            else:
                tests.append(PerfTest(runner._port, test, path))
        return tests

    def test_run_test_set(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, len(tests) - 1)
        self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)

    def test_run_test_set_kills_drt_per_run(self):

        class TestDriverWithStopCount(MainTest.TestDriver):
            stop_count = 0

            def stop(self):
                TestDriverWithStopCount.stop_count += 1

        runner, port = self.create_runner(driver_class=TestDriverWithStopCount)

        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        unexpected_result_count = runner._run_tests_set(tests, port)

        self.assertEqual(TestDriverWithStopCount.stop_count, 6)

    def test_run_test_pause_before_testing(self):
        class TestDriverWithStartCount(MainTest.TestDriver):
            start_count = 0

            def start(self):
                TestDriverWithStartCount.start_count += 1

        runner, port = self.create_runner(args=["--pause-before-testing"], driver_class=TestDriverWithStartCount)
        tests = self._tests_for_runner(runner, ['inspector/pass.html'])

        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
            self.assertEqual(TestDriverWithStartCount.start_count, 1)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(stderr, "Ready to run test?\n")
        self.assertEqual(self.normalizeFinishedTime(log),
            "Running inspector/pass.html (1 of 1)\nRESULT group_name: test_name= 42 ms\nFinished: 0.1 s\n\n")

    def test_run_test_set_for_parser_tests(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(self.normalizeFinishedTime(log), '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
        'RESULT Bindings: event-target-wrapper= 1489.05 ms',
        'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
        'Finished: 0.1 s',
        '',
        'Running Parser/some-parser.html (2 of 2)',
        'RESULT Parser: some-parser= 1100.0 ms',
        'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
        'Finished: 0.1 s',
        '', '']))

    def test_run_memory_test(self):
        runner, port = self.create_runner_and_setup_results_template()
        runner._timestamp = 123456789
        port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')

        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner.run()
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(self.normalizeFinishedTime(log), '\n'.join([
            'Running 1 tests',
            'Running Parser/memory-test.html (1 of 1)',
            'RESULT Parser: memory-test= 1100.0 ms',
            'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
            'RESULT Parser: memory-test: JSHeap= 832000.0 bytes',
            'median= 829000.0 bytes, stdev= 15000.0 bytes, min= 811000.0 bytes, max= 848000.0 bytes',
            'RESULT Parser: memory-test: Malloc= 532000.0 bytes',
            'median= 529000.0 bytes, stdev= 13000.0 bytes, min= 511000.0 bytes, max= 548000.0 bytes',
            'Finished: 0.1 s',
            '', '']))
        results = runner.load_output_json()[0]['results']
        values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
        self.assertEqual(results['Parser/memory-test'], {'min': 1080.0, 'max': 1120.0, 'median': 1101.0, 'stdev': 11.0, 'avg': 1100.0, 'unit': 'ms', 'values': values})
        self.assertEqual(results['Parser/memory-test:JSHeap'], {'min': 811000.0, 'max': 848000.0, 'median': 829000.0, 'stdev': 15000.0, 'avg': 832000.0, 'unit': 'bytes', 'values': values})
        self.assertEqual(results['Parser/memory-test:Malloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes', 'values': values})

    def _test_run_with_json_output(self, runner, filesystem, upload_suceeds=False, expected_exit_code=0):
        filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
        filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')

        uploaded = [False]

        def mock_upload_json(hostname, json_path):
            self.assertEqual(hostname, 'some.host')
            self.assertEqual(json_path, '/mock-checkout/output.json')
            uploaded[0] = upload_suceeds
            return upload_suceeds

        runner._upload_json = mock_upload_json
        runner._timestamp = 123456789
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(runner.run(), expected_exit_code)
        finally:
            stdout, stderr, logs = output_capture.restore_output()

        if not expected_exit_code:
            self.assertEqual(self.normalizeFinishedTime(logs),
                '\n'.join(['Running 2 tests',
                'Running Bindings/event-target-wrapper.html (1 of 2)',
                'RESULT Bindings: event-target-wrapper= 1489.05 ms',
                'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
                'Finished: 0.1 s',
                '',
                'Running inspector/pass.html (2 of 2)',
                'RESULT group_name: test_name= 42 ms',
                'Finished: 0.1 s',
                '',
                '']))

        self.assertEqual(uploaded[0], upload_suceeds)

        return logs

    _event_target_wrapper_and_inspector_results = {
        "Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms",
           "values": [1504, 1505, 1510, 1504, 1507, 1509, 1510, 1487, 1488, 1472, 1472, 1488, 1473, 1472, 1475, 1487, 1486, 1486, 1475, 1471]},
        "inspector/pass.html:group_name:test_name": 42}

    # FIXME: Remove this variant once perf-o-matic supports "values".
    _event_target_wrapper_and_inspector_results_without_values = {
        "Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
        "inspector/pass.html:group_name:test_name": 42}

    def test_run_with_json_output(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
        self.assertEqual(runner.load_output_json(), {
            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values,
            "webkit-revision": "5678", "branch": "webkit-trunk"})

    def test_run_with_description(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host', '--description', 'some description'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
        self.assertEqual(runner.load_output_json(), {
            "timestamp": 123456789, "description": "some description",
            "results": self._event_target_wrapper_and_inspector_results_without_values,
            "webkit-revision": "5678", "branch": "webkit-trunk"})

    def create_runner_and_setup_results_template(self, args=[]):
        runner, port = self.create_runner(args)
        filesystem = port.host.filesystem
        filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
            'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
            '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END')
        filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
        return runner, port

    def test_run_respects_no_results(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host', '--no-results'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False)
        self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))

    def test_run_generates_json_by_default(self):
        runner, port = self.create_runner_and_setup_results_template()
        filesystem = port.host.filesystem
        output_json_path = filesystem.join(port.perf_results_directory(), runner._DEFAULT_JSON_FILENAME)
        results_page_path = filesystem.splitext(output_json_path)[0] + '.html'

        self.assertFalse(filesystem.isfile(output_json_path))
        self.assertFalse(filesystem.isfile(results_page_path))

        self._test_run_with_json_output(runner, port.host.filesystem)

        self.assertEqual(json.loads(port.host.filesystem.read_text_file(output_json_path)), [{
            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
            "webkit-revision": "5678", "branch": "webkit-trunk"}])

        self.assertTrue(filesystem.isfile(output_json_path))
        self.assertTrue(filesystem.isfile(results_page_path))

    def test_run_generates_and_show_results_page(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        page_shown = []
        port.show_results_html_file = lambda path: page_shown.append(path)
        filesystem = port.host.filesystem
        self._test_run_with_json_output(runner, filesystem)

        expected_entry = {"timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
            "webkit-revision": "5678", "branch": "webkit-trunk"}

        self.maxDiff = None
        json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
        self.assertEqual(json.loads(json_output), [expected_entry])
        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
            'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
            '<script>%s</script>END' % json_output)
        self.assertEqual(page_shown[0], '/mock-checkout/output.html')

        self._test_run_with_json_output(runner, filesystem)
        json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
        self.assertEqual(json.loads(json_output), [expected_entry, expected_entry])
        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
            'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
            '<script>%s</script>END' % json_output)

    def test_run_respects_no_show_results(self):
        show_results_html_file = lambda path: page_shown.append(path)

        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        page_shown = []
        port.show_results_html_file = show_results_html_file
        self._test_run_with_json_output(runner, port.host.filesystem)
        self.assertEqual(page_shown[0], '/mock-checkout/output.html')

        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
            '--no-show-results'])
        page_shown = []
        port.show_results_html_file = show_results_html_file
        self._test_run_with_json_output(runner, port.host.filesystem)
        self.assertEqual(page_shown, [])

    def test_run_with_bad_output_json(self):
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
        port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)

    def test_run_with_slave_config_json(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--source-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
        self.assertEqual(runner.load_output_json(), {
            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values,
            "webkit-revision": "5678", "branch": "webkit-trunk", "key": "value"})

    def test_run_with_bad_slave_config_json(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--source-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
        logs = self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
        self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)

    def test_run_with_multiple_repositories(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host'])
        port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
        self.assertEqual(runner.load_output_json(), {
            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values,
            "webkit-revision": "5678", "some-revision": "5678", "branch": "webkit-trunk"})

    def test_run_with_upload_json(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])

        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
        self.assertEqual(generated_json['platform'], 'platform1')
        self.assertEqual(generated_json['builder-name'], 'builder1')
        self.assertEqual(generated_json['build-number'], 123)

        self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)

    def test_upload_json(self):
        runner, port = self.create_runner()
        port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'

        called = []
        upload_single_text_file_throws = False
        upload_single_text_file_return_value = StringIO.StringIO('OK')

        class MockFileUploader:
            def __init__(mock, url, timeout):
                self.assertEqual(url, 'https://some.host/api/test/report')
                self.assertTrue(isinstance(timeout, int) and timeout)
                called.append('FileUploader')

            def upload_single_text_file(mock, filesystem, content_type, filename):
                self.assertEqual(filesystem, port.host.filesystem)
                self.assertEqual(content_type, 'application/json')
                self.assertEqual(filename, 'some.json')
                called.append('upload_single_text_file')
                if upload_single_text_file_throws:
                    raise Exception("Some exception")
                return upload_single_text_file_return_value

        runner._upload_json('some.host', 'some.json', MockFileUploader)
        self.assertEqual(called, ['FileUploader', 'upload_single_text_file'])

        output = OutputCapture()
        output.capture_output()
        upload_single_text_file_return_value = StringIO.StringIO('Some error')
        runner._upload_json('some.host', 'some.json', MockFileUploader)
        _, _, logs = output.restore_output()
        self.assertEqual(logs, 'Uploaded JSON but got a bad response:\nSome error\n')

        # An exception thrown by upload_single_text_file shouldn't blow up _upload_json
        called = []
        upload_single_text_file_throws = True
        runner._upload_json('some.host', 'some.json', MockFileUploader)
        self.assertEqual(called, ['FileUploader', 'upload_single_text_file'])

    def _add_file(self, runner, dirname, filename, content=True):
        dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path
        runner._host.filesystem.maybe_make_directory(dirname)
        runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content

    def test_collect_tests(self):
        runner, port = self.create_runner()
        self._add_file(runner, 'inspector', 'a_file.html', 'a content')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)

    def _collect_tests_and_sort_test_name(self, runner):
        return sorted([test.test_name() for test in runner._collect_tests()])

    def test_collect_tests_with_multiple_files(self):
        runner, port = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])

        def add_file(filename):
            port.host.filesystem.files[runner._host.filesystem.join(runner._base_path, filename)] = 'some content'

        add_file('test1.html')
        add_file('test2.html')
        add_file('test3.html')
        port.host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])

    def test_collect_tests_with_skipped_list(self):
        runner, port = self.create_runner()

        self._add_file(runner, 'inspector', 'test1.html')
        self._add_file(runner, 'inspector', 'unsupported_test1.html')
        self._add_file(runner, 'inspector', 'test2.html')
        self._add_file(runner, 'inspector/resources', 'resource_file.html')
        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])

    def test_collect_tests_with_skipped_list_and_force(self):
        runner, port = self.create_runner(args=['--force'])

        self._add_file(runner, 'inspector', 'test1.html')
        self._add_file(runner, 'inspector', 'unsupported_test1.html')
        self._add_file(runner, 'inspector', 'test2.html')
        self._add_file(runner, 'inspector/resources', 'resource_file.html')
        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html'])

    def test_collect_tests_with_page_load_svg(self):
        runner, port = self.create_runner()
        self._add_file(runner, 'PageLoad', 'some-svg-test.svg')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)
        self.assertEqual(tests[0].__class__.__name__, 'PageLoadingPerfTest')

    def test_collect_tests_should_ignore_replay_tests_by_default(self):
        runner, port = self.create_runner()
        self._add_file(runner, 'Replay', 'www.webkit.org.replay')
        self.assertEqual(runner._collect_tests(), [])

    def test_collect_tests_with_replay_tests(self):
        runner, port = self.create_runner(args=['--replay'])
        self._add_file(runner, 'Replay', 'www.webkit.org.replay')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)
        self.assertEqual(tests[0].__class__.__name__, 'ReplayPerfTest')

    def test_parse_args(self):
        runner, port = self.create_runner()
        options, args = PerfTestsRunner._parse_args([
                '--build-directory=folder42',
                '--platform=platform42',
                '--builder-name', 'webkit-mac-1',
                '--build-number=56',
                '--time-out-ms=42',
                '--output-json-path=a/output.json',
                '--source-json-path=a/source.json',
                '--test-results-server=somehost',
                '--debug'])
        self.assertEqual(options.build, True)
        self.assertEqual(options.build_directory, 'folder42')
        self.assertEqual(options.platform, 'platform42')
        self.assertEqual(options.builder_name, 'webkit-mac-1')
        self.assertEqual(options.build_number, '56')
        self.assertEqual(options.time_out_ms, '42')
        self.assertEqual(options.configuration, 'Debug')
        self.assertEqual(options.output_json_path, 'a/output.json')
        self.assertEqual(options.source_json_path, 'a/source.json')
        self.assertEqual(options.test_results_server, 'somehost')


if __name__ == '__main__':
    unittest.main()