run-perf-tests should generate a results page
Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
#!/usr/bin/python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit tests for run_perf_tests."""

import StringIO
import json
import unittest

from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.port.driver import DriverOutput
from webkitpy.layout_tests.port.test import TestPort
from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
from webkitpy.performance_tests.perftest import PerfTest
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner


class MainTest(unittest.TestCase):
    def assertWritten(self, stream, contents):
        self.assertEqual(stream.buflist, contents)

    class TestDriver:
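        # Stub driver that stands in for a real DumpRenderTree process:
        # run_test() returns canned DriverOutput objects keyed off the test
        # file name, so the runner's result handling can be exercised without
        # launching a browser.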
        def run_test(self, driver_input):
            text = ''
            timeout = False
            crash = False
            if driver_input.test_name.endswith('pass.html'):
                text = 'RESULT group_name: test_name= 42 ms'
            elif driver_input.test_name.endswith('timeout.html'):
                timeout = True
            elif driver_input.test_name.endswith('failed.html'):
                text = None
            elif driver_input.test_name.endswith('tonguey.html'):
                text = 'we are not expecting an output from perf tests but RESULT blablabla'
            elif driver_input.test_name.endswith('crash.html'):
                crash = True
            elif driver_input.test_name.endswith('event-target-wrapper.html'):
                text = """Running 20 times
Ignoring warm-up run (1502)
1504
1505
1510
1504
1507
1509
1510
1487
1488
1472
1472
1488
1473
1472
1475
1487
1486
1486
1475
1471

avg 1489.05
median 1487
stdev 14.46
min 1471
max 1510
"""
            elif driver_input.test_name.endswith('some-parser.html'):
                text = """Running 20 times
Ignoring warm-up run (1115)

avg 1100
median 1101
stdev 11
min 1080
max 1120
"""
            return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)

        def start(self):
            """do nothing"""

        def stop(self):
            """do nothing"""

    def create_runner(self, args=None, driver_class=TestDriver):
        # A mutable default argument ([]) would be shared across calls; use None.
        args = args or []
        options, parsed_args = PerfTestsRunner._parse_args(args)
        test_port = TestPort(host=MockHost(), options=options)
        test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()

        runner = PerfTestsRunner(args=args, port=test_port)
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
        return runner, test_port

    def run_test(self, test_name):
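        # Convenience wrapper: runs a single Chromium-style perf test through a
        # fresh runner and returns whether it passed.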
        runner, port = self.create_runner()
        driver = MainTest.TestDriver()
        return runner._run_single_test(ChromiumStylePerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name)), driver)

    def test_run_passing_test(self):
        self.assertTrue(self.run_test('pass.html'))

    def test_run_silent_test(self):
        self.assertFalse(self.run_test('silent.html'))

    def test_run_failed_test(self):
        self.assertFalse(self.run_test('failed.html'))

    def test_run_tonguey_test(self):
        self.assertFalse(self.run_test('tonguey.html'))

    def test_run_timeout_test(self):
        self.assertFalse(self.run_test('timeout.html'))

    def test_run_crash_test(self):
        self.assertFalse(self.run_test('crash.html'))

    def _tests_for_runner(self, runner, test_names):
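        # Builds PerfTest objects for the given relative test paths; inspector/
        # tests get the Chromium-style parser, everything else the default one.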
        filesystem = runner._host.filesystem
        tests = []
        for test in test_names:
            path = filesystem.join(runner._base_path, test)
            if test.startswith('inspector/'):
                tests.append(ChromiumStylePerfTest(runner._port, test, path))
            else:
                tests.append(PerfTest(runner._port, test, path))
        return tests

    def test_run_test_set(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, len(tests) - 1)
        self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)

    def test_run_test_set_kills_drt_per_run(self):
        class TestDriverWithStopCount(MainTest.TestDriver):
            stop_count = 0

            def stop(self):
                TestDriverWithStopCount.stop_count += 1

        runner, port = self.create_runner(driver_class=TestDriverWithStopCount)

        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        unexpected_result_count = runner._run_tests_set(tests, port)

        self.assertEqual(TestDriverWithStopCount.stop_count, 6)

    def test_run_test_pause_before_testing(self):
        class TestDriverWithStartCount(MainTest.TestDriver):
            start_count = 0

            def start(self):
                TestDriverWithStartCount.start_count += 1

        runner, port = self.create_runner(args=["--pause-before-testing"], driver_class=TestDriverWithStartCount)
        tests = self._tests_for_runner(runner, ['inspector/pass.html'])

        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
            self.assertEqual(TestDriverWithStartCount.start_count, 1)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(stderr, "Ready to run test?\n")
        self.assertEqual(log, "Running inspector/pass.html (1 of 1)\nRESULT group_name: test_name= 42 ms\n\n")

    def test_run_test_set_for_parser_tests(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(log, '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
        'RESULT Bindings: event-target-wrapper= 1489.05 ms',
        'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
        '',
        'Running Parser/some-parser.html (2 of 2)',
        'RESULT Parser: some-parser= 1100.0 ms',
        'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
        '', '']))

    def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=True, expected_exit_code=0):
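        # Shared helper: seeds two tests on the mock filesystem, stubs out JSON
        # uploading, runs the full runner, and verifies the logged output.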
        filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
        filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')

        uploaded = [False]

        def mock_upload_json(hostname, json_path):
            self.assertEqual(hostname, 'some.host')
            self.assertEqual(json_path, '/mock-checkout/output.json')
            uploaded[0] = True
            return upload_succeeds

        runner._upload_json = mock_upload_json
        runner._timestamp = 123456789
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(runner.run(), expected_exit_code)
        finally:
            stdout, stderr, logs = output_capture.restore_output()

        self.assertEqual(logs, '\n'.join([
            'Running 2 tests',
            'Running Bindings/event-target-wrapper.html (1 of 2)',
            'RESULT Bindings: event-target-wrapper= 1489.05 ms',
            'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
            '',
            'Running inspector/pass.html (2 of 2)',
            'RESULT group_name: test_name= 42 ms',
            '',
            '']))

        return uploaded[0]

    def test_run_with_json_output(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host'])
        self._test_run_with_json_output(runner, port.host.filesystem)
        self.assertEqual(json.loads(port.host.filesystem.read_text_file('/mock-checkout/output.json')), {
            "timestamp": 123456789, "results":
            {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
            "inspector/pass.html:group_name:test_name": 42},
            "webkit-revision": 5678, "branch": "webkit-trunk"})

    def test_run_generates_results_page(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json'])
        filesystem = port.host.filesystem
        filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
            'BEGIN<?WebKitPerfTestRunnerInsertionPoint?>END')
        filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js',
            'jquery content')

        self._test_run_with_json_output(runner, filesystem)

        expected_entry = {"timestamp": 123456789, "results": {"Bindings/event-target-wrapper":
            {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
            "inspector/pass.html:group_name:test_name": 42}, "webkit-revision": 5678}

        self.maxDiff = None
        json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
        self.assertEqual(json.loads(json_output), [expected_entry])
        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
            'BEGIN<script>jquery content</script><script id="json">' + json_output + '</script>END')

        self._test_run_with_json_output(runner, filesystem)
        json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
        self.assertEqual(json.loads(json_output), [expected_entry, expected_entry])
        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
            'BEGIN<script>jquery content</script><script id="json">' + json_output + '</script>END')

    def test_run_with_json_source(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--source-json-path=/mock-checkout/source.json', '--test-results-server=some.host'])
        port.host.filesystem.write_text_file('/mock-checkout/source.json', '{"key": "value"}')
        self._test_run_with_json_output(runner, port.host.filesystem)
        self.assertEqual(json.loads(port.host.filesystem.files['/mock-checkout/output.json']), {
            "timestamp": 123456789, "results":
            {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
            "inspector/pass.html:group_name:test_name": 42},
            "webkit-revision": 5678, "branch": "webkit-trunk",
            "key": "value"})

    def test_run_with_multiple_repositories(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server=some.host'])
        port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
        self._test_run_with_json_output(runner, port.host.filesystem)
        self.assertEqual(json.loads(port.host.filesystem.files['/mock-checkout/output.json']), {
            "timestamp": 123456789, "results":
            {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
            "inspector/pass.html:group_name:test_name": 42.0},
            "webkit-revision": 5678, "some-revision": 5678, "branch": "webkit-trunk"})

    def test_run_with_upload_json(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])

        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
        self.assertEqual(generated_json['platform'], 'platform1')
        self.assertEqual(generated_json['builder-name'], 'builder1')
        self.assertEqual(generated_json['build-number'], 123)

        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, expected_exit_code=-3)

    def test_upload_json(self):
        runner, port = self.create_runner()
        port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'

        called = []
        upload_single_text_file_throws = False
        upload_single_text_file_return_value = StringIO.StringIO('OK')

        class MockFileUploader:
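            # The methods below deliberately name their first parameter 'mock'
            # instead of 'self', so that 'self' keeps referring to the enclosing
            # test case and its assertion methods.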
            def __init__(mock, url, timeout):
                self.assertEqual(url, 'https://some.host/api/test/report')
                self.assertTrue(isinstance(timeout, int) and timeout)
                called.append('FileUploader')

            def upload_single_text_file(mock, filesystem, content_type, filename):
                self.assertEqual(filesystem, port.host.filesystem)
                self.assertEqual(content_type, 'application/json')
                self.assertEqual(filename, 'some.json')
                called.append('upload_single_text_file')
                if upload_single_text_file_throws:
                    raise Exception("Some exception")
                return upload_single_text_file_return_value

        runner._upload_json('some.host', 'some.json', MockFileUploader)
        self.assertEqual(called, ['FileUploader', 'upload_single_text_file'])

        output = OutputCapture()
        output.capture_output()
        upload_single_text_file_return_value = StringIO.StringIO('Some error')
        runner._upload_json('some.host', 'some.json', MockFileUploader)
        _, _, logs = output.restore_output()
        self.assertEqual(logs, 'Uploaded JSON but got a bad response:\nSome error\n')

        # An exception thrown by upload_single_text_file shouldn't blow up _upload_json.
        called = []
        upload_single_text_file_throws = True
        runner._upload_json('some.host', 'some.json', MockFileUploader)
        self.assertEqual(called, ['FileUploader', 'upload_single_text_file'])

    def _add_file(self, runner, dirname, filename, content=True):
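        # Registers a file in the mock filesystem under the runner's base path;
        # content defaults to a placeholder (True) since these tests only check
        # which files get collected, not what they contain.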
        dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path
        runner._host.filesystem.maybe_make_directory(dirname)
        runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content

    def test_collect_tests(self):
        runner, port = self.create_runner()
        self._add_file(runner, 'inspector', 'a_file.html', 'a content')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)

    def _collect_tests_and_sort_test_name(self, runner):
        return sorted([test.test_name() for test in runner._collect_tests()])

    def test_collect_tests_with_multiple_files(self):
        runner, port = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])

        def add_file(filename):
            port.host.filesystem.files[runner._host.filesystem.join(runner._base_path, filename)] = 'some content'

        add_file('test1.html')
        add_file('test2.html')
        add_file('test3.html')
        port.host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])

    def test_collect_tests_with_skipped_list(self):
        runner, port = self.create_runner()

        self._add_file(runner, 'inspector', 'test1.html')
        self._add_file(runner, 'inspector', 'unsupported_test1.html')
        self._add_file(runner, 'inspector', 'test2.html')
        self._add_file(runner, 'inspector/resources', 'resource_file.html')
        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])

    def test_collect_tests_with_skipped_list_and_force(self):
        # --force collects tests even when they appear in the skipped list.
        runner, port = self.create_runner(args=['--force'])

        self._add_file(runner, 'inspector', 'test1.html')
        self._add_file(runner, 'inspector', 'unsupported_test1.html')
        self._add_file(runner, 'inspector', 'test2.html')
        self._add_file(runner, 'inspector/resources', 'resource_file.html')
        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html'])

    def test_collect_tests_with_page_load_svg(self):
        runner, port = self.create_runner()
        self._add_file(runner, 'PageLoad', 'some-svg-test.svg')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)
        self.assertEqual(tests[0].__class__.__name__, 'PageLoadingPerfTest')

    def test_collect_tests_should_ignore_replay_tests_by_default(self):
        runner, port = self.create_runner()
        self._add_file(runner, 'Replay', 'www.webkit.org.replay')
        self.assertEqual(runner._collect_tests(), [])

    def test_collect_tests_with_replay_tests(self):
        runner, port = self.create_runner(args=['--replay'])
        self._add_file(runner, 'Replay', 'www.webkit.org.replay')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)
        self.assertEqual(tests[0].__class__.__name__, 'ReplayPerfTest')

    def test_parse_args(self):
        options, args = PerfTestsRunner._parse_args([
                '--build-directory=folder42',
                '--platform=platform42',
                '--builder-name', 'webkit-mac-1',
                '--build-number=56',
                '--time-out-ms=42',
                '--output-json-path=a/output.json',
                '--source-json-path=a/source.json',
                '--test-results-server=somehost',
                '--debug'])
        self.assertEqual(options.build, True)
        self.assertEqual(options.build_directory, 'folder42')
        self.assertEqual(options.platform, 'platform42')
        self.assertEqual(options.builder_name, 'webkit-mac-1')
        self.assertEqual(options.build_number, '56')
        self.assertEqual(options.time_out_ms, '42')
        self.assertEqual(options.configuration, 'Debug')
        self.assertEqual(options.output_json_path, 'a/output.json')
        self.assertEqual(options.source_json_path, 'a/source.json')
        self.assertEqual(options.test_results_server, 'somehost')


if __name__ == '__main__':
    unittest.main()