# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 """Unit tests for run_perf_tests."""
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
from webkitpy.layout_tests.port.test import TestPort
from webkitpy.layout_tests.views import printing
from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
from webkitpy.performance_tests.perftest import PerfTest
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner


class MainTest(unittest.TestCase):
    def assertWritten(self, stream, contents):
        self.assertEqual(stream.buflist, contents)
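
    # A stub standing in for the real DumpRenderTree driver: run_test()
    # returns canned DriverOutput keyed solely off the test file name,
    # so no real process is ever launched.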
    class TestDriver:
        def run_test(self, driver_input):
            text = ''
            timeout = False
            crash = False
            if driver_input.test_name.endswith('pass.html'):
                text = 'RESULT group_name: test_name= 42 ms'
            elif driver_input.test_name.endswith('timeout.html'):
                timeout = True
            elif driver_input.test_name.endswith('failed.html'):
                # No output at all should make the harness treat the run as failed.
                text = None
            elif driver_input.test_name.endswith('tonguey.html'):
                text = 'we are not expecting an output from perf tests but RESULT blablabla'
            elif driver_input.test_name.endswith('crash.html'):
                crash = True
            elif driver_input.test_name.endswith('event-target-wrapper.html'):
                # The individual sample lines here are abridged placeholders;
                # the harness is expected to read only the summary statistics below.
                text = """Running 20 times
Ignoring warm-up run (1502)
1504
1471
1510
1487

avg 1489.05 ms
median 1487 ms
stdev 14.46 ms
min 1471 ms
max 1510 ms"""
            elif driver_input.test_name.endswith('some-parser.html'):
                # As above, the sample lines are placeholders; the statistics
                # match what the tests below assert.
                text = """Running 20 times
Ignoring warm-up run (1115)
1080
1120
1101

avg 1100 ms
median 1101 ms
stdev 11 ms
min 1080 ms
max 1120 ms"""
            return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)

        def start(self):
            pass

        def stop(self):
            pass
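
    # create_runner() wires a PerfTestsRunner to a TestPort whose
    # create_driver() hands back the stub driver; the three directories
    # mirror the PerformanceTests layout these tests rely on.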
    def create_runner(self, args=[], driver_class=TestDriver):
        options, parsed_args = PerfTestsRunner._parse_args(args)
        test_port = TestPort(host=MockHost(), options=options)
        test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()

        runner = PerfTestsRunner(args=args, port=test_port)
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
        return runner, test_port
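
    # Convenience wrapper: run a single Chromium-style test through
    # _run_single_test and report whether it succeeded.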
    def run_test(self, test_name):
        runner, port = self.create_runner()
        driver = MainTest.TestDriver()
        return runner._run_single_test(ChromiumStylePerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name)), driver)

    def test_run_passing_test(self):
        self.assertTrue(self.run_test('pass.html'))

    def test_run_silent_test(self):
        self.assertFalse(self.run_test('silent.html'))

    def test_run_failed_test(self):
        self.assertFalse(self.run_test('failed.html'))

    def test_run_tonguey_test(self):
        self.assertFalse(self.run_test('tonguey.html'))

    def test_run_timeout_test(self):
        self.assertFalse(self.run_test('timeout.html'))

    def test_run_crash_test(self):
        self.assertFalse(self.run_test('crash.html'))

    def _tests_for_runner(self, runner, test_names):
        filesystem = runner._host.filesystem
        tests = []
        for test in test_names:
            path = filesystem.join(runner._base_path, test)
            if test.startswith('inspector/'):
                tests.append(ChromiumStylePerfTest(runner._port, test, path))
            else:
                tests.append(PerfTest(runner._port, test, path))
        return tests
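
    # Six fixtures, one expected success: everything except pass.html
    # should be counted as an unexpected result.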
    def test_run_test_set(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, len(tests) - 1)
        self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)

    def test_run_test_set_kills_drt_per_run(self):

        class TestDriverWithStopCount(MainTest.TestDriver):
            stop_count = 0

            def stop(self):
                TestDriverWithStopCount.stop_count += 1

        runner, port = self.create_runner(driver_class=TestDriverWithStopCount)
        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
            'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        unexpected_result_count = runner._run_tests_set(tests, port)

        # One stop() per test: the runner restarts the driver for each run.
        self.assertEqual(TestDriverWithStopCount.stop_count, 6)

    def test_run_test_pause_before_testing(self):
        class TestDriverWithStartCount(MainTest.TestDriver):
            start_count = 0

            def start(self):
                TestDriverWithStartCount.start_count += 1

        runner, port = self.create_runner(args=["--pause-before-testing"], driver_class=TestDriverWithStartCount)
        tests = self._tests_for_runner(runner, ['inspector/pass.html'])

        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
            self.assertEqual(TestDriverWithStartCount.start_count, 1)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(stderr, "Ready to run test?\n")
        self.assertEqual(log, "Running inspector/pass.html (1 of 1)\nRESULT group_name: test_name= 42 ms\n\n")
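
    # Parser-style tests report aggregated statistics rather than a single
    # RESULT line; the expected log below comes from the canned driver output.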
    def test_run_test_set_for_parser_tests(self):
        runner, port = self.create_runner()
        tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests, port)
        finally:
            stdout, stderr, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(log, '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
            'RESULT Bindings: event-target-wrapper= 1489.05 ms',
            'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
            '',
            'Running Parser/some-parser.html (2 of 2)',
            'RESULT Parser: some-parser= 1100.0 ms',
            'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
            '']))

    def test_run_test_set_with_json_output(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json'])
        port.host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
        port.host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
        runner._timestamp = 123456789
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(runner.run(), 0)
        finally:
            stdout, stderr, logs = output_capture.restore_output()

        self.assertEqual(logs,
            '\n'.join(['Running 2 tests',
                'Running Bindings/event-target-wrapper.html (1 of 2)',
                'RESULT Bindings: event-target-wrapper= 1489.05 ms',
                'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
                '',
                'Running inspector/pass.html (2 of 2)',
                'RESULT group_name: test_name= 42 ms',
                '', '']))

        self.assertEqual(json.loads(port.host.filesystem.files['/mock-checkout/output.json']), {
            "timestamp": 123456789, "results":
            {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
            "inspector/pass.html:group_name:test_name": 42},
            "webkit-revision": 5678})
    def test_run_test_set_with_json_source(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json', '--source-json-path=/mock-checkout/source.json'])
        port.host.filesystem.files['/mock-checkout/source.json'] = '{"key": "value"}'
        port.host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
        port.host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
        runner._timestamp = 123456789
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(runner.run(), 0)
        finally:
            stdout, stderr, logs = output_capture.restore_output()

        self.assertEqual(logs, '\n'.join(['Running 2 tests',
            'Running Bindings/event-target-wrapper.html (1 of 2)',
            'RESULT Bindings: event-target-wrapper= 1489.05 ms',
            'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
            '',
            'Running inspector/pass.html (2 of 2)',
            'RESULT group_name: test_name= 42 ms',
            '', '']))

        self.assertEqual(json.loads(port.host.filesystem.files['/mock-checkout/output.json']), {
            "timestamp": 123456789, "results":
            {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
            "inspector/pass.html:group_name:test_name": 42},
            "webkit-revision": 5678,
            "key": "value"})

    def test_run_test_set_with_multiple_repositories(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json'])
        port.host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
        runner._timestamp = 123456789
        port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
        self.assertEqual(runner.run(), 0)
        self.assertEqual(json.loads(port.host.filesystem.files['/mock-checkout/output.json']), {
            "timestamp": 123456789, "results": {"inspector/pass.html:group_name:test_name": 42.0}, "webkit-revision": 5678, "some-revision": 5678})
    def test_run_with_upload_json(self):
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
        upload_json_is_called = [False]
        upload_json_returns_true = True

        def mock_upload_json(hostname, json_path):
            self.assertEqual(hostname, 'some.host')
            self.assertEqual(json_path, '/mock-checkout/output.json')
            upload_json_is_called[0] = True
            return upload_json_returns_true

        runner._upload_json = mock_upload_json
        port.host.filesystem.files['/mock-checkout/source.json'] = '{"key": "value"}'
        port.host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
        port.host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
        runner._timestamp = 123456789
        self.assertEqual(runner.run(), 0)
        self.assertEqual(upload_json_is_called[0], True)
        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
        self.assertEqual(generated_json['platform'], 'platform1')
        self.assertEqual(generated_json['builder-name'], 'builder1')
        self.assertEqual(generated_json['build-number'], 123)

        # A failed upload should surface as a nonzero exit code.
        upload_json_returns_true = False
        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
            '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
        runner._upload_json = mock_upload_json
        self.assertEqual(runner.run(), -3)
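
    # Exercises _upload_json directly with a mock FileUploader class,
    # covering success, a bad server response, and an uploader exception.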
    def test_upload_json(self):
        runner, port = self.create_runner()
        port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'

        called = []
        upload_single_text_file_throws = False
        upload_single_text_file_return_value = StringIO.StringIO('OK')

        class MockFileUploader:
            # 'mock' is used instead of 'self' so that 'self' keeps referring
            # to the enclosing test case inside these methods.
            def __init__(mock, url, timeout):
                self.assertEqual(url, 'https://some.host/api/test/report')
                self.assertTrue(isinstance(timeout, int) and timeout)
                called.append('FileUploader')

            def upload_single_text_file(mock, filesystem, content_type, filename):
                self.assertEqual(filesystem, port.host.filesystem)
                self.assertEqual(content_type, 'application/json')
                self.assertEqual(filename, 'some.json')
                called.append('upload_single_text_file')
                if upload_single_text_file_throws:
                    raise Exception("Some exception")
                return upload_single_text_file_return_value

        runner._upload_json('some.host', 'some.json', MockFileUploader)
        self.assertEqual(called, ['FileUploader', 'upload_single_text_file'])

        output = OutputCapture()
        output.capture_output()
        upload_single_text_file_return_value = StringIO.StringIO('Some error')
        runner._upload_json('some.host', 'some.json', MockFileUploader)
        _, _, logs = output.restore_output()
        self.assertEqual(logs, 'Uploaded JSON but got a bad response:\nSome error\n')

        # An exception thrown by upload_single_text_file shouldn't blow up _upload_json.
        called = []
        upload_single_text_file_throws = True
        runner._upload_json('some.host', 'some.json', MockFileUploader)
        self.assertEqual(called, ['FileUploader', 'upload_single_text_file'])
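
    # _add_file writes into the MockFileSystem so _collect_tests can
    # discover synthetic test files without touching the real disk.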
    def _add_file(self, runner, dirname, filename, content=True):
        dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path
        runner._host.filesystem.maybe_make_directory(dirname)
        runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content

    def test_collect_tests(self):
        runner, port = self.create_runner()
        self._add_file(runner, 'inspector', 'a_file.html', 'a content')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)

    def _collect_tests_and_sort_test_name(self, runner):
        return sorted([test.test_name() for test in runner._collect_tests()])

    def test_collect_tests_with_multiple_files(self):
        runner, port = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])

        def add_file(filename):
            port.host.filesystem.files[runner._host.filesystem.join(runner._base_path, filename)] = 'some content'

        add_file('test1.html')
        add_file('test2.html')
        add_file('test3.html')
        # Change into the directory above PerformanceTests so the relative
        # paths passed on the command line resolve.
        port.host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])

    def test_collect_tests_with_skipped_list(self):
        runner, port = self.create_runner()

        self._add_file(runner, 'inspector', 'test1.html')
        self._add_file(runner, 'inspector', 'unsupported_test1.html')
        self._add_file(runner, 'inspector', 'test2.html')
        self._add_file(runner, 'inspector/resources', 'resource_file.html')
        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])

    def test_collect_tests_with_skipped_list_and_force(self):
        # --force collects every test, even those on the skipped list.
        runner, port = self.create_runner(args=['--force'])

        self._add_file(runner, 'inspector', 'test1.html')
        self._add_file(runner, 'inspector', 'unsupported_test1.html')
        self._add_file(runner, 'inspector', 'test2.html')
        self._add_file(runner, 'inspector/resources', 'resource_file.html')
        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
        self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html'])
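
    # Files under PageLoad/ (including SVGs) should be collected as
    # PageLoadingPerfTest rather than the generic PerfTest.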
    def test_collect_tests_with_page_load_svg(self):
        runner, port = self.create_runner()
        self._add_file(runner, 'PageLoad', 'some-svg-test.svg')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)
        self.assertEqual(tests[0].__class__.__name__, 'PageLoadingPerfTest')

    def test_collect_tests_should_ignore_replay_tests_by_default(self):
        runner, port = self.create_runner()
        self._add_file(runner, 'Replay', 'www.webkit.org.replay')
        self.assertEqual(runner._collect_tests(), [])

    def test_collect_tests_with_replay_tests(self):
        runner, port = self.create_runner(args=['--replay'])
        self._add_file(runner, 'Replay', 'www.webkit.org.replay')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)
        self.assertEqual(tests[0].__class__.__name__, 'ReplayPerfTest')
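
    # _parse_args round-trip: note that build_number and time_out_ms come
    # back as strings, not integers, so the assertions below compare
    # against '56' and '42'.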
    def test_parse_args(self):
        runner, port = self.create_runner()
        options, args = PerfTestsRunner._parse_args([
            '--build-directory=folder42',
            '--platform=platform42',
            '--builder-name', 'webkit-mac-1',
            '--build-number=56',
            '--time-out-ms=42',
            '--output-json-path=a/output.json',
            '--source-json-path=a/source.json',
            '--test-results-server=somehost',
            '--debug', 'arg1', 'arg2'])
        self.assertEqual(options.build, True)
        self.assertEqual(options.build_directory, 'folder42')
        self.assertEqual(options.platform, 'platform42')
        self.assertEqual(options.builder_name, 'webkit-mac-1')
        self.assertEqual(options.build_number, '56')
        self.assertEqual(options.time_out_ms, '42')
        self.assertEqual(options.configuration, 'Debug')
        self.assertEqual(options.output_json_path, 'a/output.json')
        self.assertEqual(options.source_json_path, 'a/source.json')
        self.assertEqual(options.test_results_server, 'somehost')
        self.assertEqual(args, ['arg1', 'arg2'])


if __name__ == '__main__':
    unittest.main()