# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""
The Manager runs a series of tests (TestType interface) against a set
of test files.  If a test file fails a TestType, it returns a list of TestFailure
objects to the Manager. The Manager then aggregates the TestFailures to
create a final report.
"""

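# A minimal usage sketch (hypothetical `port`, `options`, `printer`, and `args`
# objects are shown only for illustration; in practice this class is driven by
# the run-webkit-tests entry point, webkitpy.layout_tests.run_webkit_tests):
#
#     manager = Manager(port, options, printer)
#     details = manager.run(args)
#     sys.exit(details.exit_code)
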
import json
import logging
import random
import sys
import time

from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_layout_results_generator
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.layout_tests.models.test_run_results import INTERRUPTED_EXIT_STATUS
from webkitpy.tool.grammar import pluralize

_log = logging.getLogger(__name__)

TestExpectations = test_expectations.TestExpectations


class Manager(object):
    """Manages running a series of tests across a set of layout test files."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object used to report progress updates
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None
        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.web_platform_test_subdir = self._port.web_platform_test_server_doc_root()
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test) or self._is_web_platform_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _is_web_platform_test(self, test):
        return self.web_platform_test_subdir in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
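        # Wrap the test in a TestInput carrying the timeout to use (the slow
        # timeout when the expectations mark the test SLOW) and whether the
        # test needs the HTTP server.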
        return TestInput(test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._is_http_test(test_file))

    def _test_is_slow(self, test_file):
        return self._expectations.model().has_modifier(test_file, test_expectations.SLOW)

    def needs_servers(self, test_names):
        return any(self._is_http_test(test_name) for test_name in test_names) and self._options.http

    def _get_test_inputs(self, tests_to_run, repeat_each, iterations):
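        # Ordering note: the outer loop repeats the full pass `iterations`
        # times, and within a pass each test is repeated `repeat_each` times
        # back-to-back, e.g. iterations=2, repeat_each=2 over [a, b] yields
        # a, a, b, b, a, a, b, b.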
        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return test_inputs

    def _update_worker_count(self, test_names):
        test_inputs = self._get_test_inputs(test_names, self._options.repeat_each, self._options.iterations)
        worker_count = self._runner.get_worker_count(test_inputs, int(self._options.child_processes))
        self._options.child_processes = worker_count

    def _set_up_run(self, test_names):
        self._printer.write_update("Checking build ...")
        if not self._port.check_build(self.needs_servers(test_names)):
            _log.error("Build check failed")
            return False

        # The helper must be started before we check the system dependencies,
        # since it may fix up the environment those checks depend on.
        self._printer.write_update("Starting helper ...")
        if not self._port.start_helper(self._options.pixel_tests):
            return False

        self._update_worker_count(test_names)
        self._port.reset_preferences()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps(self.needs_servers(test_names)):
                self._port.stop_helper()
                return False

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        self._port.setup_test_run()
        return True

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        try:
            if not self._set_up_run(tests_to_run):
                return test_run_results.RunDetails(exit_code=-1)

            enabled_pixel_tests_in_retry = False
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                int(self._options.child_processes), retrying=False)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when the run was interrupted by the user or by the failure limit.
            retry_failures = self._options.retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %s ..." % pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
                    num_workers=1, retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        end_time = time.time()

        # Some crash logs can take a long time to be written out, so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=True, include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (initial_results.unexpected_results_by_name or
                    (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
        needs_http = any((self._is_http_test(test) and not self._is_web_platform_test(test)) for test in tests_to_run)
        needs_web_platform_test_server = any(self._is_web_platform_test(test) for test in tests_to_run)
        needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)

        test_inputs = self._get_test_inputs(tests_to_run, repeat_each, iterations)
        return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, needs_web_platform_test_server, retrying)

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
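        # Used by run() before the retry pass: if pixel tests were already
        # enabled there is nothing to force; otherwise restart the helper with
        # pixel tests on, and run() turns the flag back off after the retry.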
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        return self._port.start_helper()

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress, do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

                # Check whether this crashing 'test' is already in the list of crashed_processes; if not, add it to run_results.
                if not any(process[0] == test for process in crashed_processes):
                    result = test_results.TestResult(test)
                    result.type = test_expectations.CRASH
                    result.is_other_crash = True
                    run_results.add(result, expected=False, test_is_slow=False)
                    _log.debug("Adding results for other crash: " + str(test))

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))

    def _tests_to_retry(self, run_results, include_crashes):
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if
                   ((result.type != test_expectations.PASS) and
                    (result.type != test_expectations.MISSING) and
                    (result.type != test_expectations.CRASH or include_crashes))]

    def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
        """Writes the results of the test run as JSON files into the results
        dir and uploads the files to the appengine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")

        results_json_path = self._filesystem.join(self._results_directory, "results_including_passes.json")
        if results_including_passes:
            json_results_generator.write_json(self._filesystem, results_including_passes, results_json_path)

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            self._expectations, initial_results,
            self._options.test_results_server,
            "layout-tests",
            self._options.master_name)

        if generator.generate_json_output():
            _log.debug("Finished writing JSON file for the test results server.")
        else:
            _log.debug("Failed to generate JSON file for the test results server.")
            return

        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]

        generator.upload_json_files(json_files)
        if results_including_passes:
            self.upload_results(results_json_path, start_time, end_time)

        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)
        if results_including_passes:
            self._filesystem.remove(results_json_path)

    def upload_results(self, results_json_path, start_time, end_time):
        hostname = self._options.results_server_host
        if not hostname:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_slave:
            _log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files.")
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem, self._port.host.executive).detect_scm_system(path) or self._port.host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        _log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, hostname)

        attrs = [
            ('master', 'build.webkit.org' if master_name == 'webkit.org' else master_name),  # FIXME: Pass in build.webkit.org.
            ('builder_name', builder_name),
            ('build_number', build_number),
            ('build_slave', build_slave),
            ('revisions', json.dumps(revisions)),
            ('start_time', str(start_time)),
            ('end_time', str(end_time)),
        ]

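        # The report endpoint is expected to answer the multipart POST with
        # JSON of the form {"status": "OK"}; a non-200 response or any other
        # status value is treated as a failed upload below.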
        uploader = FileUploader("http://%s/api/report" % hostname, 360)
        try:
            response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
            if not response:
                _log.error("JSON upload failed; no response returned")
                return

            if response.code != 200:
                _log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
                return

            response_text = response.read()
            try:
                response_json = json.loads(response_text)
            except ValueError, error:
                _log.error("JSON upload failed; failed to parse the response: %s", response_text)
                return

            if response_json['status'] != 'OK':
                _log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
                return

            _log.info("JSON uploaded.")
        except Exception, error:
            _log.error("Upload failed: %s" % error)
            return

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
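            # Worker names are assumed to look like "worker/<n>"; pull out the
            # numeric suffix, or use -1 when no worker name was recorded.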
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie