# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""
The Manager runs a series of tests (TestType interface) against a set
of test files.  If a test file fails a TestType, it returns a list of TestFailure
objects to the Manager. The Manager then aggregates the TestFailures to
create a final report.
"""

import json
import logging
import random
import sys
import time

from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_layout_results_generator
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput

_log = logging.getLogger(__name__)

# Builder base URL where we have the archived test results.
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"

TestExpectations = test_expectations.TestExpectations


class Manager(object):
    """A class that manages running a series of tests on a set of layout
    test files."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: the parsed command-line options object
          printer: a Printer object for reporting status updates
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'

        # disable wss server. need to install pyOpenSSL on buildbots.
        # self._websocket_secure_server = websocket_server.PyWebSocket(
        #        options.results_directory, use_tls=True, port=9323)

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)

    def _collect_tests(self, args):
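        """Return the (paths, test_names) pair found by the LayoutTestFinder for the given command-line args."""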
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _is_perf_test(self, test):
        return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test

    def _prepare_lists(self, paths, test_names):
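        """Split test_names into (tests_to_run, tests_to_skip).

        Skipped tests come from the finder and the expectations; the remaining
        tests are ordered (naturally or randomly, per --order) and then split
        into chunks, with tests belonging to other chunks also marked skipped.
        """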
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
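        """Build a TestInput for the file, choosing the slow timeout for tests
        marked SLOW and recording whether the test requires an exclusive lock."""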
        return TestInput(test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._test_requires_lock(test_file))

    def _test_requires_lock(self, test_file):
        """Return True if the test needs to be locked when running multiple
        copies of NRWT in parallel. Perf tests are locked because the heavy
        load caused by running other tests in parallel might cause some of
        them to time out."""
        return self._is_http_test(test_file) or self._is_perf_test(test_file)

    def _test_is_slow(self, test_file):
        return self._expectations.has_modifier(test_file, test_expectations.SLOW)

    def needs_servers(self, test_names):
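        """Return True if any of the tests require an exclusive lock (HTTP or
        perf tests) and the http option is enabled."""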
        return any(self._test_requires_lock(test_name) for test_name in test_names) and self._options.http

    def _set_up_run(self, test_names):
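        """Prepare the environment for a run: check the build, start the
        helper, verify system dependencies, optionally clobber old results,
        and create the results directory. Return False if any check fails."""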
        self._printer.write_update("Checking build ...")
        if not self._port.check_build(self.needs_servers(test_names)):
            _log.error("Build check failed")
            return False

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        self._printer.write_update("Starting helper ...")
        self._port.start_helper(self._options.pixel_tests)

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps(self.needs_servers(test_names)):
                self._port.stop_helper()
                return False

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        self._port.setup_test_run()
        return True

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        if not self._set_up_run(tests_to_run):
            return test_run_results.RunDetails(exit_code=-1)

        start_time = time.time()
        enabled_pixel_tests_in_retry = False
        try:
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                int(self._options.child_processes), retrying=False)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            if self._options.retry_failures and tests_to_retry and not initial_results.interrupted:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
                    num_workers=1, retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        end_time = time.time()

        # Some crash logs can take a long time to be written out, so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=True, include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if self._options.show_results and (initial_results.unexpected_results_by_name or
                                               (self._options.full_results_html and initial_results.total_failures)):
                self._port.show_results_html_file(results_path)

        return test_run_results.RunDetails(self._port.exit_code_from_summarized_results(summarized_results),
                                           summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
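        """Expand the test list into TestInputs (repeating each test repeat_each
        times and the whole list for the requested number of iterations) and
        delegate the run to the LayoutTestRunner."""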
        needs_http = any(self._is_http_test(test) for test in tests_to_run)
        needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)

        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, retrying)

    def _clean_up_run(self):
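        """Restore the system to its pre-run state: flush stdout/stderr, stop
        the helper, and let the port clean up after the test run."""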
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
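        """If pixel tests are currently disabled, enable them for the retry pass,
        restarting the helper in the process, and return True; otherwise return False."""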
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        self._port.start_helper()

        return True

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress, do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if result.type != test_expectations.CRASH:
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))

    def _tests_to_retry(self, run_results, include_crashes):
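        """Return the names of tests with unexpected results that were neither
        passes nor missing results; crashes are included only when
        include_crashes is True."""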
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if
                   ((result.type != test_expectations.PASS) and
                    (result.type != test_expectations.MISSING) and
                    (result.type != test_expectations.CRASH or include_crashes))]

    def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
        """Write the results of the test run as JSON files into the results
        directory and upload the files to the App Engine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")

        results_json_path = self._filesystem.join(self._results_directory, "results_including_passes.json")
        if results_including_passes:
            json_results_generator.write_json(self._filesystem, results_including_passes, results_json_path)

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            BUILDER_BASE_URL,
            self._expectations, initial_results,
            self._options.test_results_server,
            "layout-tests",
            self._options.master_name)

        _log.debug("Finished writing JSON files.")

        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]

        generator.upload_json_files(json_files)
        if results_including_passes:
            self.upload_results(results_json_path, start_time, end_time)

        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)
        if results_including_passes:
            self._filesystem.remove(results_json_path)

    def upload_results(self, results_json_path, start_time, end_time):
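        """Upload results_including_passes.json to the results server along with
        the build metadata (master, builder, build number, slave, and repository
        revisions). Log an error and return early if the required options are
        missing or the upload fails."""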
        hostname = self._options.results_server_host
        if not hostname:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_slave:
            _log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files.")
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem, self._port.host.executive).detect_scm_system(path) or self._port.host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        _log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, hostname)

        attrs = [
            ('master', 'build.webkit.org' if master_name == 'webkit.org' else master_name),  # FIXME: Pass in build.webkit.org.
            ('builder_name', builder_name),
            ('build_number', build_number),
            ('build_slave', build_slave),
            ('revisions', json.dumps(revisions)),
            ('start_time', str(start_time)),
            ('end_time', str(end_time)),
        ]

        uploader = FileUploader("http://%s/api/report" % hostname, 360)
        try:
            response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
            if not response:
                _log.error("JSON upload failed; no response returned")
                return

            if response.code != 200:
                _log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
                return

            response_text = response.read()
            try:
                response_json = json.loads(response_text)
            except ValueError, error:
                _log.error("JSON upload failed; failed to parse the response: %s", response_text)
                return

            if response_json['status'] != 'OK':
                _log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
                return

            _log.info("JSON uploaded.")
        except Exception, error:
            _log.error("Upload failed: %s" % error)
            return

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
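        """Build a trie, keyed by test path, mapping each non-skipped test to its
        worker number, test number, pid, and run times (in milliseconds)."""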
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie