NRWT: Add the ability to upload test results to new test results server
Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""
The Manager runs a series of tests (TestType interface) against a set
of test files.  If a test file fails a TestType, it returns a list of TestFailure
objects to the Manager. The Manager then aggregates the TestFailures to
create a final report.
"""

import json
import logging
import random
import sys
import time

from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_layout_results_generator
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput

_log = logging.getLogger(__name__)

# Builder base URL where we have the archived test results.
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"

TestExpectations = test_expectations.TestExpectations


class Manager(object):
    """A class for managing the running of a series of tests on a set of
    layout test files."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'

        # disable wss server. need to install pyOpenSSL on buildbots.
        # self._websocket_secure_server = websocket_server.PyWebSocket(
        #        options.results_directory, use_tls=True, port=9323)

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _is_perf_test(self, test):
        return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
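        """Wrap a test name in a TestInput, using the slow timeout for tests
        marked SLOW and noting whether the test needs the exclusive lock."""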
        return TestInput(test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._test_requires_lock(test_file))

    def _test_requires_lock(self, test_file):
        """Return True if the test needs to be locked when
        running multiple copies of NRWTs. Perf tests are locked
        because the heavy load caused by running other tests in parallel
        might cause some of them to time out."""
        return self._is_http_test(test_file) or self._is_perf_test(test_file)

    def _test_is_slow(self, test_file):
        return self._expectations.has_modifier(test_file, test_expectations.SLOW)

    def needs_servers(self, test_names):
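        """Return True if any test requires the exclusive lock (http, websocket,
        or perf tests) and http tests have not been disabled in the options."""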
        return any(self._test_requires_lock(test_name) for test_name in test_names) and self._options.http

    def _set_up_run(self, test_names):
        self._printer.write_update("Checking build ...")
        if not self._port.check_build(self.needs_servers(test_names)):
            _log.error("Build check failed")
            return False

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        if self._options.pixel_tests:
            self._printer.write_update("Starting pixel test helper ...")
            self._port.start_helper()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps(self.needs_servers(test_names)):
                self._port.stop_helper()
                return False

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        self._port.setup_test_run()
        return True

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        if not self._set_up_run(tests_to_run):
            return test_run_results.RunDetails(exit_code=-1)

        start_time = time.time()
        enabled_pixel_tests_in_retry = False
        try:
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                int(self._options.child_processes), retrying=False)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            if self._options.retry_failures and tests_to_retry and not initial_results.interrupted:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
                    num_workers=1, retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        end_time = time.time()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if self._options.show_results and (initial_results.unexpected_results_by_name or
                                               (self._options.full_results_html and initial_results.total_failures)):
                self._port.show_results_html_file(results_path)

        return test_run_results.RunDetails(self._port.exit_code_from_summarized_results(summarized_results),
                                           summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
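        """Run one pass over the given tests.

        The list of TestInputs is expanded so that the whole set is run
        `iterations` times and each individual test `repeat_each` times, then
        handed to the LayoutTestRunner along with the number of workers and
        whether the http/websocket servers are needed."""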
        needs_http = any(self._is_http_test(test) for test in tests_to_run)
        needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)

        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, retrying)

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
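        """If pixel tests were disabled, enable them for the retry pass
        (restarting the helper) and return True; otherwise return False."""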
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        self._port.start_helper()

        return True

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress, do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))

    def _tests_to_retry(self, run_results, include_crashes):
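        """Return the names of tests with unexpected results that are worth
        retrying: everything except passes, missing results and, unless
        include_crashes is set, crashes."""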
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if
                   ((result.type != test_expectations.PASS) and
                    (result.type != test_expectations.MISSING) and
                    (result.type != test_expectations.CRASH or include_crashes))]

    def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
        """Writes the results of the test run as JSON files into the results
        dir and uploads the files to the appengine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
          results_including_passes: summary that also includes passing tests,
              uploaded to the new results server when options.results_server_host is set
          start_time, end_time: wall-clock times of the test run, reported to the
              new results server
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")

        results_json_path = self._filesystem.join(self._results_directory, "results_including_passes.json")
        if results_including_passes:
            json_results_generator.write_json(self._filesystem, results_including_passes, results_json_path)

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            BUILDER_BASE_URL,
            self._expectations, initial_results,
            self._options.test_results_server,
            "layout-tests",
            self._options.master_name)

        _log.debug("Finished writing JSON files.")

        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]

        generator.upload_json_files(json_files)
        if results_including_passes:
            self.upload_results(results_json_path, start_time, end_time)

        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)
        if results_including_passes:
            self._filesystem.remove(results_json_path)

    def upload_results(self, results_json_path, start_time, end_time):
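        """Upload results_including_passes.json to the results server, if one was given.

        The upload is a multipart form POST to http://<host>/api/report carrying
        the master/builder/build/slave/revision metadata plus the start and end
        times; the server is expected to answer with a JSON body whose 'status'
        is 'OK'."""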
        host = self._options.results_server_host
        if not host:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        got_revision = self._options.got_revision
        if not master_name or not builder_name or not build_number or not build_slave or not got_revision:
            _log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, --build-slave, or --got-revision was not. Not uploading JSON files.")
            return

        _log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, host)

        attrs = [
            ('master', master_name),
            ('builder_name', builder_name),
            ('build_number', build_number),
            ('build_slave', build_slave),
            ('revision', got_revision),
            ('start_time', str(start_time)),
            ('end_time', str(end_time)),
        ]

        uploader = FileUploader("http://%s/api/report" % host, 360)
        try:
            response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
            if not response:
                _log.error("JSON upload failed; no response returned")
                return

            if response.code != 200:
                _log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
                return

            response_text = response.read()
            try:
                response_json = json.loads(response_text)
            except ValueError, error:
                _log.error("JSON upload failed; failed to parse the response: %s", response_text)
                return

            if response_json['status'] != 'OK':
                _log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
                return

            _log.info("JSON uploaded.")
        except Exception, error:
            _log.error("Upload failed: %s" % error)
            return

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
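        """Build a trie, keyed by test path, of per-test stats: worker number,
        test number, pid, and the test and total run times in milliseconds."""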
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie