run-webkit-tests: Upload test results (new results database)
Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""
The Manager runs a series of layout tests against a set of test files.
If a test fails, the runner reports a list of TestFailure objects to the
Manager, which aggregates them to create a final report.
"""

import json
import logging
import random
import sys
import time
from collections import defaultdict, OrderedDict

from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_layout_results_generator
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.layout_tests.models.test_run_results import INTERRUPTED_EXIT_STATUS
from webkitpy.tool.grammar import pluralize
from webkitpy.results.upload import Upload
from webkitpy.xcode.device_type import DeviceType

_log = logging.getLogger(__name__)

TestExpectations = test_expectations.TestExpectations


class Manager(object):
    """A class for managing the running of a series of tests against a set
    of layout test files."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to which progress updates are written
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = OrderedDict()
        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR + 'test'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.web_platform_test_subdir = self._port.web_platform_test_server_doc_root()
        self.webkit_specific_web_platform_test_subdir = 'http' + port.TEST_PATH_SEPARATOR + 'wpt' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = None

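        # tests-options.json (when present) appears to map test paths to a list of option
        # strings, e.g. {"fast/example/test.html": ["slow"]} (hypothetical path); only the
        # "slow" entry is consulted, in _test_is_slow() below.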
        test_options_json_path = self._port.path_from_webkit_base(self.LAYOUT_TESTS_DIRECTORY, "tests-options.json")
        self._tests_options = json.loads(self._filesystem.read_text_file(test_options_json_path)) if self._filesystem.exists(test_options_json_path) else {}

    def _collect_tests(self, args, device_type=None):
        return self._finder.find_tests(self._options, args, device_type=device_type)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test) or self._needs_web_platform_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _needs_web_platform_test(self, test):
        return self.web_platform_test_subdir in test or self.webkit_specific_web_platform_test_subdir in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _prepare_lists(self, paths, test_names, device_type=None):
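        """Split test_names into (tests_to_run, tests_to_skip) for this device type,
        applying the parsed expectations, the --order option, and chunking."""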
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations[device_type], self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations[device_type].add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file, device_type=None):
        return TestInput(test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file, device_type=device_type) else self._options.time_out_ms,
            self._is_http_test(test_file),
            should_dump_jsconsolelog_in_stderr=self._test_should_dump_jsconsolelog_in_stderr(test_file, device_type=device_type))

    def _test_is_slow(self, test_file, device_type=None):
        if self._expectations[device_type].model().has_modifier(test_file, test_expectations.SLOW):
            return True
        return "slow" in self._tests_options.get(test_file, [])

    def _test_should_dump_jsconsolelog_in_stderr(self, test_file, device_type=None):
        return self._expectations[device_type].model().has_modifier(test_file, test_expectations.DUMPJSCONSOLELOGINSTDERR)

    def needs_servers(self, test_names):
        return any(self._is_http_test(test_name) for test_name in test_names) and self._options.http

    def _get_test_inputs(self, tests_to_run, repeat_each, iterations, device_type=None):
        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test, device_type=device_type))
        return test_inputs

    def _update_worker_count(self, test_names, device_type=None):
        test_inputs = self._get_test_inputs(test_names, self._options.repeat_each, self._options.iterations, device_type=device_type)
        worker_count = self._runner.get_worker_count(test_inputs, int(self._options.child_processes))
        self._options.child_processes = worker_count

    def _set_up_run(self, test_names, device_type=None):
        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        self._printer.write_update("Starting helper ...")
        if not self._port.start_helper(self._options.pixel_tests):
            return False

        self._update_worker_count(test_names, device_type=device_type)
        self._port.reset_preferences()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps():
                self._port.stop_helper()
                return False

        self._port.setup_test_run(device_type)
        return True

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        num_failed_uploads = 0
        total_tests = set()
        aggregate_test_names = set()
        aggregate_tests = set()
        tests_to_run_by_device = {}

        device_type_list = self._port.supported_device_types()
        for device_type in device_type_list:
            for_device_type = 'for {} '.format(device_type) if device_type else ''
            self._printer.write_update('Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return test_run_results.RunDetails(exit_code=-1)

            self._printer.write_update('Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names, device_type=device_type)

            total_tests.update(tests_to_run)
            total_tests.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests]
            aggregate_tests.update(tests_to_run)

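        # Anything that was discovered but not scheduled on any device type is skipped
        # (note that this rebinds tests_to_skip from the last loop iteration above).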
        tests_to_skip = total_tests - aggregate_tests
        self._printer.print_found(len(aggregate_test_names), len(aggregate_tests), self._options.repeat_each, self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not sum([len(tests) for tests in tests_to_run_by_device.itervalues()]):
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test)) for tests in tests_to_run_by_device.itervalues() for test in tests)
        needs_web_platform_test_server = any(self._needs_web_platform_test(test) for tests in tests_to_run_by_device.itervalues() for test in tests)
        needs_websockets = any(self._is_websocket_test(test) for tests in tests_to_run_by_device.itervalues() for test in tests)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes

        for device_type in device_type_list:
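            # Rebind the runner's slow-test check to the device type being run in this pass.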
            self._runner._test_is_slow = lambda test_file: self._test_is_slow(test_file, device_type=device_type)
            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info('Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), ' for {}'.format(str(device_type)) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue
            if not self._set_up_run(tests_to_run_by_device[device_type], device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

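            # The uploaded configuration carries a 'flavor' field: 'wk2' for --webkit-test-runner
            # (WebKitTestRunner) runs, 'wk1' for DumpRenderTree runs.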
            configuration = self._port.configuration_for_upload(self._port.target_host(0))
            configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(tests_to_run_by_device[device_type], tests_to_skip, device_type=device_type)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

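                # Each report URL gets one payload per device type: the suite name, the port's
                # upload configuration, details/commits/run stats, and a trie of per-test
                # results built by _results_to_upload_json_trie() below.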
                upload = Upload(
                    suite='layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
                )
                for url in self._options.report_urls:
                    self._printer.write_update('Uploading to {} ...'.format(url))
                    if not upload.upload(url, log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

        # max_child_processes_for_run is the value most relevant to the final logging below.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)
        if num_failed_uploads:
            result.exit_code = -1
        return result

    def _run_test_subset(self, tests_to_run, tests_to_skip, device_type=None):
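        """Run tests_to_run once (honoring --repeat-each/--iterations), optionally retry the
        unexpected failures on a single worker, and return a tuple of
        (initial_results, retry_results, enabled_pixel_tests_in_retry)."""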
        try:
            enabled_pixel_tests_in_retry = False
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, int(self._options.child_processes), retrying=False, device_type=device_type)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            # Don't retry failures if the run was interrupted by the user or hit the failure limit.
            retry_failures = self._options.retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %s ..." % pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1, num_workers=1, retrying=True, device_type=device_type)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        return (initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _end_test_run(self, start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry):
        if initial_results is None:
            _log.error('No results generated')
            return test_run_results.RunDetails(exit_code=-1)

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=True, include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._output_perf_metrics(end_time - start_time, initial_results)
            self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (initial_results.unexpected_results_by_name or
                    (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying, device_type=None):
        test_inputs = self._get_test_inputs(tests_to_run, repeat_each, iterations, device_type=device_type)

        return self._runner.run_tests(self._expectations[device_type], test_inputs, tests_to_skip, num_workers, retrying)

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
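        # Returns True only if pixel tests were disabled and have now been force-enabled
        # for the retry pass (which requires restarting the helper).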
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        return self._port.start_helper()

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress, do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

                # If this crashing 'test' isn't already in crashed_processes, add it to the run_results.
                if not any(process[0] == test for process in crashed_processes):
                    result = test_results.TestResult(test)
                    result.type = test_expectations.CRASH
                    result.is_other_crash = True
                    run_results.add(result, expected=False, test_is_slow=False)
                    _log.debug("Adding results for other crash: " + str(test))

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))

    def _tests_to_retry(self, run_results, include_crashes):
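        # Retry any unexpected result other than PASS or MISSING; unexpected crashes are
        # retried only when include_crashes is set (the port opts in to retrying crashes).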
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if
                   ((result.type != test_expectations.PASS) and
                    (result.type != test_expectations.MISSING) and
                    (result.type != test_expectations.CRASH or include_crashes))]

    def _output_perf_metrics(self, run_time, initial_results):
        perf_metrics_json = json_results_generator.perf_metrics_for_test(run_time, initial_results.results_by_name.values())
        perf_metrics_path = self._filesystem.join(self._results_directory, "layout_test_perf_metrics.json")
        self._filesystem.write_text_file(perf_metrics_path, json.dumps(perf_metrics_json))

    def _results_to_upload_json_trie(self, expectations, results):
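        # Build the nested dict ("trie") of per-test results uploaded to the results server:
        # the test path is split into nested keys and each leaf is the value returned by
        # Upload.create_test_result() for that test's expected/actual results and run time,
        # e.g. a (hypothetical) test 'fast/example/a.html' lands under ['fast']['example']['a.html'].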
        FAILURE_TO_TEXT = {
            test_expectations.PASS: Upload.Expectations.PASS,
            test_expectations.CRASH: Upload.Expectations.CRASH,
            test_expectations.TIMEOUT: Upload.Expectations.TIMEOUT,
            test_expectations.IMAGE: Upload.Expectations.IMAGE,
            test_expectations.TEXT: Upload.Expectations.TEXT,
            test_expectations.AUDIO: Upload.Expectations.AUDIO,
            test_expectations.MISSING: Upload.Expectations.WARNING,
            test_expectations.IMAGE_PLUS_TEXT: ' '.join([Upload.Expectations.IMAGE, Upload.Expectations.TEXT]),
        }

        results_trie = {}
        for result in results.results_by_name.itervalues():
            if result.type == test_expectations.SKIP:
                continue

            expected = expectations.filtered_expectations_for_test(
                result.test_name,
                self._options.pixel_tests or bool(result.reftest_type),
                self._options.world_leaks,
            )
            if expected == {test_expectations.PASS}:
                expected = None
            else:
                expected = ' '.join([FAILURE_TO_TEXT.get(e, Upload.Expectations.FAIL) for e in expected])

            json_results_generator.add_path_to_trie(
                result.test_name,
                Upload.create_test_result(
                    expected=expected,
                    actual=FAILURE_TO_TEXT.get(result.type, Upload.Expectations.FAIL) if result.type else None,
                    time=int(result.test_run_time * 1000),
                ), results_trie)
        return results_trie

    def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
        """Writes the results of the test run as JSON files into the results
        dir and uploads the files to the appengine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")

        results_json_path = self._filesystem.join(self._results_directory, "results_including_passes.json")
        if results_including_passes:
            json_results_generator.write_json(self._filesystem, results_including_passes, results_json_path)

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            self._expectations, initial_results,
            self._options.test_results_server,
            "layout-tests",
            self._options.master_name)

        if generator.generate_json_output():
            _log.debug("Finished writing JSON file for the test results server.")
        else:
            _log.debug("Failed to generate JSON file for the test results server.")
            return

        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]

        generator.upload_json_files(json_files)
        if results_including_passes:
            self.upload_results(results_json_path, start_time, end_time)

        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)
        if results_including_passes:
            self._filesystem.remove(results_json_path)

    def upload_results(self, results_json_path, start_time, end_time):
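        """Upload results_including_passes.json to each --results-server-host, together with
        the build metadata (master/builder/build number/slave) and SCM revision info."""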
        if not self._options.results_server_host:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_slave:
            _log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files.")
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem, self._port.host.executive).detect_scm_system(path) or self._port.host.scm()
            revision = scm.native_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_native_revision(path, revision)}

        for hostname in self._options.results_server_host:
            _log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, hostname)

            attrs = [
                ('master', 'build.webkit.org' if master_name == 'webkit.org' else master_name),  # FIXME: Pass in build.webkit.org.
                ('builder_name', builder_name),
                ('build_number', build_number),
                ('build_slave', build_slave),
                ('revisions', json.dumps(revisions)),
                ('start_time', str(start_time)),
                ('end_time', str(end_time)),
            ]

            uploader = FileUploader("http://%s/api/report" % hostname, 360)
            try:
                response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
                if not response:
                    _log.error("JSON upload failed; no response returned")
                    continue

                if response.code != 200:
                    _log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
                    continue

                response_text = response.read()
                try:
                    response_json = json.loads(response_text)
                except ValueError as error:
                    _log.error("JSON upload failed; failed to parse the response: %s", response_text)
                    continue

                if response_json['status'] != 'OK':
                    _log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
                    continue

                _log.info("JSON uploaded.")
            except Exception as error:
                _log.error("Upload failed: %s" % error)
                continue

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
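        # Worker names appear to be of the form 'worker/<n>' (hence the split on '/');
        # -1 is recorded when a result has no worker name.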
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie

    def _print_expectation_line_for_test(self, format_string, test, device_type=None):
        line = self._expectations[device_type].model().get_expectation_line(test)
        print(format_string.format(test, line.expected_behavior, self._expectations[device_type].readable_filename_and_line_number(line), line.original_string or ''))

    def _print_expectations_for_subset(self, device_type, test_col_width, tests_to_run, tests_to_skip={}):
        format_string = '{{:{width}}} {{}} {{}} {{}}'.format(width=test_col_width)
        if tests_to_skip:
            print('')
            print('Tests to skip ({})'.format(len(tests_to_skip)))
            for test in sorted(tests_to_skip):
                self._print_expectation_line_for_test(format_string, test, device_type=device_type)

        print('')
        print('Tests to run{} ({})'.format(' for ' + str(device_type) if device_type else '', len(tests_to_run)))
        for test in sorted(tests_to_run):
            self._print_expectation_line_for_test(format_string, test, device_type=device_type)

    def print_expectations(self, args):
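        """Collect the tests for each device type, parse their expectations, and print the
        expected results for the tests that would be run and skipped.  Returns 0."""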
        aggregate_test_names = set()
        aggregate_tests_to_run = set()
        aggregate_tests_to_skip = set()
        tests_to_run_by_device = {}

        device_type_list = self._port.DEFAULT_DEVICE_TYPES or [self._port.DEVICE_TYPE]
        for device_type in device_type_list:
            for_device_type = 'for {} '.format(device_type) if device_type else ''
            self._printer.write_update('Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return test_run_results.RunDetails(exit_code=-1)

            self._printer.write_update('Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names, device_type=device_type)
            aggregate_tests_to_skip.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests_to_run]
            aggregate_tests_to_run.update(tests_to_run)

        aggregate_tests_to_skip = aggregate_tests_to_skip - aggregate_tests_to_run

        self._printer.print_found(len(aggregate_test_names), len(aggregate_tests_to_run), self._options.repeat_each, self._options.iterations)
        test_col_width = len(max(aggregate_tests_to_run.union(aggregate_tests_to_skip), key=len)) + 1

        self._print_expectations_for_subset(device_type_list[0], test_col_width, tests_to_run_by_device[device_type_list[0]], aggregate_tests_to_skip)

        for device_type in device_type_list[1:]:
            self._print_expectations_for_subset(device_type, test_col_width, tests_to_run_by_device[device_type])

        return 0