# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

30 """
31 The Manager runs a series of tests (TestType interface) against a set
32 of test files.  If a test file fails a TestType, it returns a list of TestFailure
33 objects to the Manager. The Manager then aggregates the TestFailures to
34 create a final report.
35 """

import json
import logging
import random
import sys
import time
from collections import defaultdict

from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_layout_results_generator
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.layout_tests.models.test_run_results import INTERRUPTED_EXIT_STATUS
from webkitpy.tool.grammar import pluralize
from webkitpy.xcode.device_type import DeviceType

_log = logging.getLogger(__name__)

TestExpectations = test_expectations.TestExpectations


class Manager(object):
    """A class that manages running a series of tests on a set of layout
    test files."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific behavior
          options: the parsed command-line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None
        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR + 'test'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.web_platform_test_subdir = self._port.web_platform_test_server_doc_root()
        self.webkit_specific_web_platform_test_subdir = 'http' + port.TEST_PATH_SEPARATOR + 'wpt' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = None

        test_options_json_path = self._port.path_from_webkit_base(self.LAYOUT_TESTS_DIRECTORY, "tests-options.json")
        self._tests_options = json.loads(self._filesystem.read_text_file(test_options_json_path)) if self._filesystem.exists(test_options_json_path) else {}

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test) or self._needs_web_platform_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _needs_web_platform_test(self, test):
        return self.web_platform_test_subdir in test or self.webkit_specific_web_platform_test_subdir in test

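    # The predicates above classify tests purely by substring matches on their
    # relative paths.  For example (assuming the port's TEST_PATH_SEPARATOR is
    # '/'), 'http/tests/xmlhttprequest/foo.html' contains HTTP_SUBDIR
    # ('http/test') and is therefore treated as an HTTP test.
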
    def _custom_device_for_test(self, test):
        # FIXME: This is a terrible way to do device-specific expected results https://bugs.webkit.org/show_bug.cgi?id=192162
        for device_type in self._port.CUSTOM_DEVICE_TYPES:
            if device_type.hardware_family and device_type.hardware_family.lower() + self._port.TEST_PATH_SEPARATOR in test:
                return device_type
            if device_type.hardware_family and device_type.hardware_type and \
                (device_type.hardware_family + device_type.hardware_type).lower().replace(' ', '') + self._port.TEST_PATH_SEPARATOR in test:
                return device_type
        return None

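    # Illustration of the matching above, with hypothetical device names and
    # assuming the port declares them in CUSTOM_DEVICE_TYPES: a hardware family
    # such as 'iPad' matches any test path containing 'ipad/', and a family
    # plus type such as 'iPhone 7' matches 'iphone7/' (lowercased, spaces
    # stripped).  Which device types exist is entirely up to the port.
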
    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

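    # Note on _prepare_lists: chunking is applied after ordering, and tests
    # that land in other chunks are folded into the skip set (and marked as
    # skipped in the expectations model) so downstream reporting still
    # accounts for every collected test.
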
    def _test_input_for_file(self, test_file):
        return TestInput(test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._is_http_test(test_file),
            should_dump_jsconsolelog_in_stderr=self._test_should_dump_jsconsolelog_in_stderr(test_file))

    def _test_is_slow(self, test_file):
        if self._expectations.model().has_modifier(test_file, test_expectations.SLOW):
            return True
        return "slow" in self._tests_options.get(test_file, [])

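    # A test is considered slow if it either carries the Slow modifier in the
    # test expectations or is tagged "slow" in LayoutTests/tests-options.json;
    # slow tests get slow_time_out_ms instead of the default time_out_ms when
    # their TestInput is built above.
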
    def _test_should_dump_jsconsolelog_in_stderr(self, test_file):
        return self._expectations.model().has_modifier(test_file, test_expectations.DUMPJSCONSOLELOGINSTDERR)

    def needs_servers(self, test_names):
        return any(self._is_http_test(test_name) for test_name in test_names) and self._options.http

    def _get_test_inputs(self, tests_to_run, repeat_each, iterations):
        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return test_inputs

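    # _get_test_inputs produces len(tests_to_run) * repeat_each * iterations
    # inputs: each iteration walks the full test list, repeating each test
    # repeat_each times back-to-back before moving on to the next one.
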
    def _update_worker_count(self, test_names):
        test_inputs = self._get_test_inputs(test_names, self._options.repeat_each, self._options.iterations)
        worker_count = self._runner.get_worker_count(test_inputs, int(self._options.child_processes))
        self._options.child_processes = worker_count

    def _set_up_run(self, test_names, device_type=None):
        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        self._printer.write_update("Starting helper ...")
        if not self._port.start_helper(self._options.pixel_tests):
            return False

        self._update_worker_count(test_names)
        self._port.reset_preferences()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps():
                self._port.stop_helper()
                return False

        self._port.setup_test_run(device_type)
        return True

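    # _set_up_run performs the per-device-type setup in a fixed order: start
    # the port helper, recompute the worker count for the tests being run,
    # reset preferences, optionally verify system dependencies (stopping the
    # helper again on failure), and finally let the port prepare the run for
    # the given device type.
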
    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        # Look for tests with custom device requirements.
        test_device_mapping = defaultdict(list)
        for test_file in tests_to_run:
            test_device_mapping[self._custom_device_for_test(test_file) or self._port.DEFAULT_DEVICE_TYPE].append(test_file)

        # Order device types from most specific to least specific so that tests bound to a less specific
        # device type can be claimed by a more specific device type that matches it.
        device_type_order = []
        types_with_family = []
        remaining_types = []
        for device_type in test_device_mapping.iterkeys():
            if device_type and device_type.hardware_family and device_type.hardware_type:
                device_type_order.append(device_type)
            elif device_type and device_type.hardware_family:
                types_with_family.append(device_type)
            else:
                remaining_types.append(device_type)
        device_type_order.extend(types_with_family + remaining_types)
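        # For example (hypothetical device names), the resulting order is:
        #   [<family + type, e.g. 'iPhone 7'>, <family only, e.g. 'iPad'>, <default / None>]
        # i.e. fully specified types first, then family-only types, then everything else.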

        needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test)) for test in tests_to_run)
        needs_web_platform_test_server = any(self._needs_web_platform_test(test) for test in tests_to_run)
        needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes

        while device_type_order:
            device_type = device_type_order[0]
            tests = test_device_mapping[device_type]
            del device_type_order[0]

            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(test_device_mapping[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            # This loop looks for any less-specific device types which match the current device type.
            index = 0
            while index < len(device_type_order):
                if device_type_order[index] == device_type:
                    tests.extend(test_device_mapping[device_type_order[index]])

                    # Remove device types from device_type_order once the tests associated with that type have been claimed.
                    del device_type_order[index]
                else:
                    index += 1

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info('Running {}{}'.format(pluralize(len(tests), 'test'), ' for {}'.format(str(device_type)) if device_type else ''))
            _log.info('')
            if not self._set_up_run(tests, device_type):
                return test_run_results.RunDetails(exit_code=-1)

            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(tests, tests_to_skip)
            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

        # Used for final logging; max_child_processes_for_run is the most relevant value here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        return self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

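    # Retry behavior (all of it local to _run_test_subset below): unexpected
    # failures from the initial pass are re-run once, serially with a single
    # worker, and pixel tests are temporarily forced on for the retry if they
    # were disabled for the initial pass.
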
    def _run_test_subset(self, tests_to_run, tests_to_skip):
        try:
            enabled_pixel_tests_in_retry = False
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, int(self._options.child_processes), retrying=False)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when interrupted by user or failures limit exception.
            retry_failures = self._options.retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %s ..." % pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1, num_workers=1, retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        return (initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _end_test_run(self, start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry):
        if initial_results is None:
            _log.error('No results generated')
            return test_run_results.RunDetails(exit_code=-1)

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=True, include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._output_perf_metrics(end_time - start_time, initial_results)
            self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (initial_results.unexpected_results_by_name or
                    (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)

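    # Exit code summary for _end_test_run above: -1 if no results were produced
    # (or on a dry run), INTERRUPTED_EXIT_STATUS if the user interrupted the
    # run, otherwise whatever the port derives from the summarized results.
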
    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
        test_inputs = self._get_test_inputs(tests_to_run, repeat_each, iterations)

        return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, retrying)

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        return self._port.start_helper()

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress, do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs written after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

                # Check whether this crashing 'test' is already in the list of crashed_processes; if not, add it to the run_results.
                if not any(process[0] == test for process in crashed_processes):
                    result = test_results.TestResult(test)
                    result.type = test_expectations.CRASH
                    result.is_other_crash = True
                    run_results.add(result, expected=False, test_is_slow=False)
                    _log.debug("Adding results for other crash: " + str(test))

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))

    def _tests_to_retry(self, run_results, include_crashes):
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if
                   ((result.type != test_expectations.PASS) and
                    (result.type != test_expectations.MISSING) and
                    (result.type != test_expectations.CRASH or include_crashes))]

    def _output_perf_metrics(self, run_time, initial_results):
        perf_metrics_json = json_results_generator.perf_metrics_for_test(run_time, initial_results.results_by_name.values())
        perf_metrics_path = self._filesystem.join(self._results_directory, "layout_test_perf_metrics.json")
        self._filesystem.write_text_file(perf_metrics_path, json.dumps(perf_metrics_json))

    def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
        """Writes the results of the test run as JSON files into the results
        directory and uploads the files to the App Engine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")

        results_json_path = self._filesystem.join(self._results_directory, "results_including_passes.json")
        if results_including_passes:
            json_results_generator.write_json(self._filesystem, results_including_passes, results_json_path)

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            self._expectations, initial_results,
            self._options.test_results_server,
            "layout-tests",
            self._options.master_name)

        if generator.generate_json_output():
            _log.debug("Finished writing JSON file for the test results server.")
        else:
            _log.debug("Failed to generate JSON file for the test results server.")
            return

        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]

        generator.upload_json_files(json_files)
        if results_including_passes:
            self.upload_results(results_json_path, start_time, end_time)

        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)
        if results_including_passes:
            self._filesystem.remove(results_json_path)

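    # Artifacts written by _upload_json_files: times_ms.json, stats.json,
    # full_results.json (as JSONP with an ADD_RESULTS callback),
    # results_including_passes.json (only when a results server host is
    # configured), and incremental_results.json (produced by the JSON layout
    # results generator, assuming its usual behavior).  times_ms.json,
    # incremental_results.json and results_including_passes.json are deleted
    # again after upload.
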
    def upload_results(self, results_json_path, start_time, end_time):
        if not self._options.results_server_host:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_slave:
            _log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files.")
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem, self._port.host.executive).detect_scm_system(path) or self._port.host.scm()
            revision = scm.native_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_native_revision(path, revision)}

        for hostname in self._options.results_server_host:
            _log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, hostname)

            attrs = [
                ('master', 'build.webkit.org' if master_name == 'webkit.org' else master_name),  # FIXME: Pass in build.webkit.org.
                ('builder_name', builder_name),
                ('build_number', build_number),
                ('build_slave', build_slave),
                ('revisions', json.dumps(revisions)),
                ('start_time', str(start_time)),
                ('end_time', str(end_time)),
            ]

            uploader = FileUploader("http://%s/api/report" % hostname, 360)
            try:
                response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
                if not response:
                    _log.error("JSON upload failed; no response returned")
                    continue

                if response.code != 200:
                    _log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
                    continue

                response_text = response.read()
                try:
                    response_json = json.loads(response_text)
                except ValueError as error:
                    _log.error("JSON upload failed; failed to parse the response: %s", response_text)
                    continue

                if response_json['status'] != 'OK':
                    _log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
                    continue

                _log.info("JSON uploaded.")
            except Exception as error:
                _log.error("Upload failed: %s" % error)
                continue

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie

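    # Rough shape of the trie built by _stats_trie (with an illustrative,
    # made-up test name):
    #   {'fast': {'css': {'example.html': {'results': (worker_number, test_number, pid,
    #                                                  test_run_time_ms, total_run_time_ms)}}}}
    # i.e. path components become nested dicts, with the per-test stats tuple at the leaf.
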
    def _print_expectation_line_for_test(self, format_string, test):
        line = self._expectations.model().get_expectation_line(test)
        print(format_string.format(test, line.expected_behavior, self._expectations.readable_filename_and_line_number(line), line.original_string or ''))

    def _print_expectations_for_subset(self, device_type, test_col_width, tests_to_run, tests_to_skip={}):
        format_string = '{{:{width}}} {{}} {{}} {{}}'.format(width=test_col_width)
        if tests_to_skip:
            print('')
            print('Tests to skip ({})'.format(len(tests_to_skip)))
            for test in sorted(tests_to_skip):
                self._print_expectation_line_for_test(format_string, test)

        print('')
        print('Tests to run{} ({})'.format(' for ' + str(device_type) if device_type else '', len(tests_to_run)))
        for test in sorted(tests_to_run):
            self._print_expectation_line_for_test(format_string, test)

    def print_expectations(self, args):
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return -1

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)

        test_col_width = len(max(tests_to_run + list(tests_to_skip), key=len)) + 1

        default_device_tests = []

        # Look for tests with custom device requirements.
        custom_device_tests = defaultdict(list)
        for test_file in tests_to_run:
            custom_device = self._custom_device_for_test(test_file)
            if custom_device:
                custom_device_tests[custom_device].append(test_file)
            else:
                default_device_tests.append(test_file)

        if custom_device_tests:
            for device_type, tests in custom_device_tests.iteritems():
                _log.debug('{} tests use device {}'.format(len(tests), device_type))

        self._print_expectations_for_subset(None, test_col_width, tests_to_run, tests_to_skip)

        for device_type, tests in custom_device_tests.iteritems():
            self._print_expectations_for_subset(device_type, test_col_width, tests)

        return 0