Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""
The Manager runs a series of tests (TestType interface) against a set
of test files.  If a test file fails a TestType, the TestType returns a list
of TestFailure objects to the Manager, which aggregates them into a final
report.
"""

import json
import logging
import random
import sys
import time
from collections import defaultdict, OrderedDict

from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_layout_results_generator
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.layout_tests.models.test_run_results import INTERRUPTED_EXIT_STATUS
from webkitpy.tool.grammar import pluralize
from webkitpy.results.upload import Upload
from webkitpy.xcode.device_type import DeviceType

_log = logging.getLogger(__name__)

TestExpectations = test_expectations.TestExpectations


class Manager(object):
    """Manages running a series of tests against a set of layout test files."""
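    # Typical usage, as a minimal sketch (in practice the run-webkit-tests
    # entry point builds the port, options, and printer objects before
    # constructing the Manager):
    #
    #     manager = Manager(port, options, printer)
    #     run_details = manager.run(args)
    #     sys.exit(run_details.exit_code)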

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = OrderedDict()
        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR + 'test'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.web_platform_test_subdir = self._port.web_platform_test_server_doc_root()
        self.webkit_specific_web_platform_test_subdir = 'http' + port.TEST_PATH_SEPARATOR + 'wpt' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = None

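        # tests-options.json (checked in under LayoutTests) maps test paths to
        # per-test options; only the "slow" entry is consulted in this class
        # (see _test_is_slow).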
        test_options_json_path = self._port.path_from_webkit_base(self.LAYOUT_TESTS_DIRECTORY, "tests-options.json")
        self._tests_options = json.loads(self._filesystem.read_text_file(test_options_json_path)) if self._filesystem.exists(test_options_json_path) else {}

    def _collect_tests(self, args, device_type=None):
        return self._finder.find_tests(self._options, args, device_type=device_type)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test) or self._needs_web_platform_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _needs_web_platform_test(self, test):
        return self.web_platform_test_subdir in test or self.webkit_specific_web_platform_test_subdir in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _prepare_lists(self, paths, test_names, device_type=None):
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations[device_type], self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

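        # When the run is split across multiple machines, only this machine's
        # chunk is executed; tests that fall into other chunks are recorded as
        # skipped so reporting stays consistent.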
        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations[device_type].add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file, device_type=None):
        return TestInput(test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file, device_type=device_type) else self._options.time_out_ms,
            self._is_http_test(test_file),
            should_dump_jsconsolelog_in_stderr=self._test_should_dump_jsconsolelog_in_stderr(test_file, device_type=device_type))

    def _test_is_slow(self, test_file, device_type=None):
        if self._expectations[device_type].model().has_modifier(test_file, test_expectations.SLOW):
            return True
        return "slow" in self._tests_options.get(test_file, [])

    def _test_should_dump_jsconsolelog_in_stderr(self, test_file, device_type=None):
        return self._expectations[device_type].model().has_modifier(test_file, test_expectations.DUMPJSCONSOLELOGINSTDERR)

    def needs_servers(self, test_names):
        return any(self._is_http_test(test_name) for test_name in test_names) and self._options.http

    def _get_test_inputs(self, tests_to_run, repeat_each, iterations, device_type=None):
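        # 'iterations' repeats the full test list; 'repeat_each' repeats each
        # individual test back-to-back before moving on to the next one.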
        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test, device_type=device_type))
        return test_inputs

    def _update_worker_count(self, test_names, device_type=None):
        test_inputs = self._get_test_inputs(test_names, self._options.repeat_each, self._options.iterations, device_type=device_type)
        worker_count = self._runner.get_worker_count(test_inputs, int(self._options.child_processes))
        self._options.child_processes = worker_count

    def _set_up_run(self, test_names, device_type=None):
        # The helper must be started before we check the system dependencies,
        # since it may adjust system state that those checks rely on.
        self._printer.write_update("Starting helper ...")
        if not self._port.start_helper(self._options.pixel_tests):
            return False

        self._update_worker_count(test_names, device_type=device_type)
        self._port.reset_preferences()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps():
                self._port.stop_helper()
                return False

        self._port.setup_test_run(device_type)
        return True

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        num_failed_uploads = 0
        total_tests = set()
        aggregate_test_names = set()
        aggregate_tests = set()
        tests_to_run_by_device = {}

        device_type_list = self._port.supported_device_types()
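        # Tests and expectations are collected once per supported device type;
        # a test is scheduled only for the first device type whose run includes
        # it (see aggregate_tests below).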
        for device_type in device_type_list:
            for_device_type = u'for {} '.format(device_type) if device_type else ''
            self._printer.write_update(u'Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return test_run_results.RunDetails(exit_code=-1)

            self._printer.write_update(u'Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names, device_type=device_type)

            total_tests.update(tests_to_run)
            total_tests.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests]
            aggregate_tests.update(tests_to_run)

        # If a test is marked as skipped but was explicitly requested, run it anyway
        if self._options.skipped != 'always':
            for arg in args:
                if arg in total_tests and arg not in aggregate_tests:
                    tests_to_run_by_device[device_type_list[0]].append(arg)
                    aggregate_tests.add(arg)

        tests_to_skip = total_tests - aggregate_tests
        self._printer.print_found(len(aggregate_test_names), len(aggregate_tests), self._options.repeat_each, self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not sum([len(tests) for tests in tests_to_run_by_device.itervalues()]):
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

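        # Work out which auxiliary servers (HTTP, web-platform-tests, WebSocket)
        # are needed across all device types so the runner only starts the ones
        # that are actually required.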
        needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test)) for tests in tests_to_run_by_device.itervalues() for test in tests)
        needs_web_platform_test_server = any(self._needs_web_platform_test(test) for tests in tests_to_run_by_device.itervalues() for test in tests)
        needs_websockets = any(self._is_websocket_test(test) for tests in tests_to_run_by_device.itervalues() for test in tests)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes

        for device_type in device_type_list:
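            # Re-bind the runner's slow-test predicate to this device type's
            # expectations and cap the worker count at what the device type
            # supports; a count of 0 means the device type is unavailable.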
            self._runner._test_is_slow = lambda test_file: self._test_is_slow(test_file, device_type=device_type)
            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue
            if not self._set_up_run(tests_to_run_by_device[device_type], device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            configuration = self._port.configuration_for_upload(self._port.target_host(0))
            if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
                configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(tests_to_run_by_device[device_type], tests_to_skip, device_type=device_type)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

                upload = Upload(
                    suite='layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
                )
                for url in self._options.report_urls:
                    self._printer.write_update('Uploading to {} ...'.format(url))
                    if not upload.upload(url, log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

            if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                    (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
                break

        # Restore child_processes to the largest value used during the run; it
        # is the most relevant number for the final logging below.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)
        if num_failed_uploads:
            result.exit_code = -1
        return result

    def _run_test_subset(self, tests_to_run, tests_to_skip, device_type=None):
        try:
            enabled_pixel_tests_in_retry = False
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, int(self._options.child_processes), retrying=False, device_type=device_type)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when the run was interrupted by the user or hit the failure limit.
            retry_failures = self._options.retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
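                # Retries run a single iteration on one worker, with pixel
                # tests temporarily enabled if they were off for the main run.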
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %s ..." % pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1, num_workers=1, retrying=True, device_type=device_type)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        return (initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _end_test_run(self, start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry):
        if initial_results is None:
            _log.error('No results generated')
            return test_run_results.RunDetails(exit_code=-1)

        # Some crash logs can take a long time to be written out, so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=True, include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

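        # For a dry run the exit code stays at -1; otherwise it is derived from
        # the summarized results, or set to INTERRUPTED_EXIT_STATUS if the user
        # interrupted the run.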
        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._output_perf_metrics(end_time - start_time, initial_results)
            self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (initial_results.unexpected_results_by_name or
                    (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying, device_type=None):
        test_inputs = self._get_test_inputs(tests_to_run, repeat_each, iterations, device_type=device_type)

        return self._runner.run_tests(self._expectations[device_type], test_inputs, tests_to_skip, num_workers, retrying)

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        return self._port.start_helper()

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress, do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

                # Check whether this crashing 'test' is already in the list of crashed_processes; if not, add it to run_results.
                if not any(process[0] == test for process in crashed_processes):
                    result = test_results.TestResult(test)
                    result.type = test_expectations.CRASH
                    result.is_other_crash = True
                    run_results.add(result, expected=False, test_is_slow=False)
                    _log.debug("Adding results for other crash: " + str(test))

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))

    def _tests_to_retry(self, run_results, include_crashes):
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if
                   ((result.type != test_expectations.PASS) and
                    (result.type != test_expectations.MISSING) and
                    (result.type != test_expectations.CRASH or include_crashes))]

    def _output_perf_metrics(self, run_time, initial_results):
        perf_metrics_json = json_results_generator.perf_metrics_for_test(run_time, initial_results.results_by_name.values())
        perf_metrics_path = self._filesystem.join(self._results_directory, "layout_test_perf_metrics.json")
        self._filesystem.write_text_file(perf_metrics_path, json.dumps(perf_metrics_json))

    def _results_to_upload_json_trie(self, expectations, results):
        FAILURE_TO_TEXT = {
            test_expectations.PASS: Upload.Expectations.PASS,
            test_expectations.CRASH: Upload.Expectations.CRASH,
            test_expectations.TIMEOUT: Upload.Expectations.TIMEOUT,
            test_expectations.IMAGE: Upload.Expectations.IMAGE,
            test_expectations.TEXT: Upload.Expectations.TEXT,
            test_expectations.AUDIO: Upload.Expectations.AUDIO,
            test_expectations.MISSING: Upload.Expectations.WARNING,
            test_expectations.IMAGE_PLUS_TEXT: ' '.join([Upload.Expectations.IMAGE, Upload.Expectations.TEXT]),
        }

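        # Build a nested dict keyed on path components (e.g. 'fast' -> 'js' ->
        # 'foo.html' -> result entry); this is the trie layout passed to the
        # upload machinery. Skipped tests are omitted.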
        results_trie = {}
        for result in results.results_by_name.itervalues():
            if result.type == test_expectations.SKIP:
                continue

            expected = expectations.filtered_expectations_for_test(
                result.test_name,
                self._options.pixel_tests or bool(result.reftest_type),
                self._options.world_leaks,
            )
            if expected == {test_expectations.PASS}:
                expected = None
            else:
                expected = ' '.join([FAILURE_TO_TEXT.get(e, Upload.Expectations.FAIL) for e in expected])

            json_results_generator.add_path_to_trie(
                result.test_name,
                Upload.create_test_result(
                    expected=expected,
                    actual=FAILURE_TO_TEXT.get(result.type, Upload.Expectations.FAIL) if result.type else None,
                    time=int(result.test_run_time * 1000),
                ), results_trie)
        return results_trie

    def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
        """Write the results of the test run as JSON files into the results
        directory and upload them to the test results server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")

        results_json_path = self._filesystem.join(self._results_directory, "results_including_passes.json")
        if results_including_passes:
            json_results_generator.write_json(self._filesystem, results_including_passes, results_json_path)

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            self._expectations, initial_results,
            self._options.test_results_server,
            "layout-tests",
            self._options.master_name)

        if generator.generate_json_output():
            _log.debug("Finished writing JSON file for the test results server.")
        else:
            _log.debug("Failed to generate JSON file for the test results server.")
            return

        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]

        generator.upload_json_files(json_files)
        if results_including_passes:
            self.upload_results(results_json_path, start_time, end_time)

        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)
        if results_including_passes:
            self._filesystem.remove(results_json_path)

    def upload_results(self, results_json_path, start_time, end_time):
        if not self._options.results_server_host:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_slave:
            _log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files.")
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem, self._port.host.executive).detect_scm_system(path) or self._port.host.scm()
            revision = scm.native_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_native_revision(path, revision)}

        for hostname in self._options.results_server_host:
            _log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, hostname)

            attrs = [
                ('master', 'build.webkit.org' if master_name == 'webkit.org' else master_name),  # FIXME: Pass in build.webkit.org.
                ('builder_name', builder_name),
                ('build_number', build_number),
                ('build_slave', build_slave),
                ('revisions', json.dumps(revisions)),
                ('start_time', str(start_time)),
                ('end_time', str(end_time)),
            ]

            uploader = FileUploader("http://%s/api/report" % hostname, 360)
            try:
                response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
                if not response:
                    _log.error("JSON upload failed; no response returned")
                    continue

                if response.code != 200:
                    _log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
                    continue

                response_text = response.read()
                try:
                    response_json = json.loads(response_text)
                except ValueError as error:
                    _log.error("JSON upload failed; failed to parse the response: %s", response_text)
                    continue

                if response_json['status'] != 'OK':
                    _log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
                    continue

                _log.info("JSON uploaded.")
            except Exception as error:
                _log.error("Upload failed: %s" % error)
                continue

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
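            # Worker names are expected to look like 'prefix/<number>'; return
            # the numeric part, or -1 when there is no worker name.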
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie

    def _print_expectation_line_for_test(self, format_string, test, device_type=None):
        line = self._expectations[device_type].model().get_expectation_line(test)
        print(format_string.format(test, line.expected_behavior, self._expectations[device_type].readable_filename_and_line_number(line), line.original_string or ''))

    def _print_expectations_for_subset(self, device_type, test_col_width, tests_to_run, tests_to_skip={}):
        format_string = '{{:{width}}} {{}} {{}} {{}}'.format(width=test_col_width)
        if tests_to_skip:
            print('')
            print('Tests to skip ({})'.format(len(tests_to_skip)))
            for test in sorted(tests_to_skip):
                self._print_expectation_line_for_test(format_string, test, device_type=device_type)

        print('')
        print('Tests to run{} ({})'.format(' for ' + str(device_type) if device_type else '', len(tests_to_run)))
        for test in sorted(tests_to_run):
            self._print_expectation_line_for_test(format_string, test, device_type=device_type)

    def print_expectations(self, args):
        """Print, for each collected test, the expectations that apply to it."""
        aggregate_test_names = set()
        aggregate_tests_to_run = set()
        aggregate_tests_to_skip = set()
        tests_to_run_by_device = {}

        device_type_list = self._port.DEFAULT_DEVICE_TYPES or [self._port.DEVICE_TYPE]
        for device_type in device_type_list:
            for_device_type = 'for {} '.format(device_type) if device_type else ''
            self._printer.write_update('Collecting tests {}...'.format(for_device_type))
            try:
                paths, test_names = self._collect_tests(args, device_type=device_type)
            except IOError:
                # This is raised if --test-list doesn't exist
                return test_run_results.RunDetails(exit_code=-1)

            self._printer.write_update('Parsing expectations {}...'.format(for_device_type))
            self._expectations[device_type] = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force, device_type=device_type)
            self._expectations[device_type].parse_all_expectations()

            aggregate_test_names.update(test_names)
            tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names, device_type=device_type)
            aggregate_tests_to_skip.update(tests_to_skip)

            tests_to_run_by_device[device_type] = [test for test in tests_to_run if test not in aggregate_tests_to_run]
            aggregate_tests_to_run.update(tests_to_run)

        aggregate_tests_to_skip = aggregate_tests_to_skip - aggregate_tests_to_run

        self._printer.print_found(len(aggregate_test_names), len(aggregate_tests_to_run), self._options.repeat_each, self._options.iterations)
        test_col_width = len(max(aggregate_tests_to_run.union(aggregate_tests_to_skip), key=len)) + 1

        self._print_expectations_for_subset(device_type_list[0], test_col_width, tests_to_run_by_device[device_type_list[0]], aggregate_tests_to_skip)

        for device_type in device_type_list[1:]:
            self._print_expectations_for_subset(device_type, test_col_width, tests_to_run_by_device[device_type])

        return 0