# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

30 """
31 The Manager runs a series of tests (TestType interface) against a set
32 of test files.  If a test file fails a TestType, it returns a list of TestFailure
33 objects to the Manager. The Manager then aggregates the TestFailures to
34 create a final report.
35 """

import json
import logging
import random
import sys
import time
from collections import defaultdict

from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_layout_results_generator
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.layout_tests.models.test_run_results import INTERRUPTED_EXIT_STATUS
from webkitpy.tool.grammar import pluralize

_log = logging.getLogger(__name__)

TestExpectations = test_expectations.TestExpectations


class Manager(object):
    """A class that manages running a series of tests against a set of layout
    test files."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific behavior.
          options: the parsed command-line options.
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None
        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR + 'test'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.web_platform_test_subdir = self._port.web_platform_test_server_doc_root()
        self.webkit_specific_web_platform_test_subdir = 'http' + port.TEST_PATH_SEPARATOR + 'wpt' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = None

        test_options_json_path = self._port.path_from_webkit_base(self.LAYOUT_TESTS_DIRECTORY, "tests-options.json")
        self._tests_options = json.loads(self._filesystem.read_text_file(test_options_json_path)) if self._filesystem.exists(test_options_json_path) else {}

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test) or self._needs_web_platform_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _needs_web_platform_test(self, test):
        return self.web_platform_test_subdir in test or self.webkit_specific_web_platform_test_subdir in test

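    # Map a test to the custom device class it requires: a test needs a custom
    # device when the device class name, lowercased with spaces removed, appears
    # as a directory name in its path. Returns None for default-device tests.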
    def _custom_device_for_test(self, test):
        for device_class in self._port.CUSTOM_DEVICE_CLASSES:
            directory_suffix = device_class.lower().replace(' ', '') + self._port.TEST_PATH_SEPARATOR
            if directory_suffix in test:
                return device_class
        return None

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

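    # Partition the discovered tests into the ordered list to run and the set to
    # skip: skipped tests come from the finder and the expectations, the remainder
    # is ordered per --order, and tests assigned to other chunks are also skipped.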
    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

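    # Wrap a test path in a TestInput, picking the slow timeout for tests marked
    # slow and noting whether the test needs an HTTP server and whether it should
    # dump the JS console log to stderr.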
    def _test_input_for_file(self, test_file):
        return TestInput(test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._is_http_test(test_file),
            should_dump_jsconsolelog_in_stderr=self._test_should_dump_jsconsolelog_in_stderr(test_file))

    def _test_is_slow(self, test_file):
        if self._expectations.model().has_modifier(test_file, test_expectations.SLOW):
            return True
        return "slow" in self._tests_options.get(test_file, [])

    def _test_should_dump_jsconsolelog_in_stderr(self, test_file):
        return self._expectations.model().has_modifier(test_file, test_expectations.DUMPJSCONSOLELOGINSTDERR)

    def needs_servers(self, test_names):
        return any(self._is_http_test(test_name) for test_name in test_names) and self._options.http

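    # Expand the test list into TestInputs: the whole list is repeated
    # `iterations` times and each test is repeated `repeat_each` times within
    # an iteration.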
    def _get_test_inputs(self, tests_to_run, repeat_each, iterations):
        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return test_inputs

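    # Ask the runner how many workers to use for this set of test inputs and
    # record the result as the effective --child-processes value.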
    def _update_worker_count(self, test_names):
        test_inputs = self._get_test_inputs(test_names, self._options.repeat_each, self._options.iterations)
        worker_count = self._runner.get_worker_count(test_inputs, int(self._options.child_processes))
        self._options.child_processes = worker_count

    def _set_up_run(self, test_names, device_class=None):
        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return False

        self._options.device_class = device_class

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        self._printer.write_update("Starting helper ...")
        if not self._port.start_helper(self._options.pixel_tests):
            return False

        self._update_worker_count(test_names)
        self._port.reset_preferences()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps():
                self._port.stop_helper()
                return False

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        self._port.setup_test_run(self._options.device_class)
        return True

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
        start_time = time.time()

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        default_device_tests = []

        # Look for tests with custom device requirements.
        custom_device_tests = defaultdict(list)
        for test_file in tests_to_run:
            custom_device = self._custom_device_for_test(test_file)
            if custom_device:
                custom_device_tests[custom_device].append(test_file)
            else:
                default_device_tests.append(test_file)

        if custom_device_tests:
            for device_class in custom_device_tests:
                _log.debug('{} tests use device {}'.format(len(custom_device_tests[device_class]), device_class))

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        needs_http = any((self._is_http_test(test) and not self._needs_web_platform_test(test)) for test in tests_to_run)
        needs_web_platform_test_server = any(self._needs_web_platform_test(test) for test in tests_to_run)
        needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        if default_device_tests:
            _log.info('')
            _log.info("Running %s", pluralize(len(tests_to_run), "test"))
            _log.info('')
            if not self._set_up_run(tests_to_run):
                return test_run_results.RunDetails(exit_code=-1)

            initial_results, retry_results, enabled_pixel_tests_in_retry = self._run_test_subset(default_device_tests, tests_to_skip)

        for device_class in custom_device_tests:
            device_tests = custom_device_tests[device_class]
            if device_tests:
                _log.info('')
                _log.info('Running %s for %s', pluralize(len(device_tests), "test"), device_class)
                _log.info('')
                if not self._set_up_run(device_tests, device_class):
                    return test_run_results.RunDetails(exit_code=-1)

                device_initial_results, device_retry_results, device_enabled_pixel_tests_in_retry = self._run_test_subset(device_tests, tests_to_skip)

                initial_results = initial_results.merge(device_initial_results) if initial_results else device_initial_results
                retry_results = retry_results.merge(device_retry_results) if retry_results else device_retry_results
                enabled_pixel_tests_in_retry |= device_enabled_pixel_tests_in_retry

        self._runner.stop_servers()
        end_time = time.time()
        return self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

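    # Run one batch of tests: do the initial pass, then, if retries are enabled
    # (options.retry_failures) and the run was not interrupted, retry the
    # unexpected failures once with a single worker.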
    def _run_test_subset(self, tests_to_run, tests_to_skip):
        try:
            enabled_pixel_tests_in_retry = False
            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations, int(self._options.child_processes), retrying=False)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            # Don't retry failures if the run was interrupted by the user or stopped by the failure limit.
            retry_failures = self._options.retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %s ..." % pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1, num_workers=1, retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        return (initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _end_test_run(self, start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry):
        # Some crash logs can take a long time to be written out, so look
        # for new logs after the test run finishes.

        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        results_including_passes = None
        if self._options.results_server_host:
            results_including_passes = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=True, include_time_and_modifiers=True)
        self._printer.print_results(end_time - start_time, initial_results, summarized_results)

        exit_code = -1
        if not self._options.dry_run:
            self._port.print_leaks_summary()
            self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (initial_results.unexpected_results_by_name or
                    (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                exit_code = self._port.exit_code_from_summarized_results(summarized_results)
        return test_run_results.RunDetails(exit_code, summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
        test_inputs = self._get_test_inputs(tests_to_run, repeat_each, iterations)

        return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, retrying)

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

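    # If pixel tests were not requested, enable them for the retry pass
    # (restarting the helper so it picks up the change) and report that we did
    # so, letting the caller turn them back off afterwards.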
    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        return self._port.start_helper()

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress, do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at. We're looking for crash
               logs written after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if (result.type != test_expectations.CRASH):
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

                # If this crashing test isn't already in crashed_processes, record it in run_results as an "other" crash.
                if not any(process[0] == test for process in crashed_processes):
                    result = test_results.TestResult(test)
                    result.type = test_expectations.CRASH
                    result.is_other_crash = True
                    run_results.add(result, expected=False, test_is_slow=False)
                    _log.debug("Adding results for other crash: " + str(test))

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))

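    # Retry every test with an unexpected result except passes and missing
    # results; crashes are retried only when the port opts in via include_crashes.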
    def _tests_to_retry(self, run_results, include_crashes):
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if
                   ((result.type != test_expectations.PASS) and
                    (result.type != test_expectations.MISSING) and
                    (result.type != test_expectations.CRASH or include_crashes))]

    def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
        """Writes the results of the test run as JSON files into the results
        dir and uploads the files to the appengine server.

        Args:
          summarized_results: dict of results
          initial_results: full summary object
        """
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        # We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")

        results_json_path = self._filesystem.join(self._results_directory, "results_including_passes.json")
        if results_including_passes:
            json_results_generator.write_json(self._filesystem, results_including_passes, results_json_path)

        generator = json_layout_results_generator.JSONLayoutResultsGenerator(
            self._port, self._options.builder_name, self._options.build_name,
            self._options.build_number, self._results_directory,
            self._expectations, initial_results,
            self._options.test_results_server,
            "layout-tests",
            self._options.master_name)

        if generator.generate_json_output():
            _log.debug("Finished writing JSON file for the test results server.")
        else:
            _log.debug("Failed to generate JSON file for the test results server.")
            return

        json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]

        generator.upload_json_files(json_files)
        if results_including_passes:
            self.upload_results(results_json_path, start_time, end_time)

        incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")

        # Remove these files from the results directory so they don't take up too much space on the buildbot.
        # The tools use the version we uploaded to the results server anyway.
        self._filesystem.remove(times_json_path)
        self._filesystem.remove(incremental_results_path)
        if results_including_passes:
            self._filesystem.remove(results_json_path)

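    # Upload results_including_passes.json, together with build and repository
    # revision metadata, to each host given by --results-server-host. All of
    # --master-name, --builder-name, --build-number and --build-slave must be
    # set for the upload to happen.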
    def upload_results(self, results_json_path, start_time, end_time):
        if not self._options.results_server_host:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_slave:
            _log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files.")
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem, self._port.host.executive).detect_scm_system(path) or self._port.host.scm()
            revision = scm.native_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_native_revision(path, revision)}

        for hostname in self._options.results_server_host:
            _log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, hostname)

            attrs = [
                ('master', 'build.webkit.org' if master_name == 'webkit.org' else master_name),  # FIXME: Pass in build.webkit.org.
                ('builder_name', builder_name),
                ('build_number', build_number),
                ('build_slave', build_slave),
                ('revisions', json.dumps(revisions)),
                ('start_time', str(start_time)),
                ('end_time', str(end_time)),
            ]

            uploader = FileUploader("http://%s/api/report" % hostname, 360)
            try:
                response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
                if not response:
                    _log.error("JSON upload failed; no response returned")
                    continue

                if response.code != 200:
                    _log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
                    continue

                response_text = response.read()
                try:
                    response_json = json.loads(response_text)
                except ValueError as error:
                    _log.error("JSON upload failed; failed to parse the response: %s", response_text)
                    continue

                if response_json['status'] != 'OK':
                    _log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
                    continue

                _log.info("JSON uploaded.")
            except Exception as error:
                _log.error("Upload failed: %s" % error)
                continue

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

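    # Build a trie keyed by test path whose leaves hold per-test stats:
    # (worker number, test number, pid, test run time in ms, total run time
    # in ms). Skipped tests are omitted.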
    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie

    def _print_expectation_line_for_test(self, format_string, test):
        line = self._expectations.model().get_expectation_line(test)
        print(format_string.format(test, line.expected_behavior, self._expectations.readable_filename_and_line_number(line), line.original_string or ''))

    def _print_expectations_for_subset(self, device_class, test_col_width, tests_to_run, tests_to_skip={}):
        format_string = '{{:{width}}} {{}} {{}} {{}}'.format(width=test_col_width)
        if tests_to_skip:
            print('')
            print('Tests to skip ({})'.format(len(tests_to_skip)))
            for test in sorted(tests_to_skip):
                self._print_expectation_line_for_test(format_string, test)

        print('')
        print('Tests to run{} ({})'.format(' for ' + device_class if device_class else '', len(tests_to_run)))
        for test in sorted(tests_to_run):
            self._print_expectation_line_for_test(format_string, test)

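    # Collect and partition the tests exactly as run() does, but print the
    # matching expectation line for each test (grouped by device class)
    # instead of running anything.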
    def print_expectations(self, args):
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return -1

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names, force_expectations_pass=self._options.force)
        self._expectations.parse_all_expectations()

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)

        test_col_width = len(max(tests_to_run + list(tests_to_skip), key=len)) + 1

        default_device_tests = []

        # Look for tests with custom device requirements.
        custom_device_tests = defaultdict(list)
        for test_file in tests_to_run:
            custom_device = self._custom_device_for_test(test_file)
            if custom_device:
                custom_device_tests[custom_device].append(test_file)
            else:
                default_device_tests.append(test_file)

        if custom_device_tests:
            for device_class in custom_device_tests:
                _log.debug('{} tests use device {}'.format(len(custom_device_tests[device_class]), device_class))

        self._print_expectations_for_subset(None, test_col_width, tests_to_run, tests_to_skip)

        for device_class in custom_device_tests:
            device_tests = custom_device_tests[device_class]
            self._print_expectations_for_subset(device_class, test_col_width, device_tests)

        return 0