9855ecc672c9c6c2c78001fe5f63ccac5ddd6816
[WebKit-https.git] / WebDriverTests / imported / w3c / tools / wptrunner / wptrunner / wptrunner.py
1 from __future__ import unicode_literals
2
3 import json
4 import os
5 import sys
6
7 import environment as env
8 import products
9 import testloader
10 import wptcommandline
11 import wptlogging
12 import wpttest
13 from font import FontInstaller
14 from testrunner import ManagerGroup
15 from browsers.base import NullBrowser
16
17 here = os.path.split(__file__)[0]
18
19 logger = None
20
21 """Runner for web-platform-tests
22
23 The runner has several design goals:
24
25 * Tests should run with no modification from upstream.
26
27 * Tests should be regarded as "untrusted" so that errors, timeouts and even
28   crashes in the tests can be handled without failing the entire test run.
29
* For performance, tests can be run in multiple browsers in parallel.
31
32 The upstream repository has the facility for creating a test manifest in JSON
33 format. This manifest is used directly to determine which tests exist. Local
34 metadata files are used to store the expected test results.
35 """
36
def setup_logging(*args, **kwargs):
    """Initialise the module-global ``logger`` used throughout this file.

    All arguments are forwarded unchanged to ``wptlogging.setup``; the
    logger it returns is stored in the module-level ``logger`` global so
    the other functions here (which read that global) can emit output.
    Must be called before ``start``/``run_tests``.
    """
    global logger
    logger = wptlogging.setup(*args, **kwargs)
40
def get_loader(test_paths, product, ssl_env, debug=None, run_info_extras=None, **kwargs):
    """Construct the run-info dict and a configured TestLoader.

    Reads the test manifests for ``test_paths`` (optionally forcing a
    manifest update), builds include/exclude and tag filters from the
    command-line ``kwargs``, and returns ``(run_info, test_loader)``.
    """
    extras = {} if run_info_extras is None else run_info_extras

    run_info = wpttest.get_run_info(kwargs["run_info"], product,
                                    debug=debug, extras=extras)

    manifest_loader = testloader.ManifestLoader(
        test_paths, force_manifest_update=kwargs["manifest_update"])
    test_manifests = manifest_loader.load()

    # Filters applied against the manifest itself (path include/exclude)
    # versus filters driven by expectation metadata (tags).
    manifest_filters = []
    meta_filters = []

    if any(kwargs[name] for name in ("include", "exclude", "include_manifest")):
        path_filter = testloader.TestFilter(include=kwargs["include"],
                                            exclude=kwargs["exclude"],
                                            manifest_path=kwargs["include_manifest"],
                                            test_manifests=test_manifests)
        manifest_filters.append(path_filter)

    if kwargs["tags"]:
        meta_filters.append(testloader.TagFilter(tags=kwargs["tags"]))

    test_loader = testloader.TestLoader(test_manifests,
                                        kwargs["test_types"],
                                        run_info,
                                        manifest_filters=manifest_filters,
                                        meta_filters=meta_filters,
                                        chunk_type=kwargs["chunk_type"],
                                        total_chunks=kwargs["total_chunks"],
                                        chunk_number=kwargs["this_chunk"],
                                        include_https=ssl_env.ssl_enabled)
    return run_info, test_loader
71
72 def list_test_groups(test_paths, product, **kwargs):
73     env.do_delayed_imports(logger, test_paths)
74
75     ssl_env = env.ssl_env(logger, **kwargs)
76
77     run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
78
79     run_info, test_loader = get_loader(test_paths, product, ssl_env,
80                                        run_info_extras=run_info_extras, **kwargs)
81
82     for item in sorted(test_loader.groups(kwargs["test_types"])):
83         print item
84
85
86 def list_disabled(test_paths, product, **kwargs):
87     env.do_delayed_imports(logger, test_paths)
88
89     rv = []
90
91     run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
92
93     ssl_env = env.ssl_env(logger, **kwargs)
94
95     run_info, test_loader = get_loader(test_paths, product, ssl_env,
96                                        run_info_extras=run_info_extras, **kwargs)
97
98     for test_type, tests in test_loader.disabled_tests.iteritems():
99         for test in tests:
100             rv.append({"test": test.id, "reason": test.disabled()})
101     print json.dumps(rv, indent=2)
102
103
104 def list_tests(test_paths, product, **kwargs):
105     env.do_delayed_imports(logger, test_paths)
106
107     rv = []
108
109     ssl_env = env.ssl_env(logger, **kwargs)
110
111     run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)
112
113     run_info, test_loader = get_loader(test_paths, product, ssl_env,
114                                        run_info_extras=run_info_extras, **kwargs)
115
116     for test in test_loader.test_ids:
117         print test
118
119
def get_pause_after_test(test_loader, **kwargs):
    """Decide whether the runner should pause after each test.

    An explicit ``pause_after_test`` setting always wins. Otherwise pause
    only when exactly one test is going to run exactly once (a single,
    non-repeated test), which is the interactive-debugging case.

    Fixes: use ``dict.values()`` instead of the Python-2-only
    ``itervalues()`` (identical behavior on py2, also valid on py3), and
    skip the test-count sum entirely when an explicit setting short-cuts
    the decision.
    """
    if kwargs["pause_after_test"] is not None:
        return kwargs["pause_after_test"]
    if kwargs["repeat_until_unexpected"]:
        return False
    total_tests = sum(len(item) for item in test_loader.tests.values())
    return kwargs["repeat"] == 1 and total_tests == 1
129
130
def run_tests(config, test_paths, product, **kwargs):
    """Run the selected web-platform-tests and report results via logger.

    Loads the product hooks, starts the test environment, then runs every
    selected test type for ``repeat`` iterations (or until an unexpected
    result with ``--repeat-until-unexpected``), dispatching work to a
    ManagerGroup of browser-manager processes.

    Returns True iff no unexpected results were seen across all
    repetitions.
    """
    # Capture stdout/stderr into the structured log unless disabled.
    with wptlogging.CaptureIO(logger, not kwargs["no_capture_stdio"]):
        env.do_delayed_imports(logger, test_paths)

        # Product hook tuple: arg validation, browser class + kwargs
        # factory, per-test-type executor classes + kwargs factory,
        # environment options/extras, and the run_info extras callable.
        (check_args,
         target_browser_cls, get_browser_kwargs,
         executor_classes, get_executor_kwargs,
         env_options, get_env_extras, run_info_extras) = products.load_product(config, product)

        ssl_env = env.ssl_env(logger, **kwargs)
        env_extras = get_env_extras(**kwargs)

        check_args(**kwargs)

        if kwargs["install_fonts"]:
            env_extras.append(FontInstaller(
                font_dir=kwargs["font_dir"],
                ahem=os.path.join(kwargs["tests_root"], "fonts/Ahem.ttf")
            ))

        # A caller may inject a pre-built loader; otherwise build one
        # (which also computes run_info) from the command-line kwargs.
        if "test_loader" in kwargs:
            run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=None,
                                            extras=run_info_extras(**kwargs))
            test_loader = kwargs["test_loader"]
        else:
            run_info, test_loader = get_loader(test_paths,
                                               product,
                                               ssl_env,
                                               run_info_extras=run_info_extras(**kwargs),
                                               **kwargs)

        test_source_kwargs = {"processes": kwargs["processes"]}
        if kwargs["run_by_dir"] is False:
            test_source_cls = testloader.SingleTestSource
        else:
            # A value of None indicates infinite depth
            test_source_cls = testloader.PathGroupedSource
            test_source_kwargs["depth"] = kwargs["run_by_dir"]

        logger.info("Using %i client processes" % kwargs["processes"])

        unexpected_total = 0

        # Resolve the tri-state pause_after_test option to a bool before
        # it is handed to the environment and the manager group.
        kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)

        with env.TestEnvironment(test_paths,
                                 ssl_env,
                                 kwargs["pause_after_test"],
                                 kwargs["debug_info"],
                                 env_options,
                                 env_extras) as test_environment:
            try:
                test_environment.ensure_started()
            except env.TestEnvironmentError as e:
                logger.critical("Error starting test environment: %s" % e.message)
                raise

            repeat = kwargs["repeat"]
            repeat_count = 0
            repeat_until_unexpected = kwargs["repeat_until_unexpected"]

            while repeat_count < repeat or repeat_until_unexpected:
                repeat_count += 1
                if repeat_until_unexpected:
                    logger.info("Repetition %i" % (repeat_count))
                elif repeat > 1:
                    logger.info("Repetition %i / %i" % (repeat_count, repeat))

                unexpected_count = 0
                logger.suite_start(test_loader.test_ids, run_info)
                for test_type in kwargs["test_types"]:
                    logger.info("Running %s tests" % test_type)

                    # WebDriver tests may create and destroy multiple browser
                    # processes as part of their expected behavior. These
                    # processes are managed by a WebDriver server binary. This
                    # obviates the need for wptrunner to provide a browser, so
                    # the NullBrowser is used in place of the "target" browser
                    if test_type == "wdspec":
                        browser_cls = NullBrowser
                    else:
                        browser_cls = target_browser_cls

                    browser_kwargs = get_browser_kwargs(test_type,
                                                        run_info,
                                                        ssl_env=ssl_env,
                                                        **kwargs)


                    executor_cls = executor_classes.get(test_type)
                    executor_kwargs = get_executor_kwargs(test_type,
                                                          test_environment.external_config,
                                                          test_environment.cache_manager,
                                                          run_info,
                                                          **kwargs)

                    # No executor registered for this test type on this
                    # product: skip the whole type rather than aborting.
                    if executor_cls is None:
                        logger.error("Unsupported test type %s for product %s" %
                                     (test_type, product))
                        continue

                    # Disabled tests are reported as SKIP without ever
                    # being handed to a runner.
                    for test in test_loader.disabled_tests[test_type]:
                        logger.test_start(test.id)
                        logger.test_end(test.id, status="SKIP")

                    with ManagerGroup("web-platform-tests",
                                      kwargs["processes"],
                                      test_source_cls,
                                      test_source_kwargs,
                                      browser_cls,
                                      browser_kwargs,
                                      executor_cls,
                                      executor_kwargs,
                                      kwargs["pause_after_test"],
                                      kwargs["pause_on_unexpected"],
                                      kwargs["restart_on_unexpected"],
                                      kwargs["debug_info"]) as manager_group:
                        try:
                            manager_group.run(test_type, test_loader.tests)
                        except KeyboardInterrupt:
                            # Make sure worker processes are torn down
                            # before propagating the interrupt.
                            logger.critical("Main thread got signal")
                            manager_group.stop()
                            raise
                    unexpected_count += manager_group.unexpected_count()

                unexpected_total += unexpected_count
                logger.info("Got %i unexpected results" % unexpected_count)
                # NOTE(review): when repeat_until_unexpected trips, the loop
                # exits before suite_end() is called for this repetition —
                # presumably intentional, but worth confirming upstream.
                if repeat_until_unexpected and unexpected_total > 0:
                    break
                logger.suite_end()
    return unexpected_total == 0
262
def start(**kwargs):
    """Dispatch to a listing mode or run the tests.

    The three ``list_*`` modes print and return None; a real run returns
    the inverted success flag from ``run_tests`` (0/False == success),
    suitable for use as a process exit status.
    """
    if kwargs["list_test_groups"]:
        list_test_groups(**kwargs)
        return
    if kwargs["list_disabled"]:
        list_disabled(**kwargs)
        return
    if kwargs["list_tests"]:
        list_tests(**kwargs)
        return
    return not run_tests(**kwargs)
272
273 def main():
274     """Main entry point when calling from the command line"""
275     kwargs = wptcommandline.parse_args()
276
277     try:
278         if kwargs["prefs_root"] is None:
279             kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))
280
281         setup_logging(kwargs, {"raw": sys.stdout})
282
283         return start(**kwargs)
284     except Exception:
285         if kwargs["pdb"]:
286             import pdb, traceback
287             print traceback.format_exc()
288             pdb.post_mortem()
289         else:
290             raise