Unreviewed. Update W3C WebDriver imported tests.
[WebKit-https.git] / WebDriverTests / imported / w3c / tools / wptrunner / wptrunner / wptrunner.py
1 from __future__ import unicode_literals
2
3 import json
4 import os
5 import sys
6
7 import environment as env
8 import products
9 import testloader
10 import wptcommandline
11 import wptlogging
12 import wpttest
13 from font import FontInstaller
14 from testrunner import ManagerGroup
15 from browsers.base import NullBrowser
16
17 here = os.path.split(__file__)[0]
18
19 logger = None
20
21 """Runner for web-platform-tests
22
23 The runner has several design goals:
24
25 * Tests should run with no modification from upstream.
26
27 * Tests should be regarded as "untrusted" so that errors, timeouts and even
28   crashes in the tests can be handled without failing the entire test run.
29
30 * For performance, tests can be run in multiple browsers in parallel.
31
32 The upstream repository has the facility for creating a test manifest in JSON
33 format. This manifest is used directly to determine which tests exist. Local
34 metadata files are used to store the expected test results.
35 """
36
def setup_logging(*setup_args, **setup_kwargs):
    """Initialise the shared module-level logger via wptlogging.setup."""
    global logger
    logger = wptlogging.setup(*setup_args, **setup_kwargs)
40
41
def get_loader(test_paths, product, ssl_env, debug=None, run_info_extras=None, **kwargs):
    """Build the run-info dict and a configured TestLoader.

    Returns a ``(run_info, test_loader)`` tuple; ``kwargs`` carries the
    parsed command-line options used to configure filtering and chunking.
    """
    extras = {} if run_info_extras is None else run_info_extras

    run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=debug,
                                    extras=extras)

    manifest_loader = testloader.ManifestLoader(
        test_paths,
        force_manifest_update=kwargs["manifest_update"],
        manifest_download=kwargs["manifest_download"])
    test_manifests = manifest_loader.load()

    manifest_filters = []
    meta_filters = []

    # Only construct a TestFilter when some inclusion/exclusion was requested.
    if kwargs["include"] or kwargs["exclude"] or kwargs["include_manifest"]:
        test_filter = testloader.TestFilter(include=kwargs["include"],
                                            exclude=kwargs["exclude"],
                                            manifest_path=kwargs["include_manifest"],
                                            test_manifests=test_manifests)
        manifest_filters.append(test_filter)
    if kwargs["tags"]:
        meta_filters.append(testloader.TagFilter(tags=kwargs["tags"]))

    test_loader = testloader.TestLoader(test_manifests,
                                        kwargs["test_types"],
                                        run_info,
                                        manifest_filters=manifest_filters,
                                        meta_filters=meta_filters,
                                        chunk_type=kwargs["chunk_type"],
                                        total_chunks=kwargs["total_chunks"],
                                        chunk_number=kwargs["this_chunk"],
                                        include_https=ssl_env.ssl_enabled,
                                        skip_timeout=kwargs["skip_timeout"])
    return run_info, test_loader
74
75
def list_test_groups(test_paths, product, **kwargs):
    """Print the sorted names of all test groups, one per line."""
    env.do_delayed_imports(logger, test_paths)

    ssl_env = env.ssl_env(logger, **kwargs)

    # The last element of the product tuple is the run_info_extras callable.
    run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)

    run_info, test_loader = get_loader(test_paths, product, ssl_env,
                                       run_info_extras=run_info_extras, **kwargs)

    for item in sorted(test_loader.groups(kwargs["test_types"])):
        # Single-argument print(x) behaves identically under Python 2's
        # print statement and Python 3's print function.
        print(item)
88
89
def list_disabled(test_paths, product, **kwargs):
    """Print a JSON array of disabled tests with their disable reasons."""
    env.do_delayed_imports(logger, test_paths)

    rv = []

    # The last element of the product tuple is the run_info_extras callable.
    run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)

    ssl_env = env.ssl_env(logger, **kwargs)

    run_info, test_loader = get_loader(test_paths, product, ssl_env,
                                       run_info_extras=run_info_extras, **kwargs)

    # dict.items() works on both Python 2 and 3; iteritems() is Python 2 only.
    for test_type, tests in test_loader.disabled_tests.items():
        for test in tests:
            rv.append({"test": test.id, "reason": test.disabled()})
    # Single-argument print() is valid under both Python 2 and 3.
    print(json.dumps(rv, indent=2))
106
107
def list_tests(test_paths, product, **kwargs):
    """Print the id of every test that would be run, one per line."""
    env.do_delayed_imports(logger, test_paths)

    ssl_env = env.ssl_env(logger, **kwargs)

    # The last element of the product tuple is the run_info_extras callable.
    run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)

    run_info, test_loader = get_loader(test_paths, product, ssl_env,
                                       run_info_extras=run_info_extras, **kwargs)

    # NOTE: an unused "rv = []" local was removed; this function only prints.
    for test in test_loader.test_ids:
        # Single-argument print() is valid under both Python 2 and 3.
        print(test)
122
123
def get_pause_after_test(test_loader, **kwargs):
    """Decide whether the runner should pause after each test.

    An explicit ``pause_after_test`` setting always wins. Otherwise pause
    only for a single-test, single-repetition, single-rerun invocation —
    the interactive debugging case.
    """
    # dict.values() works on both Python 2 and 3; itervalues() is Py2 only.
    total_tests = sum(len(item) for item in test_loader.tests.values())
    if kwargs["pause_after_test"] is None:
        if kwargs["repeat_until_unexpected"]:
            return False
        if kwargs["repeat"] == 1 and kwargs["rerun"] == 1 and total_tests == 1:
            return True
        return False
    return kwargs["pause_after_test"]
133
134
def run_tests(config, test_paths, product, **kwargs):
    """Run the selected tests, returning True iff no unexpected results occurred.

    Loads the product hooks, builds a test loader, starts the shared test
    environment, then runs each requested test type (possibly repeatedly)
    under a ManagerGroup of browser-controlling processes.
    """
    with wptlogging.CaptureIO(logger, not kwargs["no_capture_stdio"]):
        env.do_delayed_imports(logger, test_paths)

        (check_args,
         target_browser_cls, get_browser_kwargs,
         executor_classes, get_executor_kwargs,
         env_options, get_env_extras, run_info_extras) = products.load_product(config, product)

        ssl_env = env.ssl_env(logger, **kwargs)
        env_extras = get_env_extras(**kwargs)

        check_args(**kwargs)

        if kwargs["install_fonts"]:
            env_extras.append(FontInstaller(
                font_dir=kwargs["font_dir"],
                ahem=os.path.join(kwargs["tests_root"], "fonts/Ahem.ttf")
            ))

        if "test_loader" in kwargs:
            # A caller (e.g. stability checking) supplied a pre-built loader.
            run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=None,
                                            extras=run_info_extras(**kwargs))
            test_loader = kwargs["test_loader"]
        else:
            run_info, test_loader = get_loader(test_paths,
                                               product,
                                               ssl_env,
                                               run_info_extras=run_info_extras(**kwargs),
                                               **kwargs)

        test_source_kwargs = {"processes": kwargs["processes"]}
        if kwargs["run_by_dir"] is False:
            test_source_cls = testloader.SingleTestSource
        else:
            # A value of None indicates infinite depth
            test_source_cls = testloader.PathGroupedSource
            test_source_kwargs["depth"] = kwargs["run_by_dir"]

        logger.info("Using %i client processes" % kwargs["processes"])

        unexpected_total = 0

        kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)

        with env.TestEnvironment(test_paths,
                                 ssl_env,
                                 kwargs["pause_after_test"],
                                 kwargs["debug_info"],
                                 env_options,
                                 env_extras) as test_environment:
            try:
                test_environment.ensure_started()
            except env.TestEnvironmentError as e:
                # BUGFIX: BaseException.message is deprecated since Python 2.6
                # and removed in Python 3; format the exception object itself.
                logger.critical("Error starting test environment: %s" % e)
                raise

            repeat = kwargs["repeat"]
            repeat_count = 0
            repeat_until_unexpected = kwargs["repeat_until_unexpected"]

            while repeat_count < repeat or repeat_until_unexpected:
                repeat_count += 1
                if repeat_until_unexpected:
                    logger.info("Repetition %i" % (repeat_count))
                elif repeat > 1:
                    logger.info("Repetition %i / %i" % (repeat_count, repeat))

                unexpected_count = 0
                logger.suite_start(test_loader.test_ids, run_info)
                for test_type in kwargs["test_types"]:
                    logger.info("Running %s tests" % test_type)

                    # WebDriver tests may create and destroy multiple browser
                    # processes as part of their expected behavior. These
                    # processes are managed by a WebDriver server binary. This
                    # obviates the need for wptrunner to provide a browser, so
                    # the NullBrowser is used in place of the "target" browser
                    if test_type == "wdspec":
                        browser_cls = NullBrowser
                    else:
                        browser_cls = target_browser_cls

                    browser_kwargs = get_browser_kwargs(test_type,
                                                        run_info,
                                                        ssl_env=ssl_env,
                                                        **kwargs)

                    executor_cls = executor_classes.get(test_type)
                    executor_kwargs = get_executor_kwargs(test_type,
                                                          test_environment.external_config,
                                                          test_environment.cache_manager,
                                                          run_info,
                                                          **kwargs)

                    if executor_cls is None:
                        logger.error("Unsupported test type %s for product %s" %
                                     (test_type, product))
                        continue

                    # Report disabled tests as SKIP without running them.
                    for test in test_loader.disabled_tests[test_type]:
                        logger.test_start(test.id)
                        logger.test_end(test.id, status="SKIP")

                    # Skip testharness tests that need testdriver support the
                    # executor does not provide. (Local renamed from
                    # "run_tests" to avoid shadowing this function's name.)
                    if test_type == "testharness":
                        tests_to_run = {"testharness": []}
                        for test in test_loader.tests["testharness"]:
                            if test.testdriver and not executor_cls.supports_testdriver:
                                logger.test_start(test.id)
                                logger.test_end(test.id, status="SKIP")
                            else:
                                tests_to_run["testharness"].append(test)
                    else:
                        tests_to_run = test_loader.tests

                    with ManagerGroup("web-platform-tests",
                                      kwargs["processes"],
                                      test_source_cls,
                                      test_source_kwargs,
                                      browser_cls,
                                      browser_kwargs,
                                      executor_cls,
                                      executor_kwargs,
                                      kwargs["rerun"],
                                      kwargs["pause_after_test"],
                                      kwargs["pause_on_unexpected"],
                                      kwargs["restart_on_unexpected"],
                                      kwargs["debug_info"]) as manager_group:
                        try:
                            manager_group.run(test_type, tests_to_run)
                        except KeyboardInterrupt:
                            logger.critical("Main thread got signal")
                            manager_group.stop()
                            raise
                    unexpected_count += manager_group.unexpected_count()

                unexpected_total += unexpected_count
                logger.info("Got %i unexpected results" % unexpected_count)
                if repeat_until_unexpected and unexpected_total > 0:
                    break
                logger.suite_end()
    return unexpected_total == 0
277
278
def check_stability(**kwargs):
    """Run the stability (flakiness) checker over the selected tests."""
    # Imported lazily so the module is only loaded when --verify is used.
    import stability as stability_mod
    return stability_mod.check_stability(logger, **kwargs)
282
283
def start(**kwargs):
    """Dispatch to the mode selected on the command line.

    The listing/verify modes return None; a full test run returns the
    negation of run_tests' success flag (i.e. a process-exit-style value).
    """
    # First matching flag wins, mirroring the original if/elif chain.
    dispatch = (("list_test_groups", list_test_groups),
                ("list_disabled", list_disabled),
                ("list_tests", list_tests),
                ("verify", check_stability))
    for flag, handler in dispatch:
        if kwargs[flag]:
            handler(**kwargs)
            return None
    return not run_tests(**kwargs)
295
296
def main():
    """Main entry point when calling from the command line"""
    kwargs = wptcommandline.parse_args()

    try:
        if kwargs["prefs_root"] is None:
            # Default the prefs directory to one next to this module.
            kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))

        setup_logging(kwargs, {"raw": sys.stdout})

        return start(**kwargs)
    except Exception:
        if kwargs["pdb"]:
            # Drop into the post-mortem debugger when --pdb was given.
            import pdb, traceback
            # Single-argument print() is valid under both Python 2 and 3;
            # the bare print statement was Python 2 only.
            print(traceback.format_exc())
            pdb.post_mortem()
        else:
            raise