[GTK][WPE] Improve the way glib tests are run
[WebKit-https.git] / Tools / glib / api_test_runner.py
1 #!/usr/bin/env python
2 #
3 # Copyright (C) 2011, 2012, 2017 Igalia S.L.
4 #
5 # This library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Library General Public
7 # License as published by the Free Software Foundation; either
8 # version 2 of the License, or (at your option) any later version.
9 #
10 # This library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13 # Library General Public License for more details.
14 #
15 # You should have received a copy of the GNU Library General Public License
16 # along with this library; see the file COPYING.LIB.  If not, write to
17 # the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
18 # Boston, MA 02110-1301, USA.
19
20 import subprocess
21 import os
22 import errno
23 import sys
24 import re
25 from signal import SIGKILL, SIGSEGV
26 from glib_test_runner import GLibTestRunner
27
28 top_level_directory = os.path.normpath(os.path.join(os.path.dirname(__file__), "..", ".."))
29 sys.path.insert(0, os.path.join(top_level_directory, "Tools", "glib"))
30 import common
31 from webkitpy.common.host import Host
32 from webkitpy.common.test_expectations import TestExpectations
33 from webkitpy.common.timeout_context import Timeout
34
35
class TestRunner(object):
    """Drive WebKit GLib/GoogleTest API test programs and report unexpected results.

    Subclasses must implement is_glib_test() and is_google_test() to route
    each test binary to the matching runner, and may override TEST_DIRS with
    the directories (relative to the test programs base directory) that are
    scanned for test binaries.
    """

    # Relative directories searched for test binaries; overridden by subclasses.
    TEST_DIRS = []

    def __init__(self, port, options, tests=None):
        """Set up the port, driver and expectations for a test run.

        port -- WebKit port name understood by webkitpy's port factory.
        options -- parsed command line options (see add_options()).
        tests -- optional list of test binaries or directories; when empty,
                 TEST_DIRS is scanned instead.
        """
        # Default changed from a shared mutable [] to None (same behavior,
        # avoids the mutable-default-argument pitfall).
        self._options = options

        self._build_type = "Debug" if self._options.debug else "Release"
        common.set_build_types((self._build_type,))
        self._port = Host().port_factory.get(port)
        self._driver = self._create_driver()

        self._programs_path = common.binary_build_path()
        expectations_file = os.path.join(common.top_level_path(), "Tools", "TestWebKitAPI", "glib", "TestExpectations.json")
        self._expectations = TestExpectations(self._port.name(), expectations_file, self._build_type)
        self._tests = self._get_tests(tests if tests is not None else [])
        self._disabled_tests = []

    def _test_programs_base_dir(self):
        """Return the directory containing the built test programs."""
        return os.path.join(self._programs_path, "TestWebKitAPI")

    def _get_tests_from_dir(self, test_dir):
        """Return the executable test binaries found directly in test_dir.

        Only regular files whose name starts with "test" (case-insensitive)
        and that are executable are considered. Returns [] when test_dir
        does not exist.
        """
        if not os.path.isdir(test_dir):
            return []

        tests = []
        for test_file in os.listdir(test_dir):
            if not test_file.lower().startswith("test"):
                continue
            test_path = os.path.join(test_dir, test_file)
            if os.path.isfile(test_path) and os.access(test_path, os.X_OK):
                tests.append(test_path)
        return tests

    def _get_tests(self, initial_tests):
        """Expand initial_tests (files or directories) into a flat list.

        Falls back to scanning every TEST_DIRS entry when initial_tests
        yields nothing.
        """
        tests = []
        for test in initial_tests:
            if os.path.isdir(test):
                tests.extend(self._get_tests_from_dir(test))
            else:
                tests.append(test)
        if tests:
            return tests

        tests = []
        for test_dir in self.TEST_DIRS:
            absolute_test_dir = os.path.join(self._test_programs_base_dir(), test_dir)
            tests.extend(self._get_tests_from_dir(absolute_test_dir))
        return tests

    def _create_driver(self, port_options=None):
        """Create and sanity-check the port driver used to build the test
        environment.

        port_options is unused by this implementation; its default changed
        from a shared mutable [] to None with no behavior change.
        """
        self._port._display_server = self._options.display_server
        driver = self._port.create_driver(worker_number=0, no_timeout=True)._make_driver(pixel_tests=False)
        if not driver.check_driver(self._port):
            raise RuntimeError("Failed to check driver %s" % driver.__class__.__name__)
        return driver

    def _setup_testing_environment(self):
        """Build the environment dictionary passed to every test program."""
        self._test_env = self._driver._setup_environ_for_test()
        self._test_env["TEST_WEBKIT_API_WEBKIT2_RESOURCES_PATH"] = common.top_level_path("Tools", "TestWebKitAPI", "Tests", "WebKit")
        self._test_env["TEST_WEBKIT_API_WEBKIT2_INJECTED_BUNDLE_PATH"] = common.library_build_path()
        self._test_env["WEBKIT_EXEC_PATH"] = self._programs_path

        return True

    def _tear_down_testing_environment(self):
        """Stop the driver started for the test run, if any."""
        if self._driver:
            self._driver.stop()

    def _test_cases_to_skip(self, test_program):
        """Return the skipped test cases for test_program, honoring the
        --skipped command line option."""
        if self._options.skipped_action != 'skip':
            return []

        return self._expectations.skipped_subtests(os.path.basename(test_program))

    def _should_run_test_program(self, test_program):
        """Return False for disabled programs and (when skipping) for
        programs listed as skipped in the expectations."""
        for disabled_test in self._disabled_tests:
            if test_program.endswith(disabled_test):
                return False

        if self._options.skipped_action != 'skip':
            return True

        return os.path.basename(test_program) not in self._expectations.skipped_tests()

    def _kill_process(self, pid):
        """Forcefully terminate pid, ignoring the case where it already died."""
        try:
            os.kill(pid, SIGKILL)
        except OSError:
            # Process already died.
            pass

    def _waitpid(self, pid):
        """Wait for pid and return its exit status, or the negative signal
        number when it was killed by a signal."""
        while True:
            try:
                dummy, status = os.waitpid(pid, 0)
                if os.WIFSIGNALED(status):
                    return -os.WTERMSIG(status)
                if os.WIFEXITED(status):
                    return os.WEXITSTATUS(status)

                # Should never happen
                raise RuntimeError("Unknown child exit status!")
            except (OSError, IOError) as e:
                if e.errno == errno.EINTR:
                    continue
                if e.errno == errno.ECHILD:
                    # This happens if SIGCLD is set to be ignored or waiting
                    # for child processes has otherwise been disabled for our
                    # process.  This child is dead, we can't get the status.
                    return 0
                raise

    def _run_test_glib(self, test_program):
        """Run a GLib test binary and return its {test_case: result} map."""
        timeout = self._options.timeout
        # Slow tests get five times the configured timeout.
        if self._expectations.is_slow(os.path.basename(test_program)):
            timeout *= 5
        return GLibTestRunner(test_program, timeout).run(skipped=self._test_cases_to_skip(test_program))

    def _get_tests_from_google_test_suite(self, test_program):
        """List the test cases of a GoogleTest binary, minus skipped ones.

        Returns [] when the binary cannot be queried, after reporting the
        error (previously this returned 1, which made the caller crash with
        a TypeError when iterating it).
        """
        try:
            output = subprocess.check_output([test_program, '--gtest_list_tests'], env=self._test_env)
        except subprocess.CalledProcessError:
            sys.stderr.write("ERROR: could not list available tests for binary %s.\n" % (test_program))
            sys.stderr.flush()
            return []

        skipped_test_cases = self._test_cases_to_skip(test_program)

        tests = []
        prefix = None
        for line in output.split('\n'):
            # Suite names are flush left ("Suite."); test cases are indented
            # under them, so the full name is the current prefix + case name.
            if not line.startswith('  '):
                prefix = line
                continue
            else:
                test_name = prefix + line.strip()
                if test_name not in skipped_test_cases:
                    tests.append(test_name)
        return tests

    def _run_google_test(self, test_program, subtest):
        """Run a single GoogleTest case in its own process.

        The child runs under a pseudo-terminal so its output can be captured
        and echoed line by line. Returns a {subtest: result} dict where
        result is "PASS", "FAIL", "CRASH" or "TIMEOUT".
        """
        command = [test_program, '--gtest_filter=%s' % (subtest)]
        timeout = self._options.timeout
        if self._expectations.is_slow(os.path.basename(test_program), subtest):
            timeout *= 5

        pid, fd = os.forkpty()
        if pid == 0:
            os.execvpe(command[0], command, self._test_env)
            sys.exit(0)

        with Timeout(timeout):
            try:
                common.parse_output_lines(fd, sys.stdout.write)
                status = self._waitpid(pid)
            except RuntimeError:
                # Timeout raises RuntimeError; make sure the child is gone.
                self._kill_process(pid)
                sys.stdout.write("**TIMEOUT** %s\n" % subtest)
                sys.stdout.flush()
                return {subtest: "TIMEOUT"}

        if status == -SIGSEGV:
            sys.stdout.write("**CRASH** %s\n" % subtest)
            sys.stdout.flush()
            return {subtest: "CRASH"}

        if status != 0:
            return {subtest: "FAIL"}

        return {subtest: "PASS"}

    def _run_google_test_suite(self, test_program):
        """Run every non-skipped case of a GoogleTest binary."""
        result = {}
        for subtest in self._get_tests_from_google_test_suite(test_program):
            result.update(self._run_google_test(test_program, subtest))
        return result

    def is_glib_test(self, test_program):
        """Return True when test_program should run under the GLib runner."""
        raise NotImplementedError

    def is_google_test(self, test_program):
        """Return True when test_program should run under GoogleTest."""
        raise NotImplementedError

    def _run_test(self, test_program):
        """Dispatch test_program to the matching runner; {} when unknown."""
        if self.is_glib_test(test_program):
            return self._run_test_glib(test_program)

        if self.is_google_test(test_program):
            return self._run_google_test_suite(test_program)

        return {}

    def run_tests(self):
        """Run all configured tests and report unexpected results.

        Returns the number of test programs with unexpected failures or
        timeouts (0 means success), or 1 when no tests were found or the
        environment could not be set up.
        """
        if not self._tests:
            sys.stderr.write("ERROR: tests not found in %s.\n" % (self._test_programs_base_dir()))
            sys.stderr.flush()
            return 1

        if not self._setup_testing_environment():
            return 1

        # Remove skipped tests now instead of when we find them, because
        # some tests might be skipped while setting up the test environment.
        self._tests = [test for test in self._tests if self._should_run_test_program(test)]

        crashed_tests = {}
        failed_tests = {}
        timed_out_tests = {}
        passed_tests = {}
        try:
            for test in self._tests:
                results = self._run_test(test)
                # items() instead of the Python 2-only iteritems() so the
                # script keeps working under Python 3 as well.
                for test_case, result in results.items():
                    if result in self._expectations.get_expectation(os.path.basename(test), test_case):
                        continue

                    if result == "FAIL":
                        failed_tests.setdefault(test, []).append(test_case)
                    elif result == "TIMEOUT":
                        timed_out_tests.setdefault(test, []).append(test_case)
                    elif result == "CRASH":
                        crashed_tests.setdefault(test, []).append(test_case)
                    elif result == "PASS":
                        passed_tests.setdefault(test, []).append(test_case)
        finally:
            self._tear_down_testing_environment()

        def report(tests, title, base_dir):
            # Print one section per category, stripping base_dir for brevity.
            if not tests:
                return
            sys.stdout.write("\nUnexpected %s (%d)\n" % (title, sum(len(value) for value in tests.values())))
            for test in tests:
                sys.stdout.write("    %s\n" % (test.replace(base_dir, '', 1)))
                for test_case in tests[test]:
                    sys.stdout.write("        %s\n" % (test_case))
            sys.stdout.flush()

        report(failed_tests, "failures", self._test_programs_base_dir())
        report(crashed_tests, "crashes", self._test_programs_base_dir())
        report(timed_out_tests, "timeouts", self._test_programs_base_dir())
        report(passed_tests, "passes", self._test_programs_base_dir())

        return len(failed_tests) + len(timed_out_tests)
280
281
def add_options(option_parser):
    """Register the command line options shared by the API test runners."""
    # One (flags, keyword-arguments) entry per option, registered in order.
    option_specs = [
        (('-r', '--release'),
         dict(action='store_true', dest='release',
              help='Run in Release')),
        (('-d', '--debug'),
         dict(action='store_true', dest='debug',
              help='Run in Debug')),
        (('--skipped',),
         dict(action='store', dest='skipped_action',
              choices=['skip', 'ignore', 'only'], default='skip',
              metavar='skip|ignore|only',
              help='Specifies how to treat the skipped tests')),
        (('-t', '--timeout'),
         dict(action='store', type='int', dest='timeout', default=5,
              help='Time in seconds until a test times out')),
    ]
    for flags, kwargs in option_specs:
        option_parser.add_option(*flags, **kwargs)