from __future__ import print_function, unicode_literals

import json
import os
import sys
from six import iteritems, itervalues

from wptserve import sslutils

from . import environment as env
from . import instruments
from . import products
from . import testloader
from . import wptcommandline
from . import wptlogging
from . import wpttest
from mozlog import capture, handlers
from .font import FontInstaller
from .testrunner import ManagerGroup
from .browsers.base import NullBrowser

here = os.path.split(__file__)[0]

logger = None

"""Runner for web-platform-tests

The runner has several design goals:

* Tests should run with no modification from upstream.

* Tests should be regarded as "untrusted" so that errors, timeouts and even
  crashes in the tests can be handled without failing the entire test run.

* For performance, tests can be run in multiple browsers in parallel.

The upstream repository has the facility for creating a test manifest in JSON
format. This manifest is used directly to determine which tests exist. Local
metadata files are used to store the expected test results.
"""

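# setup_logging must be called (as main() does) before any of the entry points
# below run, since they rely on the module-level logger.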
def setup_logging(*args, **kwargs):
    global logger
    logger = wptlogging.setup(*args, **kwargs)
    return logger


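# Build the run info and the TestLoader for the selected product, applying any
# include/exclude filters, SSL capability, and chunking options taken from the
# command-line kwargs.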
def get_loader(test_paths, product, debug=None, run_info_extras=None, chunker_kwargs=None, **kwargs):
    if run_info_extras is None:
        run_info_extras = {}

    run_info = wpttest.get_run_info(kwargs["run_info"], product,
                                    browser_version=kwargs.get("browser_version"),
                                    browser_channel=kwargs.get("browser_channel"),
                                    verify=kwargs.get("verify"),
                                    debug=debug,
                                    extras=run_info_extras,
                                    enable_webrender=kwargs.get("enable_webrender"))

    test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"],
                                               manifest_download=kwargs["manifest_download"]).load()

    manifest_filters = []

    if kwargs["include"] or kwargs["exclude"] or kwargs["include_manifest"] or kwargs["default_exclude"]:
        manifest_filters.append(testloader.TestFilter(include=kwargs["include"],
                                                      exclude=kwargs["exclude"],
                                                      manifest_path=kwargs["include_manifest"],
                                                      test_manifests=test_manifests,
                                                      explicit=kwargs["default_exclude"]))

    ssl_enabled = sslutils.get_cls(kwargs["ssl_type"]).ssl_enabled
    test_loader = testloader.TestLoader(test_manifests,
                                        kwargs["test_types"],
                                        run_info,
                                        manifest_filters=manifest_filters,
                                        chunk_type=kwargs["chunk_type"],
                                        total_chunks=kwargs["total_chunks"],
                                        chunk_number=kwargs["this_chunk"],
                                        include_https=ssl_enabled,
                                        skip_timeout=kwargs["skip_timeout"],
                                        skip_implementation_status=kwargs["skip_implementation_status"],
                                        chunker_kwargs=chunker_kwargs)
    return run_info, test_loader


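# The list_* helpers below load the manifest exactly as a normal run would, but
# only print information (group names, disabled tests, or test ids) rather than
# executing anything.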
def list_test_groups(test_paths, product, **kwargs):
    env.do_delayed_imports(logger, test_paths)

    run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)

    run_info, test_loader = get_loader(test_paths, product,
                                       run_info_extras=run_info_extras, **kwargs)

    for item in sorted(test_loader.groups(kwargs["test_types"])):
        print(item)


def list_disabled(test_paths, product, **kwargs):
    env.do_delayed_imports(logger, test_paths)

    rv = []

    run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)

    run_info, test_loader = get_loader(test_paths, product,
                                       run_info_extras=run_info_extras, **kwargs)

    for test_type, tests in iteritems(test_loader.disabled_tests):
        for test in tests:
            rv.append({"test": test.id, "reason": test.disabled()})
    print(json.dumps(rv, indent=2))


def list_tests(test_paths, product, **kwargs):
    env.do_delayed_imports(logger, test_paths)

    run_info_extras = products.load_product(kwargs["config"], product)[-1](**kwargs)

    run_info, test_loader = get_loader(test_paths, product,
                                       run_info_extras=run_info_extras, **kwargs)

    for test in test_loader.test_ids:
        print(test)


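# Decide whether the browser should pause after each test. Unless the option
# was set explicitly, pausing is only enabled for a non-headless run of exactly
# one testharness test with no repeats or reruns, i.e. the typical interactive
# debugging case.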
def get_pause_after_test(test_loader, **kwargs):
    if kwargs["pause_after_test"] is None:
        if kwargs["repeat_until_unexpected"]:
            return False
        if kwargs["headless"]:
            return False
        tests = test_loader.tests
        is_single_testharness = (sum(len(item) for item in itervalues(tests)) == 1 and
                                 len(tests.get("testharness", [])) == 1)
        if kwargs["repeat"] == 1 and kwargs["rerun"] == 1 and is_single_testharness:
            return True
        return False
    return kwargs["pause_after_test"]


def run_tests(config, test_paths, product, **kwargs):
    """Set up the test environment, load the list of tests to be executed, and
    invoke the remainder of the code to execute tests"""
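    # recording instruments the phases of the run (see the recording.set()
    # calls below); it is a no-op unless instrument_to_file is given.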
    if kwargs["instrument_to_file"] is None:
        recorder = instruments.NullInstrument()
    else:
        recorder = instruments.Instrument(kwargs["instrument_to_file"])
    with recorder as recording, capture.CaptureIO(logger, not kwargs["no_capture_stdio"]):
        recording.set(["startup"])
        env.do_delayed_imports(logger, test_paths)

        product = products.load_product(config, product, load_cls=True)

        env_extras = product.get_env_extras(**kwargs)

        product.check_args(**kwargs)

        if kwargs["install_fonts"]:
            env_extras.append(FontInstaller(
                logger,
                font_dir=kwargs["font_dir"],
                ahem=os.path.join(test_paths["/"]["tests_path"], "fonts/Ahem.ttf")
            ))

        recording.set(["startup", "load_tests"])

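        # Decide how tests are handed out to runner processes: one at a time
        # (SingleTestSource) or grouped by directory up to the run_by_dir
        # depth (PathGroupedSource).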
        test_source_kwargs = {"processes": kwargs["processes"]}
        chunker_kwargs = {}
        if kwargs["run_by_dir"] is False:
            test_source_cls = testloader.SingleTestSource
        else:
            # A value of None indicates infinite depth
            test_source_cls = testloader.PathGroupedSource
            test_source_kwargs["depth"] = kwargs["run_by_dir"]
            chunker_kwargs["depth"] = kwargs["run_by_dir"]

        run_info, test_loader = get_loader(test_paths,
                                           product.name,
                                           run_info_extras=product.run_info_extras(**kwargs),
                                           chunker_kwargs=chunker_kwargs,
                                           **kwargs)


        logger.info("Using %i client processes" % kwargs["processes"])

        skipped_tests = 0
        test_total = 0
        unexpected_total = 0

        if len(test_loader.test_ids) == 0 and kwargs["test_list"]:
            logger.critical("Unable to find any tests at the path(s):")
            for path in kwargs["test_list"]:
                logger.critical(" %s" % path)
            logger.critical("Please check spelling and make sure there are tests in the specified path(s).")
            return False
        kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)

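        # SSL configuration passed to the test environment; which sub-dict is
        # used depends on the ssl_type backend selected on the command line.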
        ssl_config = {"type": kwargs["ssl_type"],
                      "openssl": {"openssl_binary": kwargs["openssl_binary"]},
                      "pregenerated": {"host_key_path": kwargs["host_key_path"],
                                       "host_cert_path": kwargs["host_cert_path"],
                                       "ca_cert_path": kwargs["ca_cert_path"]}}

        testharness_timeout_multiplier = product.get_timeout_multiplier("testharness", run_info, **kwargs)

        recording.set(["startup", "start_environment"])
        with env.TestEnvironment(test_paths,
                                 testharness_timeout_multiplier,
                                 kwargs["pause_after_test"],
                                 kwargs["debug_info"],
                                 product.env_options,
                                 ssl_config,
                                 env_extras) as test_environment:
            recording.set(["startup", "ensure_environment"])
            try:
                test_environment.ensure_started()
            except env.TestEnvironmentError as e:
                logger.critical("Error starting test environment: %s" % str(e))
                raise

            recording.set(["startup"])

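            # Each iteration of this loop runs the selected suite once; with
            # repeat_until_unexpected set, it keeps going until an unexpected
            # result occurs (or everything was skipped on the first pass).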
            repeat = kwargs["repeat"]
            repeat_count = 0
            repeat_until_unexpected = kwargs["repeat_until_unexpected"]

            while repeat_count < repeat or repeat_until_unexpected:
                repeat_count += 1
                if repeat_until_unexpected:
                    logger.info("Repetition %i" % (repeat_count))
                elif repeat > 1:
                    logger.info("Repetition %i / %i" % (repeat_count, repeat))

                test_count = 0
                unexpected_count = 0
                logger.suite_start(test_loader.test_ids,
                                   name='web-platform-test',
                                   run_info=run_info,
                                   extra={"run_by_dir": kwargs["run_by_dir"]})
                for test_type in kwargs["test_types"]:
                    logger.info("Running %s tests" % test_type)

                    # WebDriver tests may create and destroy multiple browser
                    # processes as part of their expected behavior. These
                    # processes are managed by a WebDriver server binary. This
                    # obviates the need for wptrunner to provide a browser, so
                    # the NullBrowser is used in place of the "target" browser
                    if test_type == "wdspec":
                        browser_cls = NullBrowser
                    else:
                        browser_cls = product.browser_cls

                    browser_kwargs = product.get_browser_kwargs(test_type,
                                                                run_info,
                                                                config=test_environment.config,
                                                                **kwargs)

                    executor_cls = product.executor_classes.get(test_type)
                    executor_kwargs = product.get_executor_kwargs(test_type,
                                                                  test_environment.config,
                                                                  test_environment.cache_manager,
                                                                  run_info,
                                                                  **kwargs)

                    if executor_cls is None:
                        logger.error("Unsupported test type %s for product %s" %
                                     (test_type, product.name))
                        continue

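                    # Disabled tests are still reported (as SKIP) so they show
                    # up in the results; testharness tests the executor cannot
                    # support (testdriver/jsshell) are likewise skipped.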
                    for test in test_loader.disabled_tests[test_type]:
                        logger.test_start(test.id)
                        logger.test_end(test.id, status="SKIP")
                        skipped_tests += 1

                    if test_type == "testharness":
                        run_tests = {"testharness": []}
                        for test in test_loader.tests["testharness"]:
                            if ((test.testdriver and not executor_cls.supports_testdriver) or
                                (test.jsshell and not executor_cls.supports_jsshell)):
                                logger.test_start(test.id)
                                logger.test_end(test.id, status="SKIP")
                                skipped_tests += 1
                            else:
                                run_tests["testharness"].append(test)
                    else:
                        run_tests = test_loader.tests

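                    # ManagerGroup runs this test type across the configured
                    # number of parallel browser instances and reports how many
                    # tests ran and how many had unexpected results.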
                    recording.pause()
                    with ManagerGroup("web-platform-tests",
                                      kwargs["processes"],
                                      test_source_cls,
                                      test_source_kwargs,
                                      browser_cls,
                                      browser_kwargs,
                                      executor_cls,
                                      executor_kwargs,
                                      kwargs["rerun"],
                                      kwargs["pause_after_test"],
                                      kwargs["pause_on_unexpected"],
                                      kwargs["restart_on_unexpected"],
                                      kwargs["debug_info"],
                                      not kwargs["no_capture_stdio"],
                                      recording=recording) as manager_group:
                        try:
                            manager_group.run(test_type, run_tests)
                        except KeyboardInterrupt:
                            logger.critical("Main thread got signal")
                            manager_group.stop()
                            raise
                        test_count += manager_group.test_count()
                        unexpected_count += manager_group.unexpected_count()
                recording.set(["after-end"])
                test_total += test_count
                unexpected_total += unexpected_count
                logger.info("Got %i unexpected results" % unexpected_count)
                logger.suite_end()
                if repeat_until_unexpected and unexpected_total > 0:
                    break
                if repeat_count == 1 and len(test_loader.test_ids) == skipped_tests:
                    break

        if test_total == 0:
            if skipped_tests > 0:
                logger.warning("All requested tests were skipped")
            else:
                if kwargs["default_exclude"]:
                    logger.info("No tests ran")
                    return True
                else:
                    logger.critical("No tests ran")
                    return False

        if unexpected_total and not kwargs["fail_on_unexpected"]:
            logger.info("Tolerating %s unexpected results" % unexpected_total)
            return True

        return unexpected_total == 0


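# --stability is the deprecated spelling of --verify; when it is used the
# verify_* options are overridden with legacy defaults before delegating to
# stability.check_stability.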
def check_stability(**kwargs):
    from . import stability
    if kwargs["stability"]:
        logger.warning("--stability is deprecated; please use --verify instead!")
        kwargs['verify_max_time'] = None
        kwargs['verify_chaos_mode'] = False
        kwargs['verify_repeat_loop'] = 0
        kwargs['verify_repeat_restart'] = 10 if kwargs['repeat'] == 1 else kwargs['repeat']
        kwargs['verify_output_results'] = True

    return stability.check_stability(logger,
                                     max_time=kwargs['verify_max_time'],
                                     chaos_mode=kwargs['verify_chaos_mode'],
                                     repeat_loop=kwargs['verify_repeat_loop'],
                                     repeat_restart=kwargs['verify_repeat_restart'],
                                     output_results=kwargs['verify_output_results'],
                                     **kwargs)


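# Dispatch to the requested mode. The return value is truthy when the run
# should be treated as failed: unexpected results, a stability failure, or any
# message logged at CRITICAL level.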
def start(**kwargs):
    assert logger is not None

    logged_critical = wptlogging.LoggedAboveLevelHandler("CRITICAL")
    handler = handlers.LogLevelFilter(logged_critical, "CRITICAL")
    logger.add_handler(handler)

    rv = False
    try:
        if kwargs["list_test_groups"]:
            list_test_groups(**kwargs)
        elif kwargs["list_disabled"]:
            list_disabled(**kwargs)
        elif kwargs["list_tests"]:
            list_tests(**kwargs)
        elif kwargs["verify"] or kwargs["stability"]:
            rv = check_stability(**kwargs) or logged_critical.has_log
        else:
            rv = not run_tests(**kwargs) or logged_critical.has_log
    finally:
        logger.remove_handler(handler)
    return rv


def main():
    """Main entry point when calling from the command line"""
    kwargs = wptcommandline.parse_args()

    try:
        if kwargs["prefs_root"] is None:
            kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))

        setup_logging(kwargs, {"raw": sys.stdout})

        return start(**kwargs)
    except Exception:
        if kwargs["pdb"]:
            import pdb
            import traceback
            print(traceback.format_exc())
            pdb.post_mortem()
        else:
            raise