Source code

Revision control

Other Tools

1
#!/usr/bin/env python
2
# ***** BEGIN LICENSE BLOCK *****
3
# This Source Code Form is subject to the terms of the Mozilla Public
4
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
5
# You can obtain one at http://mozilla.org/MPL/2.0/.
6
# ***** END LICENSE BLOCK *****
7
"""
8
run talos tests in a virtualenv
9
"""
10
11
import argparse
12
import io
13
import os
14
import sys
15
import pprint
16
import copy
17
import re
18
import shutil
19
import string
20
import subprocess
21
import json
22
23
import mozharness
24
from mozharness.base.config import parse_config_file
25
from mozharness.base.errors import PythonErrorList
26
from mozharness.base.log import OutputParser, DEBUG, ERROR, CRITICAL
27
from mozharness.base.log import INFO, WARNING
28
from mozharness.base.python import Python3Virtualenv
29
from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
30
from mozharness.base.vcs.vcsbase import MercurialScript
31
from mozharness.mozilla.testing.errors import TinderBoxPrintRe
32
from mozharness.mozilla.automation import TBPL_SUCCESS, TBPL_WORST_LEVEL_TUPLE
33
from mozharness.mozilla.automation import TBPL_RETRY, TBPL_FAILURE, TBPL_WARNING
34
from mozharness.mozilla.tooltool import TooltoolMixin
35
from mozharness.mozilla.testing.codecoverage import (
36
CodeCoverageMixin,
37
code_coverage_config_options
38
)
39
40
41
# Root of the mozharness scripts checkout, derived from the location of the
# installed mozharness package; 'external_tools' lives alongside it.
scripts_path = os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__)))
external_tools_path = os.path.join(scripts_path, 'external_tools')

# Patterns scanned for in talos output, on top of the generic Python error
# list.  'substr' entries are plain substring matches, 'regex' entries are
# pre-compiled regular expressions; 'level' is the log level to report at.
TalosErrorList = PythonErrorList + [
    {'regex': re.compile(r'''run-as: Package '.*' is unknown'''), 'level': DEBUG},
    {'substr': r'''FAIL: Graph server unreachable''', 'level': CRITICAL},
    {'substr': r'''FAIL: Busted:''', 'level': CRITICAL},
    {'substr': r'''FAIL: failed to cleanup''', 'level': ERROR},
    # NOTE(review): matches the tail of "PerfConfigurator.py" -- presumably
    # the missing leading 'P' is deliberate (case-insensitive-ish match);
    # confirm before "fixing" the spelling.
    {'substr': r'''erfConfigurator.py: Unknown error''', 'level': CRITICAL},
    {'substr': r'''talosError''', 'level': CRITICAL},
    {'regex': re.compile(r'''No machine_name called '.*' can be found'''), 'level': CRITICAL},
    {'substr': r"""No such file or directory: 'browser_output.txt'""",
     'level': CRITICAL,
     'explanation': "Most likely the browser failed to launch, or the test was otherwise "
                    "unsuccessful in even starting."},
]
57
58
# TODO: check for running processes on script invocation
59
60
61
class TalosOutputParser(OutputParser):
    """Parse talos stdout/stderr, tracking minidump hints, PERFHERDER_DATA
    lines, and the worst log/TBPL status seen so far."""

    minidump_regex = re.compile(r'''talosError: "error executing: '(\S+) (\S+) (\S+)'"''')
    RE_PERF_DATA = re.compile(r'.*PERFHERDER_DATA:\s+(\{.*\})')
    # Class-level default; the first call to update_worst_log_and_tbpl_levels
    # shadows it with an instance attribute.
    worst_tbpl_status = TBPL_SUCCESS

    def __init__(self, **kwargs):
        super(TalosOutputParser, self).__init__(**kwargs)
        self.minidump_output = None
        self.found_perf_data = []

    def update_worst_log_and_tbpl_levels(self, log_level, tbpl_level):
        """Record log_level / tbpl_level if they are worse than what we
        have already seen."""
        self.worst_log_level = self.worst_level(log_level,
                                                self.worst_log_level)
        self.worst_tbpl_status = self.worst_level(
            tbpl_level, self.worst_tbpl_status,
            levels=TBPL_WORST_LEVEL_TUPLE
        )

    def parse_single_line(self, line):
        """ In Talos land, every line that starts with RETURN: needs to be
        printed with a TinderboxPrint:"""
        if line.startswith("RETURN:"):
            # str.replace() returns a new string; without re-assignment the
            # substitution was silently discarded and the line passed through
            # unchanged.
            line = line.replace("RETURN:", "TinderboxPrint:")
        m = self.minidump_regex.search(line)
        if m:
            self.minidump_output = (m.group(1), m.group(2), m.group(3))

        m = self.RE_PERF_DATA.match(line)
        if m:
            self.found_perf_data.append(m.group(1))

        # now let's check if we should retry
        harness_retry_re = TinderBoxPrintRe['harness_error']['retry_regex']
        if harness_retry_re.search(line):
            self.critical(' %s' % line)
            self.update_worst_log_and_tbpl_levels(CRITICAL, TBPL_RETRY)
            return  # skip base parse_single_line
        super(TalosOutputParser, self).parse_single_line(line)
99
100
101
class Talos(TestingMixin, MercurialScript, TooltoolMixin,
            Python3Virtualenv, CodeCoverageMixin):
    """
    install and run Talos tests
    """
    # Command-line options accepted by this script, in the mozharness
    # [[flags], {argparse-style kwargs}] format.
    config_options = [
        [["--use-talos-json"],
         {"action": "store_true",
          "dest": "use_talos_json",
          "default": False,
          "help": "Use talos config from talos.json"
          }],
        [["--suite"],
         {"action": "store",
          "dest": "suite",
          "help": "Talos suite to run (from talos json)"
          }],
        [["--system-bits"],
         {"action": "store",
          "dest": "system_bits",
          "type": "choice",
          "default": "32",
          "choices": ['32', '64'],
          "help": "Testing 32 or 64 (for talos json plugins)"
          }],
        [["--add-option"],
         {"action": "extend",
          "dest": "talos_extra_options",
          "default": None,
          "help": "extra options to talos"
          }],
        # --geckoProfile / --geckoProfileInterval are hidden (help=SUPPRESS)
        # legacy spellings of --gecko-profile / --gecko-profile-interval;
        # each pair writes to the same dest, so either form works.
        [["--geckoProfile"], {
            "dest": "gecko_profile",
            "action": "store_true",
            "default": False,
            "help": argparse.SUPPRESS
        }],
        [["--geckoProfileInterval"], {
            "dest": "gecko_profile_interval",
            "type": "int",
            "default": 0,
            "help": argparse.SUPPRESS
        }],
        [["--gecko-profile"], {
            "dest": "gecko_profile",
            "action": "store_true",
            "default": False,
            "help": "Whether or not to profile the test run and save the profile results"
        }],
        [["--gecko-profile-interval"], {
            "dest": "gecko_profile_interval",
            "type": "int",
            "default": 0,
            "help": "The interval between samples taken by the profiler (milliseconds)"
        }],
        [["--disable-e10s"], {
            "dest": "e10s",
            "action": "store_false",
            "default": True,
            "help": "Run without multiple processes (e10s)."
        }],
        [["--enable-webrender"], {
            "action": "store_true",
            "dest": "enable_webrender",
            "default": False,
            "help": "Enable the WebRender compositor in Gecko.",
        }],
        [["--enable-fission"], {
            "action": "store_true",
            "dest": "enable_fission",
            "default": False,
            "help": "Enable Fission (site isolation) in Gecko.",
        }],
        [["--setpref"], {
            "action": "append",
            "metavar": "PREF=VALUE",
            "dest": "extra_prefs",
            "default": [],
            "help": "Defines an extra user preference."}
         ],
        # deepcopy so mutations of the code-coverage options here don't leak
        # into other scripts sharing code_coverage_config_options.
    ] + testing_config_options + copy.deepcopy(code_coverage_config_options)
182
183
    def __init__(self, **kwargs):
        # setdefault: honor caller-supplied values, otherwise use this
        # script's standard action list / config.
        kwargs.setdefault('config_options', self.config_options)
        kwargs.setdefault('all_actions', ['clobber',
                                          'download-and-extract',
                                          'populate-webroot',
                                          'create-virtualenv',
                                          'install',
                                          'run-tests',
                                          ])
        kwargs.setdefault('default_actions', ['clobber',
                                              'download-and-extract',
                                              'populate-webroot',
                                              'create-virtualenv',
                                              'install',
                                              'run-tests',
                                              ])
        kwargs.setdefault('config', {})
        super(Talos, self).__init__(**kwargs)

        self.workdir = self.query_abs_dirs()['abs_work_dir']  # convenience

        # Cache frequently used config values as attributes.
        self.run_local = self.config.get('run_local')
        self.installer_url = self.config.get("installer_url")
        self.talos_json_url = self.config.get("talos_json_url")
        self.talos_json = self.config.get("talos_json")
        self.talos_json_config = self.config.get("talos_json_config")
        self.repo_path = self.config.get("repo_path")
        self.obj_path = self.config.get("obj_path")
        self.tests = None
        # Profiling may be requested via config or via either spelling of the
        # pass-through talos option.
        self.gecko_profile = self.config.get('gecko_profile') or \
            "--geckoProfile" in self.config.get("talos_extra_options", []) or \
            "--gecko-profile" in self.config.get("talos_extra_options", [])
        self.gecko_profile_interval = self.config.get('gecko_profile_interval')
        # Filled in lazily by the query_* helpers below.
        self.pagesets_name = None
        self.benchmark_zip = None
        self.webextensions_zip = None
219
220
# We accept some configuration options from the try commit message in the format
221
# mozharness: <options>
222
# Example try commit message:
223
# mozharness: --gecko-profile try: <stuff>
224
def query_gecko_profile_options(self):
225
gecko_results = []
226
# finally, if gecko_profile is set, we add that to the talos options
227
if self.gecko_profile:
228
gecko_results.append('--gecko-profile')
229
if self.gecko_profile_interval:
230
gecko_results.extend(
231
['--gecko-profile-interval', str(self.gecko_profile_interval)]
232
)
233
return gecko_results
234
235
def query_abs_dirs(self):
236
if self.abs_dirs:
237
return self.abs_dirs
238
abs_dirs = super(Talos, self).query_abs_dirs()
239
abs_dirs['abs_blob_upload_dir'] = os.path.join(abs_dirs['abs_work_dir'],
240
'blobber_upload_dir')
241
abs_dirs['abs_test_install_dir'] = os.path.join(abs_dirs['abs_work_dir'], 'tests')
242
self.abs_dirs = abs_dirs
243
return self.abs_dirs
244
245
def query_talos_json_config(self):
246
"""Return the talos json config."""
247
if self.talos_json_config:
248
return self.talos_json_config
249
if not self.talos_json:
250
self.talos_json = os.path.join(self.talos_path, 'talos.json')
251
self.talos_json_config = parse_config_file(self.talos_json)
252
self.info(pprint.pformat(self.talos_json_config))
253
return self.talos_json_config
254
255
def make_talos_domain(self, host):
256
return host + "-talos"
257
258
def split_path(self, path):
259
result = []
260
while True:
261
path, folder = os.path.split(path)
262
if folder:
263
result.append(folder)
264
continue
265
elif path:
266
result.append(path)
267
break
268
269
result.reverse()
270
return result
271
272
def merge_paths(self, lhs, rhs):
273
backtracks = 0
274
for subdir in rhs:
275
if subdir == '..':
276
backtracks += 1
277
else:
278
break
279
return lhs[:-backtracks] + rhs[backtracks:]
280
281
def replace_relative_iframe_paths(self, directory, filename):
282
"""This will find iframes with relative paths and replace them with
283
absolute paths containing domains derived from the original source's
284
domain. This helps us better simulate real-world cases for fission
285
"""
286
if not filename.endswith('.html'):
287
return
288
289
directory_pieces = self.split_path(directory)
290
while directory_pieces and directory_pieces[0] != 'fis':
291
directory_pieces = directory_pieces[1:]
292
path = os.path.join(directory, filename)
293
294
# XXX: ugh, is there a better way to account for multiple encodings than just
295
# trying each of them?
296
encodings = ['utf-8', 'latin-1']
297
iframe_pattern = re.compile(r'(iframe.*")(\.\./.*\.html)"')
298
for encoding in encodings:
299
try:
300
with io.open(path, 'r', encoding=encoding) as f:
301
content = f.read()
302
303
def replace_iframe_src(match):
304
src = match.group(2)
305
split = self.split_path(src)
306
merged = self.merge_paths(directory_pieces, split)
307
host = merged[3]
308
site_origin_hash = self.make_talos_domain(host)
309
new_url = 'http://%s/%s"' % (site_origin_hash, string.join(merged, '/'))
310
self.info("Replacing %s with %s in iframe inside %s" %
311
(match.group(2), new_url, path))
312
return (match.group(1) + new_url)
313
314
content = re.sub(iframe_pattern, replace_iframe_src, content)
315
with io.open(path, 'w', encoding=encoding) as f:
316
f.write(content)
317
break
318
except UnicodeDecodeError:
319
pass
320
321
def query_pagesets_name(self):
322
"""Certain suites require external pagesets to be downloaded and
323
extracted.
324
"""
325
if self.pagesets_name:
326
return self.pagesets_name
327
if self.query_talos_json_config() and self.suite is not None:
328
self.pagesets_name = self.talos_json_config['suites'][self.suite].get('pagesets_name')
329
self.pagesets_name_manifest = 'tp5n-pageset.manifest'
330
return self.pagesets_name
331
332
def query_benchmark_zip(self):
333
"""Certain suites require external benchmarks to be downloaded and
334
extracted.
335
"""
336
if self.benchmark_zip:
337
return self.benchmark_zip
338
if self.query_talos_json_config() and self.suite is not None:
339
self.benchmark_zip = self.talos_json_config['suites'][self.suite].get('benchmark_zip')
340
self.benchmark_zip_manifest = 'jetstream-benchmark.manifest'
341
return self.benchmark_zip
342
343
def query_webextensions_zip(self):
344
"""Certain suites require external WebExtension sets to be downloaded and
345
extracted.
346
"""
347
if self.webextensions_zip:
348
return self.webextensions_zip
349
if self.query_talos_json_config() and self.suite is not None:
350
self.webextensions_zip = \
351
self.talos_json_config['suites'][self.suite].get('webextensions_zip')
352
self.webextensions_zip_manifest = 'webextensions.manifest'
353
return self.webextensions_zip
354
355
    def get_suite_from_test(self):
        """ Retrieve the talos suite name from a given talos test name."""
        # running locally, single test name provided instead of suite; go through tests and
        # find suite name
        suite_name = None
        if self.query_talos_json_config():
            # the test name is the argument following -a / --activeTests;
            # --activeTests wins if both are present.
            # NOTE(review): test_name_index is unbound if neither flag is
            # present -- callers (populate_webroot) only invoke this after
            # checking one of them is in talos_extra_options.
            if '-a' in self.config['talos_extra_options']:
                test_name_index = self.config['talos_extra_options'].index('-a') + 1
            if '--activeTests' in self.config['talos_extra_options']:
                test_name_index = self.config['talos_extra_options'].index('--activeTests') + 1
            if test_name_index < len(self.config['talos_extra_options']):
                test_name = self.config['talos_extra_options'][test_name_index]
                # scan every suite's test list for the requested test
                for talos_suite in self.talos_json_config['suites']:
                    if test_name in self.talos_json_config['suites'][talos_suite].get('tests'):
                        suite_name = talos_suite
            if not suite_name:
                # no suite found to contain the specified test, error out
                self.fatal("Test name is missing or invalid")
        else:
            self.fatal("Talos json config not found, cannot verify suite")
        return suite_name
376
377
def validate_suite(self):
378
""" Ensure suite name is a valid talos suite. """
379
if self.query_talos_json_config() and self.suite is not None:
380
if self.suite not in self.talos_json_config.get('suites'):
381
self.fatal("Suite '%s' is not valid (not found in talos json config)" % self.suite)
382
383
    def talos_options(self, args=None, **kw):
        """return options to talos

        Builds the full talos command line: executable path, suite/title/
        symbols options, profiling flags, extra args from config, and the
        webrender/fission/pref switches.
        """
        # binary path
        binary_path = self.binary_path or self.config.get('binary_path')
        if not binary_path:
            msg = """Talos requires a path to the binary. You can specify binary_path or add
download-and-extract to your action list."""
            self.fatal(msg)

        # talos options
        options = []
        # talos can't gather data if the process name ends with '.exe'
        if binary_path.endswith('.exe'):
            binary_path = binary_path[:-4]
        # options overwritten from **kw
        kw_options = {'executablePath': binary_path}
        if 'suite' in self.config:
            kw_options['suite'] = self.config['suite']
        if self.config.get('title'):
            kw_options['title'] = self.config['title']
        if self.symbols_path:
            kw_options['symbolsPath'] = self.symbols_path

        kw_options.update(kw)
        # talos expects tests to be in the format (e.g.) 'ts:tp5:tsvg'
        tests = kw_options.get('activeTests')
        # NOTE(review): basestring is Python-2-only; under Python 3 this line
        # would raise NameError.  Left as-is because replacing it with str
        # would mis-handle unicode strings on Python 2 -- migrate deliberately.
        if tests and not isinstance(tests, basestring):
            tests = ':'.join(tests)  # Talos expects this format
            kw_options['activeTests'] = tests
        for key, value in kw_options.items():
            options.extend(['--%s' % key, value])
        # configure profiling options
        options.extend(self.query_gecko_profile_options())
        # extra arguments
        if args is not None:
            options += args
        if 'talos_extra_options' in self.config:
            options += self.config['talos_extra_options']
        if self.config.get('code_coverage', False):
            options.extend(['--code-coverage'])
        if self.config['extra_prefs']:
            options.extend(['--setpref={}'.format(p) for p in self.config['extra_prefs']])
        if self.config['enable_webrender']:
            options.extend(['--enable-webrender'])
        # enabling fission can come from the --enable-fission cmd line argument; or in CI
        # it comes from a taskcluster transform which adds a --setpref for fission.autostart
        if self.config['enable_fission'] or "fission.autostart=true" in self.config['extra_prefs']:
            options.extend(['--enable-fission'])

        return options
433
434
    def populate_webroot(self):
        """Populate the production test slaves' webroots

        Determines the suite to run (from the command line when local, from
        config in production), then downloads and unpacks any tooltool
        artifacts (pagesets, benchmarks, webextensions) the suite needs.
        """
        self.talos_path = os.path.join(
            self.query_abs_dirs()['abs_test_install_dir'], 'talos'
        )

        # need to determine if talos pageset is required to be downloaded
        if self.config.get('run_local') and 'talos_extra_options' in self.config:
            # talos initiated locally, get and verify test/suite from cmd line
            self.talos_path = os.path.dirname(self.talos_json)
            if ('-a' in self.config['talos_extra_options'] or
                    '--activeTests' in self.config['talos_extra_options']):
                # test name (-a or --activeTests) specified, find out what suite it is a part of
                self.suite = self.get_suite_from_test()
            elif '--suite' in self.config['talos_extra_options']:
                # --suite specified, get suite from cmd line and ensure is valid
                suite_name_index = self.config['talos_extra_options'].index('--suite') + 1
                if suite_name_index < len(self.config['talos_extra_options']):
                    self.suite = self.config['talos_extra_options'][suite_name_index]
                    self.validate_suite()
                else:
                    self.fatal("Suite name not provided")
        else:
            # talos initiated in production via mozharness
            self.suite = self.config['suite']

        tooltool_artifacts = []
        src_talos_pageset_dest = os.path.join(self.talos_path, 'talos', 'tests')
        # unfortunately this path has to be short and can't be descriptive, because
        # on Windows we tend to already push the boundaries of the max path length
        # constraint. This will contain the tp5 pageset, but adjusted to have
        # absolute URLs on iframes for the purposes of better modeling things for
        # fission.
        src_talos_pageset_multidomain_dest = os.path.join(self.talos_path,
                                                          'talos',
                                                          'fis')
        webextension_dest = os.path.join(self.talos_path, 'talos', 'webextensions')

        if self.query_pagesets_name():
            # plain pageset, plus a second copy postprocessed for fission
            tooltool_artifacts.append({'name': self.pagesets_name,
                                       'manifest': self.pagesets_name_manifest,
                                       'dest': src_talos_pageset_dest})
            tooltool_artifacts.append({'name': self.pagesets_name,
                                       'manifest': self.pagesets_name_manifest,
                                       'dest': src_talos_pageset_multidomain_dest,
                                       'postprocess': self.replace_relative_iframe_paths})

        if self.query_benchmark_zip():
            tooltool_artifacts.append({'name': self.benchmark_zip,
                                       'manifest': self.benchmark_zip_manifest,
                                       'dest': src_talos_pageset_dest})

        if self.query_webextensions_zip():
            tooltool_artifacts.append({'name': self.webextensions_zip,
                                       'manifest': self.webextensions_zip_manifest,
                                       'dest': webextension_dest})

        # now that have the suite name, check if artifact is required, if so download it
        # the --no-download option will override this
        for artifact in tooltool_artifacts:
            if '--no-download' not in self.config.get('talos_extra_options', []):
                self.info("Downloading %s with tooltool..." % artifact)

                archive = os.path.join(artifact['dest'], artifact['name'])
                # assumes artifact names end in .zip; the unpacked directory
                # is the archive path minus that suffix
                output_dir_path = re.sub(r'\.zip$', '', archive)
                if not os.path.exists(archive):
                    manifest_file = os.path.join(self.talos_path, artifact['manifest'])
                    self.tooltool_fetch(
                        manifest_file,
                        output_dir=artifact['dest'],
                        cache=self.config.get('tooltool_cache')
                    )
                    unzip = self.query_exe('unzip')
                    unzip_cmd = [unzip, '-q', '-o', archive, '-d', artifact['dest']]
                    self.run_command(unzip_cmd, halt_on_failure=True)

                    if 'postprocess' in artifact:
                        # run the per-file hook over everything we unpacked
                        for subdir, dirs, files in os.walk(output_dir_path):
                            for file in files:
                                artifact['postprocess'](subdir, file)
                else:
                    self.info("%s already available" % artifact)

            else:
                self.info("Not downloading %s because the no-download option was specified" %
                          artifact)

        # if running webkit tests locally, need to copy webkit source into talos/tests
        if self.config.get('run_local') and ('stylebench' in self.suite or
                                             'motionmark' in self.suite):
            self.get_webkit_source()
525
526
def get_webkit_source(self):
527
# in production the build system auto copies webkit source into place;
528
# but when run locally we need to do this manually, so that talos can find it
529
src = os.path.join(self.repo_path, 'third_party', 'webkit',
530
'PerformanceTests')
531
dest = os.path.join(self.talos_path, 'talos', 'tests', 'webkit',
532
'PerformanceTests')
533
534
if os.path.exists(dest):
535
shutil.rmtree(dest)
536
537
self.info("Copying webkit benchmarks from %s to %s" % (src, dest))
538
try:
539
shutil.copytree(src, dest)
540
except Exception:
541
self.critical("Error copying webkit benchmarks from %s to %s" % (src, dest))
542
543
# Action methods. {{{1
544
# clobber defined in BaseScript
545
546
def download_and_extract(self, extract_dirs=None, suite_categories=None):
547
return super(Talos, self).download_and_extract(
548
suite_categories=['common', 'talos']
549
)
550
551
    def create_virtualenv(self, **kwargs):
        """VirtualenvMixin.create_virtualenv() assumes we're using
        self.config['virtualenv_modules']. Since we are installing
        talos from its source, we have to wrap that method here."""
        # if virtualenv already exists, just add to path and don't re-install, need it
        # in path so can import jsonschema later when validating output for perfherder
        _virtualenv_path = self.config.get("virtualenv_path")

        if self.run_local and os.path.exists(_virtualenv_path):
            self.info("Virtualenv already exists, skipping creation")
            _python_interp = self.config.get('exes')['python']

            # site-packages lives under 'Lib' on Windows, 'lib/<python>' elsewhere
            if 'win' in self.platform_name():
                _path = os.path.join(_virtualenv_path,
                                     'Lib',
                                     'site-packages')
            else:
                _path = os.path.join(_virtualenv_path,
                                     'lib',
                                     os.path.basename(_python_interp),
                                     'site-packages')

            # if running gecko profiling install the requirements
            if self.gecko_profile:
                self._install_view_gecko_profile_req()

            sys.path.append(_path)
            return

        # virtualenv doesn't already exist so create it
        # install mozbase first, so we use in-tree versions
        if not self.run_local:
            mozbase_requirements = os.path.join(
                self.query_abs_dirs()['abs_test_install_dir'],
                'config',
                'mozbase_requirements.txt'
            )
        else:
            # local runs install mozbase from source
            mozbase_requirements = os.path.join(
                os.path.dirname(self.talos_path),
                'config',
                'mozbase_source_requirements.txt'
            )
        self.register_virtualenv_module(
            requirements=[mozbase_requirements],
            two_pass=True,
            editable=True,
        )
        super(Talos, self).create_virtualenv()
        # talos in harness requires what else is
        # listed in talos requirements.txt file.
        self.install_module(
            requirements=[os.path.join(self.talos_path,
                                       'requirements.txt')]
        )
        self._install_view_gecko_profile_req()
607
608
def _install_view_gecko_profile_req(self):
609
# if running locally and gecko profiing is on, we will be using the
610
# view-gecko-profile tool which has its own requirements too
611
if self.gecko_profile and self.run_local:
612
tools = os.path.join(self.config['repo_path'], 'testing', 'tools')
613
view_gecko_profile_req = os.path.join(tools,
614
'view_gecko_profile',
615
'requirements.txt')
616
self.info("installing requirements for the view-gecko-profile tool")
617
self.install_module(requirements=[view_gecko_profile_req])
618
619
def _validate_treeherder_data(self, parser):
620
# late import is required, because install is done in create_virtualenv
621
import jsonschema
622
623
if len(parser.found_perf_data) != 1:
624
self.critical("PERFHERDER_DATA was seen %d times, expected 1."
625
% len(parser.found_perf_data))
626
parser.update_worst_log_and_tbpl_levels(WARNING, TBPL_WARNING)
627
return
628
629
schema_path = os.path.join(external_tools_path,
630
'performance-artifact-schema.json')
631
self.info("Validating PERFHERDER_DATA against %s" % schema_path)
632
try:
633
with open(schema_path) as f:
634
schema = json.load(f)
635
data = json.loads(parser.found_perf_data[0])
636
jsonschema.validate(data, schema)
637
except Exception:
638
self.exception("Error while validating PERFHERDER_DATA")
639
parser.update_worst_log_and_tbpl_levels(WARNING, TBPL_WARNING)
640
641
def _artifact_perf_data(self, parser, dest):
642
src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'local.json')
643
try:
644
shutil.copyfile(src, dest)
645
except Exception:
646
self.critical("Error copying results %s to upload dir %s" % (src, dest))
647
parser.update_worst_log_and_tbpl_levels(CRITICAL, TBPL_FAILURE)
648
649
    def run_tests(self, args=None, **kw):
        """run Talos tests

        Builds the command line and environment, runs talos (optionally
        under a debugger), then maps the exit code to log/TBPL levels and
        validates/uploads the perfherder data.
        """

        # get talos options
        options = self.talos_options(args=args, **kw)

        # XXX temporary python version check
        python = self.query_python_path()
        self.run_command([python, "--version"])
        parser = TalosOutputParser(config=self.config, log_obj=self.log_obj,
                                   error_list=TalosErrorList)
        env = {}
        env['MOZ_UPLOAD_DIR'] = self.query_abs_dirs()['abs_blob_upload_dir']
        if not self.run_local:
            env['MINIDUMP_STACKWALK'] = self.query_minidump_stackwalk()
        env['MINIDUMP_SAVE_PATH'] = self.query_abs_dirs()['abs_blob_upload_dir']
        env['RUST_BACKTRACE'] = 'full'
        if not os.path.isdir(env['MOZ_UPLOAD_DIR']):
            self.mkdir_p(env['MOZ_UPLOAD_DIR'])
        env = self.query_env(partial_env=env, log_level=INFO)
        # adjust PYTHONPATH to be able to use talos as a python package
        if 'PYTHONPATH' in env:
            env['PYTHONPATH'] = self.talos_path + os.pathsep + env['PYTHONPATH']
        else:
            env['PYTHONPATH'] = self.talos_path

        if self.repo_path is not None:
            env['MOZ_DEVELOPER_REPO_DIR'] = self.repo_path
        if self.obj_path is not None:
            env['MOZ_DEVELOPER_OBJ_DIR'] = self.obj_path

        # TODO: consider getting rid of this as we should be default to stylo now
        env['STYLO_FORCE_ENABLED'] = '1'

        # sets a timeout for how long talos should run without output
        output_timeout = self.config.get('talos_output_timeout', 3600)
        # run talos tests
        run_tests = os.path.join(self.talos_path, 'talos', 'run_tests.py')

        mozlog_opts = ['--log-tbpl-level=debug']
        if not self.run_local and 'suite' in self.config:
            # production: write error-summary and raw logs into the upload dir
            fname_pattern = '%s_%%s.log' % self.config['suite']
            mozlog_opts.append('--log-errorsummary=%s'
                               % os.path.join(env['MOZ_UPLOAD_DIR'],
                                              fname_pattern % 'errorsummary'))
            mozlog_opts.append('--log-raw=%s'
                               % os.path.join(env['MOZ_UPLOAD_DIR'],
                                              fname_pattern % 'raw'))

        def launch_in_debug_mode(cmdline):
            # True when any debugger-related flag appears in the command line
            cmdline = set(cmdline)
            debug_opts = {'--debug', '--debugger', '--debugger_args'}

            return bool(debug_opts.intersection(cmdline))

        command = [python, run_tests] + options + mozlog_opts
        if launch_in_debug_mode(command):
            # run unparsed so the debugger owns the terminal
            # NOTE(review): self.return_code is not updated on this path
            talos_process = subprocess.Popen(command, cwd=self.workdir, env=env, bufsize=0)
            talos_process.wait()
        else:
            self.return_code = self.run_command(command, cwd=self.workdir,
                                                output_timeout=output_timeout,
                                                output_parser=parser,
                                                env=env)
        if parser.minidump_output:
            self.info("Looking at the minidump files for debugging purposes...")
            for item in parser.minidump_output:
                self.run_command(["ls", "-l", item])

        if self.return_code not in [0]:
            # update the worst log level and tbpl status
            log_level = ERROR
            tbpl_level = TBPL_FAILURE
            if self.return_code == 1:
                log_level = WARNING
                tbpl_level = TBPL_WARNING
            if self.return_code == 4:
                log_level = WARNING
                tbpl_level = TBPL_RETRY

            parser.update_worst_log_and_tbpl_levels(log_level, tbpl_level)
        elif '--no-upload-results' not in options:
            if not self.gecko_profile:
                self._validate_treeherder_data(parser)
                if not self.run_local:
                    # copy results to upload dir so they are included as an artifact
                    dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'perfherder-data.json')
                    self._artifact_perf_data(parser, dest)

        self.record_status(parser.worst_tbpl_status,
                           level=parser.worst_log_level)