Source code

Revision control

Other Tools

1
#!/usr/bin/env python
2
# ***** BEGIN LICENSE BLOCK *****
3
# This Source Code Form is subject to the terms of the Mozilla Public
4
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
5
# You can obtain one at http://mozilla.org/MPL/2.0/.
6
# ***** END LICENSE BLOCK *****
7
"""
8
run talos tests in a virtualenv
9
"""
10
11
import argparse
12
import io
13
import os
14
import sys
15
import pprint
16
import copy
17
import re
18
import shutil
19
import string
20
import subprocess
21
import json
22
23
import mozharness
24
from mozharness.base.config import parse_config_file
25
from mozharness.base.errors import PythonErrorList
26
from mozharness.base.log import OutputParser, DEBUG, ERROR, CRITICAL
27
from mozharness.base.log import INFO, WARNING
28
from mozharness.base.python import Python3Virtualenv
29
from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
30
from mozharness.base.vcs.vcsbase import MercurialScript
31
from mozharness.mozilla.testing.errors import TinderBoxPrintRe
32
from mozharness.mozilla.automation import TBPL_SUCCESS, TBPL_WORST_LEVEL_TUPLE
33
from mozharness.mozilla.automation import TBPL_RETRY, TBPL_FAILURE, TBPL_WARNING
34
from mozharness.mozilla.tooltool import TooltoolMixin
35
from mozharness.mozilla.testing.codecoverage import (
36
CodeCoverageMixin,
37
code_coverage_config_options
38
)
39
40
41
# Absolute path to the mozharness scripts checkout; external_tools ships
# alongside it (used later to locate the perfherder JSON schema).
scripts_path = os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__)))
external_tools_path = os.path.join(scripts_path, 'external_tools')

# Patterns that classify talos log output lines by severity; extends the
# generic Python error patterns shared by all mozharness scripts.
TalosErrorList = PythonErrorList + [
    {'regex': re.compile(r'''run-as: Package '.*' is unknown'''), 'level': DEBUG},
    {'substr': r'''FAIL: Graph server unreachable''', 'level': CRITICAL},
    {'substr': r'''FAIL: Busted:''', 'level': CRITICAL},
    {'substr': r'''FAIL: failed to cleanup''', 'level': ERROR},
    {'substr': r'''erfConfigurator.py: Unknown error''', 'level': CRITICAL},
    {'substr': r'''talosError''', 'level': CRITICAL},
    {'regex': re.compile(r'''No machine_name called '.*' can be found'''), 'level': CRITICAL},
    {'substr': r"""No such file or directory: 'browser_output.txt'""",
     'level': CRITICAL,
     'explanation': "Most likely the browser failed to launch, or the test was otherwise "
                    "unsuccessful in even starting."},
]
57
58
# TODO: check for running processes on script invocation
59
60
61
class TalosOutputParser(OutputParser):
    """Output parser for talos runs.

    Tracks PERFHERDER_DATA lines, minidump information from talosError
    lines, and ratchets the worst log level / tbpl status observed.
    """
    minidump_regex = re.compile(r'''talosError: "error executing: '(\S+) (\S+) (\S+)'"''')
    RE_PERF_DATA = re.compile(r'.*PERFHERDER_DATA:\s+(\{.*\})')
    worst_tbpl_status = TBPL_SUCCESS

    def __init__(self, **kwargs):
        super(TalosOutputParser, self).__init__(**kwargs)
        # (stackwalk binary, option, minidump path) from a talosError line, if seen.
        self.minidump_output = None
        # Raw JSON strings captured from PERFHERDER_DATA lines.
        self.found_perf_data = []

    def update_worst_log_and_tbpl_levels(self, log_level, tbpl_level):
        """Ratchet worst_log_level and worst_tbpl_status toward the given levels."""
        self.worst_log_level = self.worst_level(log_level,
                                                self.worst_log_level)
        self.worst_tbpl_status = self.worst_level(
            tbpl_level, self.worst_tbpl_status,
            levels=TBPL_WORST_LEVEL_TUPLE
        )

    def parse_single_line(self, line):
        """ In Talos land, every line that starts with RETURN: needs to be
        printed with a TinderboxPrint:"""
        if line.startswith("RETURN:"):
            # BUG FIX: str.replace returns a new string; the original code
            # discarded the result, so the substitution never took effect.
            line = line.replace("RETURN:", "TinderboxPrint:")
        m = self.minidump_regex.search(line)
        if m:
            self.minidump_output = (m.group(1), m.group(2), m.group(3))

        m = self.RE_PERF_DATA.match(line)
        if m:
            self.found_perf_data.append(m.group(1))

        # now let's check if we should retry
        harness_retry_re = TinderBoxPrintRe['harness_error']['retry_regex']
        if harness_retry_re.search(line):
            self.critical(' %s' % line)
            self.update_worst_log_and_tbpl_levels(CRITICAL, TBPL_RETRY)
            return  # skip base parse_single_line
        super(TalosOutputParser, self).parse_single_line(line)
101
class Talos(TestingMixin, MercurialScript, TooltoolMixin,
            Python3Virtualenv, CodeCoverageMixin):
    """
    install and run Talos tests
    """
    # Command-line options for this script.  The camelCase
    # --geckoProfile/--geckoProfileInterval entries are hidden
    # (argparse.SUPPRESS) aliases retained for backwards compatibility
    # with the dashed spellings below.
    config_options = [
        [["--use-talos-json"],
         {"action": "store_true",
          "dest": "use_talos_json",
          "default": False,
          "help": "Use talos config from talos.json"
          }],
        [["--suite"],
         {"action": "store",
          "dest": "suite",
          "help": "Talos suite to run (from talos json)"
          }],
        [["--system-bits"],
         {"action": "store",
          "dest": "system_bits",
          "type": "choice",
          "default": "32",
          "choices": ['32', '64'],
          "help": "Testing 32 or 64 (for talos json plugins)"
          }],
        [["--add-option"],
         {"action": "extend",
          "dest": "talos_extra_options",
          "default": None,
          "help": "extra options to talos"
          }],
        [["--geckoProfile"], {
            "dest": "gecko_profile",
            "action": "store_true",
            "default": False,
            "help": argparse.SUPPRESS
        }],
        [["--geckoProfileInterval"], {
            "dest": "gecko_profile_interval",
            "type": "int",
            "default": 0,
            "help": argparse.SUPPRESS
        }],
        [["--gecko-profile"], {
            "dest": "gecko_profile",
            "action": "store_true",
            "default": False,
            "help": "Whether or not to profile the test run and save the profile results"
        }],
        [["--gecko-profile-interval"], {
            "dest": "gecko_profile_interval",
            "type": "int",
            "default": 0,
            "help": "The interval between samples taken by the profiler (milliseconds)"
        }],
        [["--disable-e10s"], {
            "dest": "e10s",
            "action": "store_false",
            "default": True,
            "help": "Run without multiple processes (e10s)."
        }],
        [["--enable-webrender"], {
            "action": "store_true",
            "dest": "enable_webrender",
            "default": False,
            "help": "Enable the WebRender compositor in Gecko.",
        }],
        [["--enable-fission"], {
            "action": "store_true",
            "dest": "enable_fission",
            "default": False,
            "help": "Enable Fission (site isolation) in Gecko.",
        }],
        [["--setpref"], {
            "action": "append",
            "metavar": "PREF=VALUE",
            "dest": "extra_prefs",
            "default": [],
            "help": "Defines an extra user preference."}
         ],
    ] + testing_config_options + copy.deepcopy(code_coverage_config_options)
183
def __init__(self, **kwargs):
    """Set up actions, run super's init, and cache config-derived state."""
    kwargs.setdefault('config_options', self.config_options)
    kwargs.setdefault('all_actions', ['clobber',
                                      'download-and-extract',
                                      'populate-webroot',
                                      'create-virtualenv',
                                      'install',
                                      'run-tests',
                                      ])
    kwargs.setdefault('default_actions', ['clobber',
                                          'download-and-extract',
                                          'populate-webroot',
                                          'create-virtualenv',
                                          'install',
                                          'run-tests',
                                          ])
    kwargs.setdefault('config', {})
    super(Talos, self).__init__(**kwargs)

    self.workdir = self.query_abs_dirs()['abs_work_dir']  # convenience

    self.run_local = self.config.get('run_local')
    self.installer_url = self.config.get("installer_url")
    self.talos_json_url = self.config.get("talos_json_url")
    self.talos_json = self.config.get("talos_json")
    self.talos_json_config = self.config.get("talos_json_config")
    self.repo_path = self.config.get("repo_path")
    self.obj_path = self.config.get("obj_path")
    self.tests = None
    # BUG FIX: --add-option defaults talos_extra_options to None, so the
    # key can be present with value None and `.get(key, [])` would return
    # None, making the `in` tests below raise TypeError.  Coalesce to [].
    extra_opts = self.config.get("talos_extra_options") or []
    self.gecko_profile = self.config.get('gecko_profile') or \
        "--geckoProfile" in extra_opts or \
        "--gecko-profile" in extra_opts
    self.gecko_profile_interval = self.config.get('gecko_profile_interval')
    self.pagesets_name = None
    self.benchmark_zip = None
    self.webextensions_zip = None
220
# We accept some configuration options from the try commit message in the format
# mozharness: <options>
# Example try commit message:
# mozharness: --gecko-profile try: <stuff>
def query_gecko_profile_options(self):
    """Return the talos CLI arguments for gecko profiling, if enabled."""
    profile_args = []
    if self.gecko_profile:
        profile_args.append('--gecko-profile')
        if self.gecko_profile_interval:
            profile_args += ['--gecko-profile-interval',
                             str(self.gecko_profile_interval)]
    return profile_args
235
def query_abs_dirs(self):
    """Return the absolute-directory map, memoized, with talos additions."""
    if self.abs_dirs:
        return self.abs_dirs
    dirs = super(Talos, self).query_abs_dirs()
    work_dir = dirs['abs_work_dir']
    dirs['abs_blob_upload_dir'] = os.path.join(work_dir, 'blobber_upload_dir')
    dirs['abs_test_install_dir'] = os.path.join(work_dir, 'tests')
    self.abs_dirs = dirs
    return self.abs_dirs
245
def query_talos_json_config(self):
    """Return the talos json config, loading it from disk on first use."""
    if not self.talos_json_config:
        # Fall back to the talos.json shipped inside the talos checkout.
        if not self.talos_json:
            self.talos_json = os.path.join(self.talos_path, 'talos.json')
        self.talos_json_config = parse_config_file(self.talos_json)
        self.info(pprint.pformat(self.talos_json_config))
    return self.talos_json_config
255
def make_talos_domain(self, host):
    """Return the synthetic talos domain name derived from *host*."""
    return "%s-talos" % host
258
def split_path(self, path):
    """Split *path* into a list of its components.

    os.path.split is applied repeatedly, so an absolute path keeps its
    root as the first entry (e.g. '/a/b' -> ['/', 'a', 'b']).
    """
    result = []
    while True:
        path, folder = os.path.split(path)
        if folder:
            result.append(folder)
        elif path:
            result.append(path)
            break
        else:
            # BUG FIX: when both head and tail are empty (e.g. for an
            # empty input path) the original loop never terminated.
            break

    result.reverse()
    return result
272
def merge_paths(self, lhs, rhs):
    """Merge two component lists, resolving leading '..' entries of *rhs*
    against the tail of *lhs*.
    """
    backtracks = 0
    for subdir in rhs:
        if subdir == '..':
            backtracks += 1
        else:
            break
    # BUG FIX: lhs[:-0] is lhs[:0] == [], so the original silently
    # dropped all of lhs whenever rhs had no leading '..'.  Only slice
    # when there is actually something to strip.
    if backtracks:
        return lhs[:-backtracks] + rhs[backtracks:]
    return lhs + rhs
281
def replace_relative_iframe_paths(self, directory, filename):
    """This will find iframes with relative paths and replace them with
    absolute paths containing domains derived from the original source's
    domain. This helps us better simulate real-world cases for fission
    """
    if not filename.endswith('.html'):
        return

    # Keep only the components from the 'fis' pageset root downward.
    directory_pieces = self.split_path(directory)
    while directory_pieces and directory_pieces[0] != 'fis':
        directory_pieces = directory_pieces[1:]
    path = os.path.join(directory, filename)

    # XXX: ugh, is there a better way to account for multiple encodings than just
    # trying each of them?
    encodings = ['utf-8', 'latin-1']
    iframe_pattern = re.compile(r'(iframe.*")(\.\./.*\.html)"')
    for encoding in encodings:
        try:
            with io.open(path, 'r', encoding=encoding) as f:
                content = f.read()

            def replace_iframe_src(match):
                src = match.group(2)
                split = self.split_path(src)
                merged = self.merge_paths(directory_pieces, split)
                host = merged[3]
                site_origin_hash = self.make_talos_domain(host)
                # BUG FIX: string.join() is Python-2-only (removed in
                # Python 3); str.join is equivalent and portable.
                new_url = 'http://%s/%s"' % (site_origin_hash, '/'.join(merged))
                self.info("Replacing %s with %s in iframe inside %s" %
                          (match.group(2), new_url, path))
                return (match.group(1) + new_url)

            content = re.sub(iframe_pattern, replace_iframe_src, content)
            with io.open(path, 'w', encoding=encoding) as f:
                f.write(content)
            break
        except UnicodeDecodeError:
            # Wrong encoding guess; try the next one.
            pass
321
def query_pagesets_name(self):
    """Certain suites require external pagesets to be downloaded and
    extracted.
    """
    if not self.pagesets_name:
        if self.query_talos_json_config() and self.suite is not None:
            suite_cfg = self.talos_json_config['suites'][self.suite]
            self.pagesets_name = suite_cfg.get('pagesets_name')
            self.pagesets_name_manifest = 'tp5n-pageset.manifest'
    return self.pagesets_name
332
def query_benchmark_zip(self):
    """Certain suites require external benchmarks to be downloaded and
    extracted.
    """
    if not self.benchmark_zip:
        if self.query_talos_json_config() and self.suite is not None:
            suite_cfg = self.talos_json_config['suites'][self.suite]
            self.benchmark_zip = suite_cfg.get('benchmark_zip')
            self.benchmark_zip_manifest = 'jetstream-benchmark.manifest'
    return self.benchmark_zip
343
def query_webextensions_zip(self):
    """Certain suites require external WebExtension sets to be downloaded and
    extracted.
    """
    if not self.webextensions_zip:
        if self.query_talos_json_config() and self.suite is not None:
            suite_cfg = self.talos_json_config['suites'][self.suite]
            self.webextensions_zip = suite_cfg.get('webextensions_zip')
            self.webextensions_zip_manifest = 'webextensions.manifest'
    return self.webextensions_zip
355
def get_suite_from_test(self):
    """ Retrieve the talos suite name from a given talos test name."""
    # Running locally, a single test name is given (-a / --activeTests)
    # instead of a suite; walk the suites in talos.json to find the one
    # that contains that test.
    suite_name = None
    if self.query_talos_json_config():
        extra = self.config['talos_extra_options']
        if '-a' in extra:
            name_idx = extra.index('-a') + 1
        if '--activeTests' in extra:
            name_idx = extra.index('--activeTests') + 1
        if name_idx < len(extra):
            test_name = extra[name_idx]
            suites = self.talos_json_config['suites']
            for candidate in suites:
                if test_name in suites[candidate].get('tests'):
                    suite_name = candidate
        if not suite_name:
            # no suite found to contain the specified test, error out
            self.fatal("Test name is missing or invalid")
    else:
        self.fatal("Talos json config not found, cannot verify suite")
    return suite_name
377
def validate_suite(self):
    """ Ensure suite name is a valid talos suite. """
    if self.query_talos_json_config() and self.suite is not None:
        if self.suite not in self.talos_json_config.get('suites'):
            self.fatal("Suite '%s' is not valid (not found in talos json config)"
                       % self.suite)
383
def talos_options(self, args=None, **kw):
    """return options to talos

    Builds the command-line argument list for run_tests.py from the
    config, **kw overrides, profiling settings and extra options.
    """
    # binary path
    binary_path = self.binary_path or self.config.get('binary_path')
    if not binary_path:
        msg = """Talos requires a path to the binary. You can specify binary_path or add
        download-and-extract to your action list."""
        self.fatal(msg)

    # talos options
    options = []
    # talos can't gather data if the process name ends with '.exe'
    if binary_path.endswith('.exe'):
        binary_path = binary_path[:-4]
    # options overwritten from **kw
    kw_options = {'executablePath': binary_path}
    if 'suite' in self.config:
        kw_options['suite'] = self.config['suite']
    if self.config.get('title'):
        kw_options['title'] = self.config['title']
    if self.symbols_path:
        kw_options['symbolsPath'] = self.symbols_path

    kw_options.update(kw)
    # talos expects tests to be in the format (e.g.) 'ts:tp5:tsvg'
    tests = kw_options.get('activeTests')
    # BUG FIX: `basestring` does not exist on Python 3 (NameError).
    # Joining only genuine sequences keeps the original behavior
    # (strings pass through untouched) and stays portable.
    if tests and isinstance(tests, (list, tuple)):
        tests = ':'.join(tests)  # Talos expects this format
        kw_options['activeTests'] = tests
    for key, value in kw_options.items():
        options.extend(['--%s' % key, value])
    # configure profiling options
    options.extend(self.query_gecko_profile_options())
    # extra arguments
    if args is not None:
        options += args
    if 'talos_extra_options' in self.config:
        # The --add-option default is None, so guard against the key
        # being present with a None value.
        options += self.config['talos_extra_options'] or []
    if self.config.get('code_coverage', False):
        options.extend(['--code-coverage'])
    if self.config['extra_prefs']:
        options.extend(['--setpref={}'.format(p) for p in self.config['extra_prefs']])
    if self.config['enable_webrender']:
        options.extend(['--enable-webrender'])
    if self.config['enable_fission']:
        options.extend(['--enable-fission'])

    return options
432
def populate_webroot(self):
    """Populate the production test slaves' webroots.

    Determines the suite to run, then fetches (via tooltool) and unpacks
    any pagesets, benchmarks or webextension sets the suite requires.
    """
    self.talos_path = os.path.join(
        self.query_abs_dirs()['abs_test_install_dir'], 'talos'
    )

    # need to determine if talos pageset is required to be downloaded
    if self.config.get('run_local') and 'talos_extra_options' in self.config:
        # talos initiated locally, get and verify test/suite from cmd line
        self.talos_path = os.path.dirname(self.talos_json)
        # Guard against talos_extra_options being present but None
        # (the --add-option default).
        extra_opts = self.config['talos_extra_options'] or []
        if '-a' in extra_opts or '--activeTests' in extra_opts:
            # test name (-a or --activeTests) specified, find out what suite it is a part of
            self.suite = self.get_suite_from_test()
        elif '--suite' in extra_opts:
            # --suite specified, get suite from cmd line and ensure is valid
            suite_name_index = extra_opts.index('--suite') + 1
            if suite_name_index < len(extra_opts):
                self.suite = extra_opts[suite_name_index]
                self.validate_suite()
            else:
                self.fatal("Suite name not provided")
    else:
        # talos initiated in production via mozharness
        self.suite = self.config['suite']

    tooltool_artifacts = []
    src_talos_pageset_dest = os.path.join(self.talos_path, 'talos', 'tests')
    # unfortunately this path has to be short and can't be descriptive, because
    # on Windows we tend to already push the boundaries of the max path length
    # constraint. This will contain the tp5 pageset, but adjusted to have
    # absolute URLs on iframes for the purposes of better modeling things for
    # fission.
    src_talos_pageset_multidomain_dest = os.path.join(self.talos_path,
                                                      'talos',
                                                      'fis')
    webextension_dest = os.path.join(self.talos_path, 'talos', 'webextensions')

    if self.query_pagesets_name():
        tooltool_artifacts.append({'name': self.pagesets_name,
                                   'manifest': self.pagesets_name_manifest,
                                   'dest': src_talos_pageset_dest})
        tooltool_artifacts.append({'name': self.pagesets_name,
                                   'manifest': self.pagesets_name_manifest,
                                   'dest': src_talos_pageset_multidomain_dest,
                                   'postprocess': self.replace_relative_iframe_paths})

    if self.query_benchmark_zip():
        tooltool_artifacts.append({'name': self.benchmark_zip,
                                   'manifest': self.benchmark_zip_manifest,
                                   'dest': src_talos_pageset_dest})

    if self.query_webextensions_zip():
        tooltool_artifacts.append({'name': self.webextensions_zip,
                                   'manifest': self.webextensions_zip_manifest,
                                   'dest': webextension_dest})

    # now that have the suite name, check if artifact is required, if so download it
    # the --no-download option will override this
    for artifact in tooltool_artifacts:
        # BUG FIX: coalesce to [] — the key can be present with value
        # None, and `'--no-download' not in None` raises TypeError.
        if '--no-download' not in (self.config.get('talos_extra_options') or []):
            self.info("Downloading %s with tooltool..." % artifact)

            archive = os.path.join(artifact['dest'], artifact['name'])
            output_dir_path = re.sub(r'\.zip$', '', archive)
            if not os.path.exists(archive):
                manifest_file = os.path.join(self.talos_path, artifact['manifest'])
                self.tooltool_fetch(
                    manifest_file,
                    output_dir=artifact['dest'],
                    cache=self.config.get('tooltool_cache')
                )
                unzip = self.query_exe('unzip')
                unzip_cmd = [unzip, '-q', '-o', archive, '-d', artifact['dest']]
                self.run_command(unzip_cmd, halt_on_failure=True)

                if 'postprocess' in artifact:
                    for subdir, dirs, files in os.walk(output_dir_path):
                        for file in files:
                            artifact['postprocess'](subdir, file)
            else:
                self.info("%s already available" % artifact)

        else:
            self.info("Not downloading %s because the no-download option was specified" %
                      artifact)

    # if running webkit tests locally, need to copy webkit source into talos/tests
    if self.config.get('run_local') and ('stylebench' in self.suite or
                                         'motionmark' in self.suite):
        self.get_webkit_source()
524
def get_webkit_source(self):
    # in production the build system auto copies webkit source into place;
    # but when run locally we need to do this manually, so that talos can find it
    src = os.path.join(self.repo_path, 'third_party', 'webkit',
                       'PerformanceTests')
    dest = os.path.join(self.talos_path, 'talos', 'tests', 'webkit',
                        'PerformanceTests')

    # Start from a clean destination so stale files never linger.
    if os.path.exists(dest):
        shutil.rmtree(dest)

    self.info("Copying webkit benchmarks from %s to %s" % (src, dest))
    try:
        shutil.copytree(src, dest)
    except Exception:
        self.critical("Error copying webkit benchmarks from %s to %s" % (src, dest))
541
# Action methods. {{{1
# clobber defined in BaseScript

def download_and_extract(self, extract_dirs=None, suite_categories=None):
    """Download and extract the test package, always pulling the
    'common' and 'talos' suite categories regardless of the arguments."""
    return super(Talos, self).download_and_extract(
        suite_categories=['common', 'talos']
    )
549
def create_virtualenv(self, **kwargs):
    """VirtualenvMixin.create_virtualenv() assumes we're using
    self.config['virtualenv_modules']. Since we are installing
    talos from its source, we have to wrap that method here."""
    # if virtualenv already exists, just add to path and don't re-install, need it
    # in path so can import jsonschema later when validating output for perfherder
    venv_path = self.config.get("virtualenv_path")

    if self.run_local and os.path.exists(venv_path):
        self.info("Virtualenv already exists, skipping creation")
        python_interp = self.config.get('exes')['python']

        # site-packages lives under Lib/ on Windows, lib/<python>/ elsewhere.
        if 'win' in self.platform_name():
            site_packages = os.path.join(venv_path,
                                         'Lib',
                                         'site-packages')
        else:
            site_packages = os.path.join(venv_path,
                                         'lib',
                                         os.path.basename(python_interp),
                                         'site-packages')

        # if running gecko profiling install the requirements
        if self.gecko_profile:
            self._install_view_gecko_profile_req()

        sys.path.append(site_packages)
        return

    # virtualenv doesn't already exist so create it
    # install mozbase first, so we use in-tree versions
    if not self.run_local:
        mozbase_requirements = os.path.join(
            self.query_abs_dirs()['abs_test_install_dir'],
            'config',
            'mozbase_requirements.txt'
        )
    else:
        mozbase_requirements = os.path.join(
            os.path.dirname(self.talos_path),
            'config',
            'mozbase_source_requirements.txt'
        )
    self.register_virtualenv_module(
        requirements=[mozbase_requirements],
        two_pass=True,
        editable=True,
    )
    super(Talos, self).create_virtualenv()
    # talos in harness requires what else is
    # listed in talos requirements.txt file.
    self.install_module(
        requirements=[os.path.join(self.talos_path,
                                   'requirements.txt')]
    )
    self._install_view_gecko_profile_req()
606
def _install_view_gecko_profile_req(self):
607
# if running locally and gecko profiing is on, we will be using the
608
# view-gecko-profile tool which has its own requirements too
609
if self.gecko_profile and self.run_local:
610
tools = os.path.join(self.config['repo_path'], 'testing', 'tools')
611
view_gecko_profile_req = os.path.join(tools,
612
'view_gecko_profile',
613
'requirements.txt')
614
self.info("installing requirements for the view-gecko-profile tool")
615
self.install_module(requirements=[view_gecko_profile_req])
616
617
def _validate_treeherder_data(self, parser):
    # late import is required, because install is done in create_virtualenv
    import jsonschema

    # Exactly one PERFHERDER_DATA line is expected per run.
    if len(parser.found_perf_data) != 1:
        self.critical("PERFHERDER_DATA was seen %d times, expected 1."
                      % len(parser.found_perf_data))
        parser.update_worst_log_and_tbpl_levels(WARNING, TBPL_WARNING)
        return

    schema_path = os.path.join(external_tools_path,
                               'performance-artifact-schema.json')
    self.info("Validating PERFHERDER_DATA against %s" % schema_path)
    try:
        with open(schema_path) as f:
            schema = json.load(f)
        data = json.loads(parser.found_perf_data[0])
        jsonschema.validate(data, schema)
    except Exception:
        self.exception("Error while validating PERFHERDER_DATA")
        parser.update_worst_log_and_tbpl_levels(WARNING, TBPL_WARNING)
639
def _artifact_perf_data(self, parser, dest):
    # Copy the local perfherder results into the upload directory so they
    # are published as a build artifact.
    src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'local.json')
    try:
        shutil.copyfile(src, dest)
    except Exception:
        self.critical("Error copying results %s to upload dir %s" % (src, dest))
        parser.update_worst_log_and_tbpl_levels(CRITICAL, TBPL_FAILURE)
647
def run_tests(self, args=None, **kw):
    """run Talos tests"""

    # get talos options
    options = self.talos_options(args=args, **kw)

    # XXX temporary python version check
    python = self.query_python_path()
    self.run_command([python, "--version"])
    output_parser = TalosOutputParser(config=self.config, log_obj=self.log_obj,
                                      error_list=TalosErrorList)
    env = {'MOZ_UPLOAD_DIR': self.query_abs_dirs()['abs_blob_upload_dir']}
    if not self.run_local:
        env['MINIDUMP_STACKWALK'] = self.query_minidump_stackwalk()
    env['MINIDUMP_SAVE_PATH'] = self.query_abs_dirs()['abs_blob_upload_dir']
    env['RUST_BACKTRACE'] = 'full'
    if not os.path.isdir(env['MOZ_UPLOAD_DIR']):
        self.mkdir_p(env['MOZ_UPLOAD_DIR'])
    env = self.query_env(partial_env=env, log_level=INFO)
    # adjust PYTHONPATH to be able to use talos as a python package
    if 'PYTHONPATH' in env:
        env['PYTHONPATH'] = self.talos_path + os.pathsep + env['PYTHONPATH']
    else:
        env['PYTHONPATH'] = self.talos_path

    if self.repo_path is not None:
        env['MOZ_DEVELOPER_REPO_DIR'] = self.repo_path
    if self.obj_path is not None:
        env['MOZ_DEVELOPER_OBJ_DIR'] = self.obj_path

    # TODO: consider getting rid of this as we should be default to stylo now
    env['STYLO_FORCE_ENABLED'] = '1'

    # sets a timeout for how long talos should run without output
    output_timeout = self.config.get('talos_output_timeout', 3600)
    # run talos tests
    run_tests_script = os.path.join(self.talos_path, 'talos', 'run_tests.py')

    mozlog_opts = ['--log-tbpl-level=debug']
    if not self.run_local and 'suite' in self.config:
        fname_pattern = '%s_%%s.log' % self.config['suite']
        mozlog_opts.append('--log-errorsummary=%s'
                           % os.path.join(env['MOZ_UPLOAD_DIR'],
                                          fname_pattern % 'errorsummary'))
        mozlog_opts.append('--log-raw=%s'
                           % os.path.join(env['MOZ_UPLOAD_DIR'],
                                          fname_pattern % 'raw'))

    def launch_in_debug_mode(cmdline):
        # A debugger flag anywhere on the command line means talos must
        # own the terminal, so we can't pipe output through a parser.
        debug_opts = {'--debug', '--debugger', '--debugger_args'}
        return bool(debug_opts.intersection(set(cmdline)))

    command = [python, run_tests_script] + options + mozlog_opts
    if launch_in_debug_mode(command):
        talos_process = subprocess.Popen(command, cwd=self.workdir, env=env, bufsize=0)
        talos_process.wait()
    else:
        self.return_code = self.run_command(command, cwd=self.workdir,
                                            output_timeout=output_timeout,
                                            output_parser=output_parser,
                                            env=env)
    if output_parser.minidump_output:
        self.info("Looking at the minidump files for debugging purposes...")
        for item in output_parser.minidump_output:
            self.run_command(["ls", "-l", item])

    if self.return_code not in [0]:
        # update the worst log level and tbpl status
        log_level = ERROR
        tbpl_level = TBPL_FAILURE
        if self.return_code == 1:
            log_level = WARNING
            tbpl_level = TBPL_WARNING
        if self.return_code == 4:
            log_level = WARNING
            tbpl_level = TBPL_RETRY

        output_parser.update_worst_log_and_tbpl_levels(log_level, tbpl_level)
    elif '--no-upload-results' not in options:
        if not self.gecko_profile:
            self._validate_treeherder_data(output_parser)
            if not self.run_local:
                # copy results to upload dir so they are included as an artifact
                dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'perfherder-data.json')
                self._artifact_perf_data(output_parser, dest)

    self.record_status(output_parser.worst_tbpl_status,
                       level=output_parser.worst_log_level)