Source code

Revision control

Other Tools

1
#!/usr/bin/env python
2
# ***** BEGIN LICENSE BLOCK *****
3
# This Source Code Form is subject to the terms of the Mozilla Public
4
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
5
# You can obtain one at http://mozilla.org/MPL/2.0/.
6
# ***** END LICENSE BLOCK *****
7
"""
8
run talos tests in a virtualenv
9
"""
10
11
import argparse
12
import os
13
import sys
14
import pprint
15
import copy
16
import re
17
import shutil
18
import subprocess
19
import json
20
21
import mozharness
22
from mozharness.base.config import parse_config_file
23
from mozharness.base.errors import PythonErrorList
24
from mozharness.base.log import OutputParser, DEBUG, ERROR, CRITICAL
25
from mozharness.base.log import INFO, WARNING
26
from mozharness.base.python import Python3Virtualenv
27
from mozharness.mozilla.testing.testbase import TestingMixin, testing_config_options
28
from mozharness.base.vcs.vcsbase import MercurialScript
29
from mozharness.mozilla.testing.errors import TinderBoxPrintRe
30
from mozharness.mozilla.automation import TBPL_SUCCESS, TBPL_WORST_LEVEL_TUPLE
31
from mozharness.mozilla.automation import TBPL_RETRY, TBPL_FAILURE, TBPL_WARNING
32
from mozharness.mozilla.tooltool import TooltoolMixin
33
from mozharness.mozilla.testing.codecoverage import (
34
CodeCoverageMixin,
35
code_coverage_config_options
36
)
37
38
39
# Absolute path to the mozharness "scripts" checkout (two directories above
# the mozharness package itself) and to the external tools bundled with it.
scripts_path = os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__)))
external_tools_path = os.path.join(scripts_path, 'external_tools')

# Talos-specific additions to the generic PythonErrorList: output patterns
# that should raise the parsed log level (and therefore the job status).
TalosErrorList = PythonErrorList + [
    {'regex': re.compile(r'''run-as: Package '.*' is unknown'''), 'level': DEBUG},
    {'substr': r'''FAIL: Graph server unreachable''', 'level': CRITICAL},
    {'substr': r'''FAIL: Busted:''', 'level': CRITICAL},
    {'substr': r'''FAIL: failed to cleanup''', 'level': ERROR},
    {'substr': r'''erfConfigurator.py: Unknown error''', 'level': CRITICAL},
    {'substr': r'''talosError''', 'level': CRITICAL},
    {'regex': re.compile(r'''No machine_name called '.*' can be found'''), 'level': CRITICAL},
    {'substr': r"""No such file or directory: 'browser_output.txt'""",
     'level': CRITICAL,
     'explanation': "Most likely the browser failed to launch, or the test was otherwise "
                    "unsuccessful in even starting."},
]

# TODO: check for running processes on script invocation
57
58
59
class TalosOutputParser(OutputParser):
    """Parse talos stdout/stderr.

    Tracks minidump-related commands, captured PERFHERDER_DATA JSON blobs,
    and the worst log level / tbpl status seen so far.
    """
    minidump_regex = re.compile(r'''talosError: "error executing: '(\S+) (\S+) (\S+)'"''')
    RE_PERF_DATA = re.compile(r'.*PERFHERDER_DATA:\s+(\{.*\})')
    worst_tbpl_status = TBPL_SUCCESS

    def __init__(self, **kwargs):
        super(TalosOutputParser, self).__init__(**kwargs)
        # (cmd, arg1, arg2) tuple from the last talosError line seen, if any
        self.minidump_output = None
        # raw JSON strings captured from PERFHERDER_DATA lines
        self.found_perf_data = []

    def update_worst_log_and_tbpl_levels(self, log_level, tbpl_level):
        """Ratchet the tracked worst log level and tbpl status."""
        self.worst_log_level = self.worst_level(log_level,
                                                self.worst_log_level)
        self.worst_tbpl_status = self.worst_level(
            tbpl_level, self.worst_tbpl_status,
            levels=TBPL_WORST_LEVEL_TUPLE
        )

    def parse_single_line(self, line):
        """ In Talos land, every line that starts with RETURN: needs to be
        printed with a TinderboxPrint:"""
        if line.startswith("RETURN:"):
            # BUGFIX: str.replace returns a new string (strings are
            # immutable); the previous code discarded the result, so the
            # RETURN: -> TinderboxPrint: rewrite never took effect.
            line = line.replace("RETURN:", "TinderboxPrint:")
        m = self.minidump_regex.search(line)
        if m:
            self.minidump_output = (m.group(1), m.group(2), m.group(3))

        m = self.RE_PERF_DATA.match(line)
        if m:
            self.found_perf_data.append(m.group(1))

        # now let's check if we should retry
        harness_retry_re = TinderBoxPrintRe['harness_error']['retry_regex']
        if harness_retry_re.search(line):
            self.critical(' %s' % line)
            self.update_worst_log_and_tbpl_levels(CRITICAL, TBPL_RETRY)
            return  # skip base parse_single_line
        super(TalosOutputParser, self).parse_single_line(line)
99
class Talos(TestingMixin, MercurialScript, TooltoolMixin,
            Python3Virtualenv, CodeCoverageMixin):
    """
    install and run Talos tests
    """
    # Command-line options specific to this script, extended with the shared
    # testing and code-coverage option groups.
    config_options = [
        [["--use-talos-json"],
         {"action": "store_true",
          "dest": "use_talos_json",
          "default": False,
          "help": "Use talos config from talos.json"
          }],
        [["--suite"],
         {"action": "store",
          "dest": "suite",
          "help": "Talos suite to run (from talos json)"
          }],
        [["--system-bits"],
         {"action": "store",
          "dest": "system_bits",
          "type": "choice",
          "default": "32",
          "choices": ['32', '64'],
          "help": "Testing 32 or 64 (for talos json plugins)"
          }],
        [["--add-option"],
         {"action": "extend",
          "dest": "talos_extra_options",
          "default": None,
          "help": "extra options to talos"
          }],
        # --geckoProfile / --geckoProfileInterval are legacy camelCase
        # aliases for the dashed options below: same dest, help suppressed.
        [["--geckoProfile"], {
            "dest": "gecko_profile",
            "action": "store_true",
            "default": False,
            "help": argparse.SUPPRESS
        }],
        [["--geckoProfileInterval"], {
            "dest": "gecko_profile_interval",
            "type": "int",
            "default": 0,
            "help": argparse.SUPPRESS
        }],
        [["--gecko-profile"], {
            "dest": "gecko_profile",
            "action": "store_true",
            "default": False,
            "help": "Whether or not to profile the test run and save the profile results"
        }],
        [["--gecko-profile-interval"], {
            "dest": "gecko_profile_interval",
            "type": "int",
            "default": 0,
            "help": "The interval between samples taken by the profiler (milliseconds)"
        }],
        [["--disable-e10s"], {
            "dest": "e10s",
            "action": "store_false",
            "default": True,
            "help": "Run without multiple processes (e10s)."
        }],
        [["--enable-webrender"], {
            "action": "store_true",
            "dest": "enable_webrender",
            "default": False,
            "help": "Enable the WebRender compositor in Gecko.",
        }],
        [["--setpref"], {
            "action": "append",
            "metavar": "PREF=VALUE",
            "dest": "extra_prefs",
            "default": [],
            "help": "Defines an extra user preference."}
         ],
    ] + testing_config_options + copy.deepcopy(code_coverage_config_options)
175
def __init__(self, **kwargs):
    """Set up actions/config defaults and cache convenience attributes."""
    kwargs.setdefault('config_options', self.config_options)
    kwargs.setdefault('all_actions', ['clobber',
                                      'download-and-extract',
                                      'populate-webroot',
                                      'create-virtualenv',
                                      'install',
                                      'run-tests',
                                      ])
    kwargs.setdefault('default_actions', ['clobber',
                                          'download-and-extract',
                                          'populate-webroot',
                                          'create-virtualenv',
                                          'install',
                                          'run-tests',
                                          ])
    kwargs.setdefault('config', {})
    super(Talos, self).__init__(**kwargs)

    # query_abs_dirs() must run after super().__init__ has set up config
    self.workdir = self.query_abs_dirs()['abs_work_dir']  # convenience

    self.run_local = self.config.get('run_local')
    self.installer_url = self.config.get("installer_url")
    self.talos_json_url = self.config.get("talos_json_url")
    self.talos_json = self.config.get("talos_json")
    self.talos_json_config = self.config.get("talos_json_config")
    self.repo_path = self.config.get("repo_path")
    self.obj_path = self.config.get("obj_path")
    self.tests = None
    # profiling may be requested via config or via either spelling of the
    # profiling flag in talos_extra_options
    self.gecko_profile = self.config.get('gecko_profile') or \
        "--geckoProfile" in self.config.get("talos_extra_options", []) or \
        "--gecko-profile" in self.config.get("talos_extra_options", [])
    self.gecko_profile_interval = self.config.get('gecko_profile_interval')
    # lazily resolved by query_pagesets_name() / query_benchmark_zip()
    self.pagesets_name = None
    self.benchmark_zip = None
211
# Some configuration options may come in via the try commit message, e.g.:
#   mozharness: --gecko-profile try: <stuff>
def query_gecko_profile_options(self):
    """Return the profiler-related talos arguments derived from instance state."""
    profile_args = []
    if not self.gecko_profile:
        return profile_args
    profile_args.append('--gecko-profile')
    if self.gecko_profile_interval:
        profile_args += ['--gecko-profile-interval',
                         str(self.gecko_profile_interval)]
    return profile_args
226
def query_abs_dirs(self):
    """Return the cached absolute directories, extending the base set with
    the blobber upload dir and the test install dir."""
    if self.abs_dirs:
        return self.abs_dirs
    dirs = super(Talos, self).query_abs_dirs()
    work_dir = dirs['abs_work_dir']
    dirs['abs_blob_upload_dir'] = os.path.join(work_dir, 'blobber_upload_dir')
    dirs['abs_test_install_dir'] = os.path.join(work_dir, 'tests')
    self.abs_dirs = dirs
    return self.abs_dirs
236
def query_talos_json_config(self):
    """Return the talos json config, parsing and caching it on first use."""
    if not self.talos_json_config:
        if not self.talos_json:
            # default location alongside the talos checkout
            self.talos_json = os.path.join(self.talos_path, 'talos.json')
        self.talos_json_config = parse_config_file(self.talos_json)
        self.info(pprint.pformat(self.talos_json_config))
    return self.talos_json_config
246
def query_pagesets_name(self):
    """Return the external pageset name for the current suite, if any.

    Certain suites require external pagesets to be downloaded and
    extracted; the result is cached on the instance.
    """
    if not self.pagesets_name:
        if self.query_talos_json_config() and self.suite is not None:
            suite_cfg = self.talos_json_config['suites'][self.suite]
            self.pagesets_name = suite_cfg.get('pagesets_name')
            self.pagesets_name_manifest = 'tp5n-pageset.manifest'
    return self.pagesets_name
257
def query_benchmark_zip(self):
    """Return the external benchmark zip name for the current suite, if any.

    Certain suites require external benchmarks to be downloaded and
    extracted; the result is cached on the instance.
    """
    if not self.benchmark_zip:
        if self.query_talos_json_config() and self.suite is not None:
            suite_cfg = self.talos_json_config['suites'][self.suite]
            self.benchmark_zip = suite_cfg.get('benchmark_zip')
            self.benchmark_zip_manifest = 'jetstream-benchmark.manifest'
    return self.benchmark_zip
268
def get_suite_from_test(self):
    """Retrieve the talos suite name from a given talos test name.

    Reads the test name following ``-a`` / ``--activeTests`` in
    ``talos_extra_options`` and looks it up in the talos json config.
    Calls self.fatal() (which aborts the script) if the test or config
    cannot be resolved; otherwise returns the suite name.
    """
    # running locally, single test name provided instead of suite; go through
    # tests and find suite name
    suite_name = None
    if self.query_talos_json_config():
        extra_opts = self.config['talos_extra_options']
        # Default past the end of the list so that a missing flag (or a flag
        # with no value) falls through to the fatal() below instead of
        # raising UnboundLocalError.
        test_name_index = len(extra_opts)
        if '-a' in extra_opts:
            test_name_index = extra_opts.index('-a') + 1
        if '--activeTests' in extra_opts:
            test_name_index = extra_opts.index('--activeTests') + 1
        if test_name_index < len(extra_opts):
            test_name = extra_opts[test_name_index]
            for talos_suite in self.talos_json_config['suites']:
                # 'tests' may be absent for a suite; treat it as empty
                # rather than raising TypeError on `in None`
                if test_name in self.talos_json_config['suites'][talos_suite].get('tests', []):
                    suite_name = talos_suite
        if not suite_name:
            # no suite found to contain the specified test, error out
            self.fatal("Test name is missing or invalid")
    else:
        self.fatal("Talos json config not found, cannot verify suite")
    return suite_name
290
def validate_suite(self):
    """Abort via fatal() if self.suite is not a suite in the talos json config."""
    if not (self.query_talos_json_config() and self.suite is not None):
        return
    if self.suite not in self.talos_json_config.get('suites'):
        self.fatal("Suite '%s' is not valid (not found in talos json config)" % self.suite)
296
def talos_options(self, args=None, **kw):
    """Return the command-line options to pass to talos.

    args: extra positional arguments appended verbatim.
    kw: keyword options that override the defaults built here.
    Calls self.fatal() (aborting) if no binary path is available.
    """
    # binary path
    binary_path = self.binary_path or self.config.get('binary_path')
    if not binary_path:
        msg = """Talos requires a path to the binary. You can specify binary_path or add
download-and-extract to your action list."""
        self.fatal(msg)

    # talos options
    options = []
    # talos can't gather data if the process name ends with '.exe'
    if binary_path.endswith('.exe'):
        binary_path = binary_path[:-4]
    # options overwritten from **kw
    kw_options = {'executablePath': binary_path}
    if 'suite' in self.config:
        kw_options['suite'] = self.config['suite']
    if self.config.get('title'):
        kw_options['title'] = self.config['title']
    if self.symbols_path:
        kw_options['symbolsPath'] = self.symbols_path

    kw_options.update(kw)
    # talos expects tests to be in the format (e.g.) 'ts:tp5:tsvg'
    tests = kw_options.get('activeTests')
    # BUGFIX: 'basestring' exists only on Python 2 and raised NameError on
    # Python 3; isinstance(..., str) is the py3-correct spelling.
    if tests and not isinstance(tests, str):
        tests = ':'.join(tests)  # Talos expects this format
        kw_options['activeTests'] = tests
    for key, value in kw_options.items():
        options.extend(['--%s' % key, value])
    # configure profiling options
    options.extend(self.query_gecko_profile_options())
    # extra arguments
    if args is not None:
        options += args
    if 'talos_extra_options' in self.config:
        options += self.config['talos_extra_options']
    if self.config.get('code_coverage', False):
        options.extend(['--code-coverage'])
    if self.config['extra_prefs']:
        options.extend(['--setpref={}'.format(p) for p in self.config['extra_prefs']])
    if self.config['enable_webrender']:
        options.extend(['--enable-webrender'])

    return options
343
def populate_webroot(self):
    """Populate the production test slaves' webroots"""
    self.talos_path = os.path.join(
        self.query_abs_dirs()['abs_test_install_dir'], 'talos'
    )

    # need to determine if talos pageset is required to be downloaded
    if self.config.get('run_local') and 'talos_extra_options' in self.config:
        # talos initiated locally, get and verify test/suite from cmd line
        self.talos_path = os.path.dirname(self.talos_json)
        if ('-a' in self.config['talos_extra_options'] or
                '--activeTests' in self.config['talos_extra_options']):
            # test name (-a or --activeTests) specified, find out what suite it is a part of
            self.suite = self.get_suite_from_test()
        elif '--suite' in self.config['talos_extra_options']:
            # --suite specified, get suite from cmd line and ensure is valid
            suite_name_index = self.config['talos_extra_options'].index('--suite') + 1
            if suite_name_index < len(self.config['talos_extra_options']):
                self.suite = self.config['talos_extra_options'][suite_name_index]
                self.validate_suite()
            else:
                self.fatal("Suite name not provided")
    else:
        # talos initiated in production via mozharness
        self.suite = self.config['suite']

    # collect the tooltool artifacts (pageset and/or benchmark zip) this
    # suite needs, as {'name': ..., 'manifest': ...} dicts
    tooltool_artifacts = []
    if self.query_pagesets_name():
        tooltool_artifacts.append({'name': self.pagesets_name,
                                   'manifest': self.pagesets_name_manifest})

    if self.query_benchmark_zip():
        tooltool_artifacts.append({'name': self.benchmark_zip,
                                   'manifest': self.benchmark_zip_manifest})

    # now that have the suite name, check if artifact is required, if so download it
    # the --no-download option will override this
    for artifact in tooltool_artifacts:
        if '--no-download' not in self.config.get('talos_extra_options', []):
            self.info("Downloading %s with tooltool..." % artifact)
            self.src_talos_webdir = os.path.join(self.talos_path, 'talos')
            src_talos_pageset = os.path.join(self.src_talos_webdir, 'tests')
            # only fetch + unzip when the artifact is not already in place
            if not os.path.exists(os.path.join(src_talos_pageset, artifact['name'])):
                manifest_file = os.path.join(self.talos_path, artifact['manifest'])
                self.tooltool_fetch(
                    manifest_file,
                    output_dir=src_talos_pageset,
                    cache=self.config.get('tooltool_cache')
                )
                archive = os.path.join(src_talos_pageset, artifact['name'])
                unzip = self.query_exe('unzip')
                unzip_cmd = [unzip, '-q', '-o', archive, '-d', src_talos_pageset]
                self.run_command(unzip_cmd, halt_on_failure=True)
            else:
                self.info("%s already available" % artifact)
        else:
            self.info("Not downloading %s because the no-download option was specified" %
                      artifact)

    # if running webkit tests locally, need to copy webkit source into talos/tests
    if self.config.get('run_local') and ('stylebench' in self.suite or
                                         'motionmark' in self.suite):
        self.get_webkit_source()
407
def get_webkit_source(self):
    """Copy the webkit PerformanceTests sources into the talos tests dir.

    In production the build system auto-copies the webkit source into
    place; when run locally we do it manually so talos can find it.
    """
    source_dir = os.path.join(self.repo_path, 'third_party', 'webkit',
                              'PerformanceTests')
    target_dir = os.path.join(self.talos_path, 'talos', 'tests', 'webkit',
                              'PerformanceTests')

    # start from a clean destination
    if os.path.exists(target_dir):
        shutil.rmtree(target_dir)

    self.info("Copying webkit benchmarks from %s to %s" % (source_dir, target_dir))
    try:
        shutil.copytree(source_dir, target_dir)
    except Exception:
        # best-effort: log at critical level rather than aborting
        self.critical("Error copying webkit benchmarks from %s to %s" % (source_dir, target_dir))
424
# Action methods. {{{1
# clobber defined in BaseScript

def download_and_extract(self, extract_dirs=None, suite_categories=None):
    """Download and unpack test packages for the 'common' and 'talos'
    suite categories.

    FIX: extract_dirs was previously accepted but silently dropped; it is
    now forwarded to the base implementation (default None preserves the
    old behavior). suite_categories remains fixed to ['common', 'talos'],
    as before.
    """
    return super(Talos, self).download_and_extract(
        extract_dirs=extract_dirs,
        suite_categories=['common', 'talos']
    )
432
def create_virtualenv(self, **kwargs):
    """VirtualenvMixin.create_virtualenv() assumes we're using
    self.config['virtualenv_modules']. Since we are installing
    talos from its source, we have to wrap that method here."""
    # if virtualenv already exists, just add to path and don't re-install, need it
    # in path so can import jsonschema later when validating output for perfherder
    _virtualenv_path = self.config.get("virtualenv_path")

    if self.run_local and os.path.exists(_virtualenv_path):
        self.info("Virtualenv already exists, skipping creation")
        _python_interp = self.config.get('exes')['python']

        # site-packages lives at a platform-dependent location inside the venv
        if 'win' in self.platform_name():
            _path = os.path.join(_virtualenv_path,
                                 'Lib',
                                 'site-packages')
        else:
            _path = os.path.join(_virtualenv_path,
                                 'lib',
                                 os.path.basename(_python_interp),
                                 'site-packages')

        # if running gecko profiling install the requirements
        if self.gecko_profile:
            self._install_view_gecko_profile_req()

        sys.path.append(_path)
        return

    # virtualenv doesn't already exist so create it
    # install mozbase first, so we use in-tree versions
    if not self.run_local:
        mozbase_requirements = os.path.join(
            self.query_abs_dirs()['abs_test_install_dir'],
            'config',
            'mozbase_requirements.txt'
        )
    else:
        mozbase_requirements = os.path.join(
            os.path.dirname(self.talos_path),
            'config',
            'mozbase_source_requirements.txt'
        )
    self.register_virtualenv_module(
        requirements=[mozbase_requirements],
        two_pass=True,
        editable=True,
    )
    super(Talos, self).create_virtualenv()
    # talos in harness requires what else is
    # listed in talos requirements.txt file.
    self.install_module(
        requirements=[os.path.join(self.talos_path,
                                   'requirements.txt')]
    )
    self._install_view_gecko_profile_req()
489
def _install_view_gecko_profile_req(self):
    """Install the requirements of the view-gecko-profile tool.

    Only applies to local runs with gecko profiling enabled; otherwise a
    no-op.
    """
    if not (self.gecko_profile and self.run_local):
        return
    tools_dir = os.path.join(self.config['repo_path'], 'testing', 'tools')
    requirements_file = os.path.join(tools_dir,
                                     'view_gecko_profile',
                                     'requirements.txt')
    self.info("installing requirements for the view-gecko-profile tool")
    self.install_module(requirements=[requirements_file])
500
def _validate_treeherder_data(self, parser):
    """Validate the captured PERFHERDER_DATA.

    Requires exactly one PERFHERDER_DATA blob that validates against the
    performance artifact schema; otherwise downgrades the job to warning
    via the parser's worst-level tracking.
    """
    # late import is required, because install is done in create_virtualenv
    import jsonschema

    found = parser.found_perf_data
    if len(found) != 1:
        self.critical("PERFHERDER_DATA was seen %d times, expected 1."
                      % len(found))
        parser.update_worst_log_and_tbpl_levels(WARNING, TBPL_WARNING)
        return

    schema_path = os.path.join(external_tools_path,
                               'performance-artifact-schema.json')
    self.info("Validating PERFHERDER_DATA against %s" % schema_path)
    try:
        with open(schema_path) as schema_file:
            schema = json.load(schema_file)
        jsonschema.validate(json.loads(found[0]), schema)
    except Exception:
        self.exception("Error while validating PERFHERDER_DATA")
        parser.update_worst_log_and_tbpl_levels(WARNING, TBPL_WARNING)
522
def _artifact_perf_data(self, parser, dest):
    """Copy the local.json perf results to *dest* in the upload dir.

    On failure, logs at critical level and marks the job as failed via
    the parser's worst-level tracking.
    """
    src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'local.json')
    try:
        shutil.copyfile(src, dest)
    except Exception:
        self.critical("Error copying results %s to upload dir %s" % (src, dest))
        parser.update_worst_log_and_tbpl_levels(CRITICAL, TBPL_FAILURE)
530
def run_tests(self, args=None, **kw):
    """run Talos tests"""

    # get talos options
    options = self.talos_options(args=args, **kw)

    # XXX temporary python version check
    python = self.query_python_path()
    self.run_command([python, "--version"])
    parser = TalosOutputParser(config=self.config, log_obj=self.log_obj,
                               error_list=TalosErrorList)
    # build up the environment for the talos subprocess
    env = {}
    env['MOZ_UPLOAD_DIR'] = self.query_abs_dirs()['abs_blob_upload_dir']
    if not self.run_local:
        env['MINIDUMP_STACKWALK'] = self.query_minidump_stackwalk()
    env['MINIDUMP_SAVE_PATH'] = self.query_abs_dirs()['abs_blob_upload_dir']
    env['RUST_BACKTRACE'] = 'full'
    if not os.path.isdir(env['MOZ_UPLOAD_DIR']):
        self.mkdir_p(env['MOZ_UPLOAD_DIR'])
    env = self.query_env(partial_env=env, log_level=INFO)
    # adjust PYTHONPATH to be able to use talos as a python package
    if 'PYTHONPATH' in env:
        env['PYTHONPATH'] = self.talos_path + os.pathsep + env['PYTHONPATH']
    else:
        env['PYTHONPATH'] = self.talos_path

    if self.repo_path is not None:
        env['MOZ_DEVELOPER_REPO_DIR'] = self.repo_path
    if self.obj_path is not None:
        env['MOZ_DEVELOPER_OBJ_DIR'] = self.obj_path

    # TODO: consider getting rid of this as we should be default to stylo now
    env['STYLO_FORCE_ENABLED'] = '1'

    # sets a timeout for how long talos should run without output
    output_timeout = self.config.get('talos_output_timeout', 3600)
    # run talos tests
    run_tests = os.path.join(self.talos_path, 'talos', 'run_tests.py')

    mozlog_opts = ['--log-tbpl-level=debug']
    # in production, also write errorsummary and raw logs to the upload dir
    if not self.run_local and 'suite' in self.config:
        fname_pattern = '%s_%%s.log' % self.config['suite']
        mozlog_opts.append('--log-errorsummary=%s'
                           % os.path.join(env['MOZ_UPLOAD_DIR'],
                                          fname_pattern % 'errorsummary'))
        mozlog_opts.append('--log-raw=%s'
                           % os.path.join(env['MOZ_UPLOAD_DIR'],
                                          fname_pattern % 'raw'))

    def launch_in_debug_mode(cmdline):
        # True when any debugger-related flag appears on the command line
        cmdline = set(cmdline)
        debug_opts = {'--debug', '--debugger', '--debugger_args'}

        return bool(debug_opts.intersection(cmdline))

    command = [python, run_tests] + options + mozlog_opts
    if launch_in_debug_mode(command):
        # run unparsed/unbuffered so the debugger owns the terminal
        talos_process = subprocess.Popen(command, cwd=self.workdir, env=env, bufsize=0)
        talos_process.wait()
    else:
        self.return_code = self.run_command(command, cwd=self.workdir,
                                            output_timeout=output_timeout,
                                            output_parser=parser,
                                            env=env)
    if parser.minidump_output:
        self.info("Looking at the minidump files for debugging purposes...")
        for item in parser.minidump_output:
            self.run_command(["ls", "-l", item])

    if self.return_code not in [0]:
        # update the worst log level and tbpl status
        # return code 1 -> warning, 4 -> retry, anything else -> failure
        log_level = ERROR
        tbpl_level = TBPL_FAILURE
        if self.return_code == 1:
            log_level = WARNING
            tbpl_level = TBPL_WARNING
        if self.return_code == 4:
            log_level = WARNING
            tbpl_level = TBPL_RETRY

        parser.update_worst_log_and_tbpl_levels(log_level, tbpl_level)
    elif '--no-upload-results' not in options:
        if not self.gecko_profile:
            self._validate_treeherder_data(parser)
            if not self.run_local:
                # copy results to upload dir so they are included as an artifact
                dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'perfherder-data.json')
                self._artifact_perf_data(parser, dest)

    self.record_status(parser.worst_tbpl_status,
                       level=parser.worst_log_level)