Source code

Revision control

Other Tools

1
# This Source Code Form is subject to the terms of the Mozilla Public
2
# License, v. 2.0. If a copy of the MPL was not distributed with this
3
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
4
5
from __future__ import absolute_import, print_function, unicode_literals
6
7
import argparse
8
import copy
9
import logging
10
import re
11
import shlex
12
from collections import defaultdict
13
14
logger = logging.getLogger(__name__)
15
16
# The build type aliases are very cryptic and only used in try flags; these
# are mappings from the single-char alias (as used with -b) to a longer,
# more recognizable form.
BUILD_TYPE_ALIASES = {
    'o': 'opt',
    'd': 'debug'
}
22
23
# Consider anything in this whitelist of kinds to be governed by -b/-p.
# (Set literal rather than set([...]) — same contents, idiomatic form.)
BUILD_KINDS = {
    'build',
    'artifact-build',
    'hazard',
    'l10n',
    'valgrind',
    'spidermonkey',
}
32
33
34
# mapping from shortcut name (usable with -u) to a boolean function identifying
# matching test names
def alias_prefix(prefix):
    """Return a predicate accepting test names that start with *prefix*."""
    def matches(name):
        return name.startswith(prefix)
    return matches
38
39
40
def alias_contains(infix):
    """Return a predicate accepting test names that contain *infix* anywhere."""
    def matches(name):
        return infix in name
    return matches
42
43
44
def alias_matches(pattern):
    """Return a predicate matching test names against the regex *pattern*.

    The pattern is compiled once, up front; the predicate returns the match
    object (truthy) or None, matching at the start of the name.
    """
    compiled = re.compile(pattern)
    def matches(name):
        return compiled.match(name)
    return matches
47
48
49
UNITTEST_ALIASES = {
    # Aliases specify shorthands that can be used in try syntax. The shorthand
    # is the dictionary key, with the value a predicate for matching
    # unittest_try_names (built with alias_prefix/alias_contains/alias_matches
    # above).
    #
    # Note that alias expansion is performed in the absence of any chunk
    # prefixes. For example, an alias mapping "foo" to a prefix "foobar"
    # would replace "foo-7" with "foobar-7". Note that a few aliases allowed
    # chunks to be specified without a leading `-`, for example
    # 'mochitest-dt1'. That's no longer supported.
    'cppunit': alias_prefix('cppunit'),
    'crashtest': alias_prefix('crashtest'),
    'crashtest-e10s': alias_prefix('crashtest-e10s'),
    'e10s': alias_contains('e10s'),
    'firefox-ui-functional': alias_prefix('firefox-ui-functional'),
    'gaia-js-integration': alias_contains('gaia-js-integration'),
    'gtest': alias_prefix('gtest'),
    'jittest': alias_prefix('jittest'),
    'jittests': alias_prefix('jittest'),
    'jsreftest': alias_prefix('jsreftest'),
    'jsreftest-e10s': alias_prefix('jsreftest-e10s'),
    'marionette': alias_prefix('marionette'),
    'mochitest': alias_prefix('mochitest'),
    'mochitests': alias_prefix('mochitest'),
    'mochitest-e10s': alias_prefix('mochitest-e10s'),
    'mochitests-e10s': alias_prefix('mochitest-e10s'),
    'mochitest-debug': alias_prefix('mochitest-debug-'),
    'mochitest-a11y': alias_contains('mochitest-a11y'),
    'mochitest-bc': alias_prefix('mochitest-browser-chrome'),
    'mochitest-e10s-bc': alias_prefix('mochitest-browser-chrome-e10s'),
    'mochitest-browser-chrome': alias_prefix('mochitest-browser-chrome'),
    'mochitest-e10s-browser-chrome': alias_prefix('mochitest-browser-chrome-e10s'),
    'mochitest-chrome': alias_contains('mochitest-chrome'),
    'mochitest-dt': alias_prefix('mochitest-devtools-chrome'),
    'mochitest-e10s-dt': alias_prefix('mochitest-devtools-chrome-e10s'),
    'mochitest-gl': alias_prefix('mochitest-webgl'),
    'mochitest-gl-e10s': alias_prefix('mochitest-webgl-e10s'),
    'mochitest-gpu': alias_prefix('mochitest-gpu'),
    'mochitest-gpu-e10s': alias_prefix('mochitest-gpu-e10s'),
    'mochitest-media': alias_prefix('mochitest-media'),
    'mochitest-media-e10s': alias_prefix('mochitest-media-e10s'),
    'mochitest-vg': alias_prefix('mochitest-valgrind'),
    'reftest': alias_matches(r'^(plain-)?reftest.*$'),
    'reftest-no-accel': alias_matches(r'^(plain-)?reftest-no-accel.*$'),
    'reftests': alias_matches(r'^(plain-)?reftest.*$'),
    'reftests-e10s': alias_matches(r'^(plain-)?reftest-e10s.*$'),
    'reftest-gpu': alias_matches(r'^(plain-)?reftest-gpu.*$'),
    'robocop': alias_prefix('robocop'),
    'web-platform-test': alias_prefix('web-platform-tests'),
    'web-platform-tests': alias_prefix('web-platform-tests'),
    'web-platform-tests-e10s': alias_prefix('web-platform-tests-e10s'),
    'web-platform-tests-crashtests': alias_prefix('web-platform-tests-crashtests'),
    'web-platform-tests-reftests': alias_prefix('web-platform-tests-reftests'),
    'web-platform-tests-reftests-e10s': alias_prefix('web-platform-tests-reftests-e10s'),
    'web-platform-tests-wdspec': alias_prefix('web-platform-tests-wdspec'),
    'web-platform-tests-wdspec-e10s': alias_prefix('web-platform-tests-wdspec-e10s'),
    'xpcshell': alias_prefix('xpcshell'),
}
107
108
# unittest platforms can be specified by substring of the "pretty name", which
# is basically the old Buildbot builder name. This dict has {pretty name:
# [test_platforms]} translations. This includes only the most commonly-used
# substrings. It is OK to add new test platforms to various shorthands here;
# if you add a new Linux64 test platform for instance, people will expect that
# their previous methods of requesting "all linux64 tests" will include this
# new platform, and they shouldn't have to explicitly spell out the new platform
# every time for such cases.
#
# Note that the test platforms here are only the prefix up to the `/`.
UNITTEST_PLATFORM_PRETTY_NAMES = {
    'Ubuntu': [
        'linux32',
        'linux64',
        'linux64-asan'
    ],
    'x64': [
        'linux64',
        'linux64-asan'
    ],
    'Android 7.0 Moto G5 32bit': ['android-hw-g5-7.0-arm7-api-16'],
    'Android 8.0 Google Pixel 2 32bit': ['android-hw-p2-8.0-arm7-api-16'],
    'Android 8.0 Google Pixel 2 64bit': ['android-hw-p2-8.0-android-aarch64'],
    '10.14': ['macosx1014-64'],
    # other commonly-used substrings for platforms not yet supported with
    # in-tree taskgraphs:
    # '10.10.5': [..TODO..],
    # '10.6': [..TODO..],
    # '10.8': [..TODO..],
    # 'Android 2.3 API9': [..TODO..],
    'Windows 7': ['windows7-32'],
    'Windows 7 VM': ['windows7-32-vm'],
    'Windows 8': ['windows8-64'],
    'Windows 10': ['windows10-64'],
    # 'Windows XP': [..TODO..],
    # 'win32': [..TODO..],
    # 'win64': [..TODO..],
}
146
147
TEST_CHUNK_SUFFIX = re.compile('(.*)-([0-9]+)$')
148
149
150
def escape_whitespace_in_brackets(input_str):
    '''
    Backslash-escape spaces that occur inside [...] groups.

    Try syntax may restrict tests to platforms listed in brackets, e.g.
    "mochitest[Windows 7]". The embedded space would otherwise be treated as
    a token separator by shlex, so it is escaped here first. Spaces outside
    brackets are left untouched.
    '''
    result = ""
    in_brackets = False
    for char in input_str:
        if char == '[':
            in_brackets = True
        elif char == ']':
            in_brackets = False
        elif char == ' ' and in_brackets:
            # Append backslash + space. The original used '\ ', an invalid
            # escape sequence (same runtime value, but a SyntaxError on
            # Python 3.12+); '\\ ' is the explicit, future-proof spelling.
            result += '\\ '
            continue
        result += char

    return result
176
177
178
def split_try_msg(message):
    """Extract and tokenize the try-syntax portion of a commit message.

    Returns [] when the message contains no 'try:' marker. Otherwise the
    rest of the line holding the marker is split shell-style; whitespace
    inside [] groups is escaped first so it survives shlex splitting.
    """
    marker = 'try:'
    if marker not in message:
        return []
    syntax_line = message[message.index(marker):].split('\n')[0]
    # shlex used to ensure we split correctly when giving values to argparse.
    return shlex.split(escape_whitespace_in_brackets(syntax_line))
186
187
188
def parse_message(message):
    """Parse the try syntax in a commit message into an options dict.

    The message is tokenized with split_try_msg() and fed through an
    argparse parser mirroring the historical try flags. Unknown arguments
    are ignored rather than treated as errors; the parsed namespace is
    returned as a plain dictionary.
    """
    parts = split_try_msg(message)

    # Argument parser based on try flag flags
    p = argparse.ArgumentParser()
    p.add_argument('-b', '--build', dest='build_types')
    p.add_argument('-p', '--platform', nargs='?',
                   dest='platforms', const='all', default='all')
    p.add_argument('-u', '--unittests', nargs='?',
                   dest='unittests', const='all', default='all')
    p.add_argument('-t', '--talos', nargs='?', dest='talos', const='all', default='none')
    p.add_argument('-r', '--raptor', nargs='?', dest='raptor', const='all', default='none')
    p.add_argument('-i', '--interactive',
                   dest='interactive', action='store_true', default=False)
    p.add_argument('-e', '--all-emails',
                   dest='notifications', action='store_const', const='all')
    p.add_argument('-f', '--failure-emails',
                   dest='notifications', action='store_const', const='failure')
    p.add_argument('-j', '--job', dest='jobs', action='append')
    p.add_argument('--rebuild-talos', dest='talos_trigger_tests', action='store',
                   type=int, default=1)
    p.add_argument('--rebuild-raptor', dest='raptor_trigger_tests', action='store',
                   type=int, default=1)
    p.add_argument('--setenv', dest='env', action='append')
    p.add_argument('--geckoProfile', dest='profile', action='store_true')
    p.add_argument('--tag', dest='tag', action='store', default=None)
    p.add_argument('--no-retry', dest='no_retry', action='store_true')
    p.add_argument('--include-nightly', dest='include_nightly', action='store_true')
    p.add_argument('--artifact', dest='artifact', action='store_true')

    # While we are transitioning from BB to TC, we want to push jobs to tc-worker
    # machines but not overload machines with every try push. Therefore, we add
    # this temporary option to be able to push jobs to tc-worker.
    p.add_argument('-w', '--taskcluster-worker',
                   dest='taskcluster_worker', action='store_true', default=False)

    # In order to run test jobs multiple times
    p.add_argument('--rebuild', dest='trigger_tests', type=int, default=1)

    parsed, _ = p.parse_known_args(parts)
    return vars(parsed)
228
229
230
class TryOptionSyntax(object):
231
232
def __init__(self, parameters, full_task_graph, graph_config):
233
"""
234
Apply the try options in parameters.
235
236
The resulting object has attributes:
237
238
- build_types: a list containing zero or more of 'opt' and 'debug'
239
- platforms: a list of selected platform names, or None for all
240
- unittests: a list of tests, of the form given below, or None for all
241
- jobs: a list of requested job names, or None for all
242
- trigger_tests: the number of times tests should be triggered (--rebuild)
243
- interactive: true if --interactive
244
- notifications: either None if no notifications or one of 'all' or 'failure'
245
- talos_trigger_tests: the number of time talos tests should be triggered (--rebuild-talos)
246
- env: additional environment variables (ENV=value)
247
- profile: run talos in profile mode
248
- tag: restrict tests to the specified tag
249
- no_retry: do not retry failed jobs
250
251
The unittests and talos lists contain dictionaries of the form:
252
253
{
254
'test': '<suite name>',
255
'platforms': [..platform names..], # to limit to only certain platforms
256
'only_chunks': set([..chunk numbers..]), # to limit only to certain chunks
257
}
258
"""
259
self.full_task_graph = full_task_graph
260
self.graph_config = graph_config
261
self.jobs = []
262
self.build_types = []
263
self.platforms = []
264
self.unittests = []
265
self.talos = []
266
self.raptor = []
267
self.trigger_tests = 0
268
self.interactive = False
269
self.notifications = None
270
self.talos_trigger_tests = 0
271
self.raptor_trigger_tests = 0
272
self.env = []
273
self.profile = False
274
self.tag = None
275
self.no_retry = False
276
self.artifact = False
277
278
options = parameters['try_options']
279
if not options:
280
return None
281
self.jobs = self.parse_jobs(options['jobs'])
282
self.build_types = self.parse_build_types(options['build_types'], full_task_graph)
283
self.platforms = self.parse_platforms(options['platforms'], full_task_graph)
284
self.unittests = self.parse_test_option(
285
"unittest_try_name", options['unittests'], full_task_graph)
286
self.talos = self.parse_test_option("talos_try_name", options['talos'], full_task_graph)
287
self.raptor = self.parse_test_option("raptor_try_name", options['raptor'], full_task_graph)
288
self.trigger_tests = options['trigger_tests']
289
self.interactive = options['interactive']
290
self.notifications = options['notifications']
291
self.talos_trigger_tests = options['talos_trigger_tests']
292
self.raptor_trigger_tests = options['raptor_trigger_tests']
293
self.env = options['env']
294
self.profile = options['profile']
295
self.tag = options['tag']
296
self.no_retry = options['no_retry']
297
self.artifact = options['artifact']
298
self.include_nightly = options['include_nightly']
299
300
self.test_tiers = self.generate_test_tiers(full_task_graph)
301
302
def generate_test_tiers(self, full_task_graph):
303
retval = defaultdict(set)
304
for t in full_task_graph.tasks.itervalues():
305
if t.attributes.get('kind') == 'test':
306
try:
307
tier = t.task['extra']['treeherder']['tier']
308
name = t.attributes.get('unittest_try_name')
309
retval[name].add(tier)
310
except KeyError:
311
pass
312
313
return retval
314
315
def parse_jobs(self, jobs_arg):
316
if not jobs_arg or jobs_arg == ['none']:
317
return [] # default is `-j none`
318
if jobs_arg == ['all']:
319
return None
320
expanded = []
321
for job in jobs_arg:
322
expanded.extend(j.strip() for j in job.split(','))
323
return expanded
324
325
def parse_build_types(self, build_types_arg, full_task_graph):
326
if build_types_arg is None:
327
build_types_arg = []
328
329
build_types = filter(None, [BUILD_TYPE_ALIASES.get(build_type) for
330
build_type in build_types_arg])
331
332
all_types = set(t.attributes['build_type']
333
for t in full_task_graph.tasks.itervalues()
334
if 'build_type' in t.attributes)
335
bad_types = set(build_types) - all_types
336
if bad_types:
337
raise Exception("Unknown build type(s) [%s] specified for try" % ','.join(bad_types))
338
339
return build_types
340
341
def parse_platforms(self, platform_arg, full_task_graph):
342
if platform_arg == 'all':
343
return None
344
345
RIDEALONG_BUILDS = self.graph_config['try']['ridealong-builds']
346
results = []
347
for build in platform_arg.split(','):
348
results.append(build)
349
if build in ('macosx64',):
350
results.append('macosx64-shippable')
351
logger.info("adding macosx64-shippable for try syntax using macosx64.")
352
if build in RIDEALONG_BUILDS:
353
results.extend(RIDEALONG_BUILDS[build])
354
logger.info("platform %s triggers ridealong builds %s" %
355
(build, ', '.join(RIDEALONG_BUILDS[build])))
356
357
test_platforms = set(t.attributes['test_platform']
358
for t in full_task_graph.tasks.itervalues()
359
if 'test_platform' in t.attributes)
360
build_platforms = set(t.attributes['build_platform']
361
for t in full_task_graph.tasks.itervalues()
362
if 'build_platform' in t.attributes)
363
all_platforms = test_platforms | build_platforms
364
bad_platforms = set(results) - all_platforms
365
if bad_platforms:
366
raise Exception("Unknown platform(s) [%s] specified for try" % ','.join(bad_platforms))
367
368
return results
369
370
def parse_test_option(self, attr_name, test_arg, full_task_graph):
371
'''
372
373
Parse a unittest (-u) or talos (-t) option, in the context of a full
374
task graph containing available `unittest_try_name` or `talos_try_name`
375
attributes. There are three cases:
376
377
- test_arg is == 'none' (meaning an empty list)
378
- test_arg is == 'all' (meaning use the list of jobs for that job type)
379
- test_arg is comma string which needs to be parsed
380
'''
381
382
# Empty job list case...
383
if test_arg is None or test_arg == 'none':
384
return []
385
386
all_platforms = set(t.attributes['test_platform'].split('/')[0]
387
for t in full_task_graph.tasks.itervalues()
388
if 'test_platform' in t.attributes)
389
390
tests = self.parse_test_opts(test_arg, all_platforms)
391
392
if not tests:
393
return []
394
395
all_tests = set(t.attributes[attr_name]
396
for t in full_task_graph.tasks.itervalues()
397
if attr_name in t.attributes)
398
399
# Special case where tests is 'all' and must be expanded
400
if tests[0]['test'] == 'all':
401
results = []
402
all_entry = tests[0]
403
for test in all_tests:
404
entry = {'test': test}
405
# If there are platform restrictions copy them across the list.
406
if 'platforms' in all_entry:
407
entry['platforms'] = list(all_entry['platforms'])
408
results.append(entry)
409
return self.parse_test_chunks(all_tests, results)
410
else:
411
return self.parse_test_chunks(all_tests, tests)
412
413
def parse_test_opts(self, input_str, all_platforms):
414
'''
415
Parse `testspec,testspec,..`, where each testspec is a test name
416
optionally followed by a list of test platforms or negated platforms in
417
`[]`.
418
419
No brackets indicates that tests should run on all platforms for which
420
builds are available. If testspecs are provided, then each is treated,
421
from left to right, as an instruction to include or (if negated)
422
exclude a set of test platforms. A single spec may expand to multiple
423
test platforms via UNITTEST_PLATFORM_PRETTY_NAMES. If the first test
424
spec is negated, processing begins with the full set of available test
425
platforms; otherwise, processing begins with an empty set of test
426
platforms.
427
'''
428
429
# Final results which we will return.
430
tests = []
431
432
cur_test = {}
433
token = ''
434
in_platforms = False
435
436
def normalize_platforms():
437
if 'platforms' not in cur_test:
438
return
439
# if the first spec is a negation, start with all platforms
440
if cur_test['platforms'][0][0] == '-':
441
platforms = all_platforms.copy()
442
else:
443
platforms = []
444
for platform in cur_test['platforms']:
445
if platform[0] == '-':
446
platforms = [p for p in platforms if p != platform[1:]]
447
else:
448
platforms.append(platform)
449
cur_test['platforms'] = platforms
450
451
def add_test(value):
452
normalize_platforms()
453
cur_test['test'] = value.strip()
454
tests.insert(0, cur_test)
455
456
def add_platform(value):
457
platform = value.strip()
458
if platform[0] == '-':
459
negated = True
460
platform = platform[1:]
461
else:
462
negated = False
463
platforms = UNITTEST_PLATFORM_PRETTY_NAMES.get(platform, [platform])
464
if negated:
465
platforms = ["-" + p for p in platforms]
466
cur_test['platforms'] = platforms + cur_test.get('platforms', [])
467
468
# This might be somewhat confusing but we parse the string _backwards_ so
469
# there is no ambiguity over what state we are in.
470
for char in reversed(input_str):
471
472
# , indicates exiting a state
473
if char == ',':
474
475
# Exit a particular platform.
476
if in_platforms:
477
add_platform(token)
478
479
# Exit a particular test.
480
else:
481
add_test(token)
482
cur_test = {}
483
484
# Token must always be reset after we exit a state
485
token = ''
486
elif char == '[':
487
# Exiting platform state entering test state.
488
add_platform(token)
489
token = ''
490
in_platforms = False
491
elif char == ']':
492
# Entering platform state.
493
in_platforms = True
494
else:
495
# Accumulator.
496
token = char + token
497
498
# Handle any left over tokens.
499
if token:
500
add_test(token)
501
502
return tests
503
504
def handle_alias(self, test, all_tests):
505
'''
506
Expand a test if its name refers to an alias, returning a list of test
507
dictionaries cloned from the first (to maintain any metadata).
508
'''
509
if test['test'] not in UNITTEST_ALIASES:
510
return [test]
511
512
alias = UNITTEST_ALIASES[test['test']]
513
514
def mktest(name):
515
newtest = copy.deepcopy(test)
516
newtest['test'] = name
517
return newtest
518
519
def exprmatch(alias):
520
return [t for t in all_tests if alias(t)]
521
522
return [mktest(t) for t in exprmatch(alias)]
523
524
def parse_test_chunks(self, all_tests, tests):
525
'''
526
Test flags may include parameters to narrow down the number of chunks in a
527
given push. We don't model 1 chunk = 1 job in taskcluster so we must check
528
each test flag to see if it is actually specifying a chunk.
529
'''
530
results = []
531
seen_chunks = {}
532
for test in tests:
533
matches = TEST_CHUNK_SUFFIX.match(test['test'])
534
if matches:
535
name = matches.group(1)
536
chunk = matches.group(2)
537
if name in seen_chunks:
538
seen_chunks[name].add(chunk)
539
else:
540
seen_chunks[name] = {chunk}
541
test['test'] = name
542
test['only_chunks'] = seen_chunks[name]
543
results.append(test)
544
else:
545
results.extend(self.handle_alias(test, all_tests))
546
547
# uniquify the results over the test names
548
results = {test['test']: test for test in results}.values()
549
return results
550
551
def find_all_attribute_suffixes(self, graph, prefix):
552
rv = set()
553
for t in graph.tasks.itervalues():
554
for a in t.attributes:
555
if a.startswith(prefix):
556
rv.add(a[len(prefix):])
557
return sorted(rv)
558
559
def task_matches(self, task):
560
attr = task.attributes.get
561
562
def check_run_on_projects():
563
if attr('nightly') and not self.include_nightly:
564
return False
565
return set(['try', 'all']) & set(attr('run_on_projects', []))
566
567
# Don't schedule code coverage when try option syntax is used
568
if 'ccov' in attr('build_platform', []):
569
return False
570
571
# Don't schedule tasks for windows10-aarch64 unless try fuzzy is used
572
if 'windows10-aarch64' in attr("test_platform", ""):
573
return False
574
575
# Don't schedule android-hw tests when try option syntax is used
576
if 'android-hw' in task.label:
577
return False
578
579
# Don't schedule fission tests when try option syntax is used
580
if attr('unittest_variant') == 'fission':
581
return False
582
583
def match_test(try_spec, attr_name):
584
run_by_default = True
585
if attr('build_type') not in self.build_types:
586
return False
587
588
if self.platforms is not None and attr('build_platform') not in self.platforms:
589
return False
590
elif not check_run_on_projects():
591
run_by_default = False
592
593
if try_spec is None:
594
return run_by_default
595
596
# TODO: optimize this search a bit
597
for test in try_spec:
598
if attr(attr_name) == test['test']:
599
break
600
else:
601
return False
602
603
if 'only_chunks' in test and attr('test_chunk') not in test['only_chunks']:
604
return False
605
606
tier = task.task['extra']['treeherder']['tier']
607
if 'platforms' in test:
608
if 'all' in test['platforms']:
609
return True
610
platform = attr('test_platform', '').split('/')[0]
611
# Platforms can be forced by syntax like "-u xpcshell[Windows 8]"
612
return platform in test['platforms']
613
elif tier != 1:
614
# Run Tier 2/3 tests if their build task is Tier 2/3 OR if there is
615
# no tier 1 test of that name.
616
build_task = self.full_task_graph.tasks[task.dependencies['build']]
617
build_task_tier = build_task.task['extra']['treeherder']['tier']
618
619
name = attr('unittest_try_name')
620
test_tiers = self.test_tiers.get(name)
621
622
if tier <= build_task_tier:
623
logger.debug("not skipping tier {} test {} because build task {} "
624
"is tier {}"
625
.format(tier, task.label, build_task.label,
626
build_task_tier))
627
return True
628
elif 1 not in test_tiers:
629
logger.debug("not skipping tier {} test {} without explicit inclusion; "
630
"it is configured to run on tiers {}"
631
.format(tier, task.label, test_tiers))
632
return True
633
else:
634
logger.debug("skipping tier {} test {} because build task {} is "
635
"tier {} and there is a higher-tier test of the same name"
636
.format(tier, task.label, build_task.label, build_task_tier))
637
return False
638
elif run_by_default:
639
return check_run_on_projects()
640
else:
641
return False
642
643
if attr('job_try_name'):
644
# Beware the subtle distinction between [] and None for self.jobs and self.platforms.
645
# They will be [] if there was no try syntax, and None if try syntax was detected but
646
# they remained unspecified.
647
if self.jobs is not None:
648
return attr('job_try_name') in self.jobs
649
650
# User specified `-j all`
651
if self.platforms is not None and attr('build_platform') not in self.platforms:
652
return False # honor -p for jobs governed by a platform
653
# "all" means "everything with `try` in run_on_projects"
654
return check_run_on_projects()
655
elif attr('kind') == 'test':
656
return match_test(self.unittests, 'unittest_try_name') \
657
or match_test(self.talos, 'talos_try_name') \
658
or match_test(self.raptor, 'raptor_try_name')
659
elif attr('kind') in BUILD_KINDS:
660
if attr('build_type') not in self.build_types:
661
return False
662
elif self.platforms is None:
663
# for "-p all", look for try in the 'run_on_projects' attribute
664
return check_run_on_projects()
665
else:
666
if attr('build_platform') not in self.platforms:
667
return False
668
return True
669
else:
670
return False
671
672
def __str__(self):
673
def none_for_all(list):
674
if list is None:
675
return '<all>'
676
return ', '.join(str(e) for e in list)
677
678
return "\n".join([
679
"build_types: " + ", ".join(self.build_types),
680
"platforms: " + none_for_all(self.platforms),
681
"unittests: " + none_for_all(self.unittests),
682
"talos: " + none_for_all(self.talos),
683
"raptor" + none_for_all(self.raptor),
684
"jobs: " + none_for_all(self.jobs),
685
"trigger_tests: " + str(self.trigger_tests),
686
"interactive: " + str(self.interactive),
687
"notifications: " + str(self.notifications),
688
"talos_trigger_tests: " + str(self.talos_trigger_tests),
689
"raptor_trigger_tests: " + str(self.raptor_trigger_tests),
690
"env: " + str(self.env),
691
"profile: " + str(self.profile),
692
"tag: " + str(self.tag),
693
"no_retry: " + str(self.no_retry),
694
"artifact: " + str(self.artifact),
695
])