Source code

Revision control

Other Tools

1
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
4
5
from __future__ import absolute_import, print_function, unicode_literals
6
7
import argparse
8
import hashlib
9
import io
10
import itertools
11
import json
12
import logging
13
import ntpath
14
import operator
15
import os
16
import re
17
import shutil
18
import subprocess
19
import sys
20
import tarfile
21
import tempfile
22
import xml.etree.ElementTree as ET
23
import yaml
24
25
from collections import OrderedDict
26
27
import mozpack.path as mozpath
28
29
from mach.decorators import (
30
CommandArgument,
31
CommandArgumentGroup,
32
CommandProvider,
33
Command,
34
SettingsProvider,
35
SubCommand,
36
)
37
38
from mach.main import Mach
39
40
from mozbuild.artifact_builds import JOB_CHOICES
41
from mozbuild.base import (
42
BuildEnvironmentNotFoundException,
43
MachCommandBase,
44
MachCommandConditions as conditions,
45
MozbuildObject,
46
)
47
from mozbuild.util import ensureParentDir
48
49
from mozbuild.backend import (
50
backends,
51
)
52
53
from mozversioncontrol import get_repository_object
54
55
# Shared help text for the positional `what` argument of `mach build`.
BUILD_WHAT_HELP = '''
What to build. Can be a top-level make target or a relative directory. If
multiple options are provided, they will be built serially. Takes dependency
information from `topsrcdir/build/dumbmake-dependencies` to build additional
targets as needed. BUILDING ONLY PARTS OF THE TREE CAN RESULT IN BAD TREE
STATE. USE AT YOUR OWN RISK.
'''.strip()
62
63
# Warning printed after a build during which heavy swapping was observed.
# NOTE(review): the bugzilla URL line was lost in transcription of this file;
# the URL below is reconstructed -- confirm against the original source.
EXCESSIVE_SWAP_MESSAGE = '''
===================
PERFORMANCE WARNING

Your machine experienced a lot of swap activity during the build. This is
possibly a sign that your machine doesn't have enough physical memory or
not enough available memory to perform the build. It's also possible some
other system activity during the build is to blame.

If you feel this message is not appropriate for your machine configuration,
please file a Firefox Build System :: General bug at
https://bugzilla.mozilla.org/enter_bug.cgi?product=Firefox%20Build%20System&component=General
and tell us about your machine and build configuration so we can adjust the
warning heuristic.
===================
'''
80
81
82
# Function used to run clang-format on a batch of files. It is a helper function
# in order to integrate into the futures ecosystem clang-format.
def run_one_clang_format_batch(args):
    """Run one clang-format invocation described by *args*.

    Returns None on success (stdout is captured and discarded), or the
    subprocess.CalledProcessError instance on failure so that a caller
    collecting results from a futures pool can inspect what went wrong
    without the worker raising.
    """
    try:
        subprocess.check_output(args)
    except subprocess.CalledProcessError as e:
        return e
89
90
91
class StoreDebugParamsAndWarnAction(argparse.Action):
    """argparse action for the deprecated ``--debugparams`` flag.

    Behaves exactly like the builtin 'store' action, but first emits a
    deprecation warning on stderr pointing users at ``--debugger-args``.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # Warn on every use; the value is still honored below.
        sys.stderr.write('The --debugparams argument is deprecated. Please ' +
                         'use --debugger-args instead.\n\n')
        setattr(namespace, self.dest, values)
96
97
98
@CommandProvider
class Watch(MachCommandBase):
    """Interface to watch and re-build the tree."""

    @Command('watch', category='post-build', description='Watch and re-build the tree.',
             conditions=[conditions.is_firefox])
    @CommandArgument('-v', '--verbose', action='store_true',
                     help='Verbose output for what commands the watcher is running.')
    def watch(self, verbose=False):
        """Watch and re-build the source tree."""

        # Watching only makes sense for artifact builds; the faster daemon
        # cannot perform a compile/link pass.
        if not conditions.is_artifact_build(self):
            # NOTE(review): the URL continuation line was lost in transcription
            # of this file -- confirm against the original source.
            print('mach watch requires an artifact build. See '
                  'https://developer.mozilla.org/docs/Mozilla/Developer_guide/Build_Instructions/Artifact_builds')  # noqa
            return 1

        if not self.substs.get('WATCHMAN', None):
            # NOTE(review): reconstructed URL -- confirm.
            print('mach watch requires watchman to be installed. See '
                  'https://developer.mozilla.org/docs/Mozilla/Developer_guide/Build_Instructions/Incremental_builds_with_filesystem_watching')  # noqa
            return 1

        self._activate_virtualenv()
        try:
            self.virtualenv_manager.install_pip_package('pywatchman==1.3.0')
        except Exception:
            # NOTE(review): reconstructed URL -- confirm.
            print('Could not install pywatchman from pip. See '
                  'https://developer.mozilla.org/docs/Mozilla/Developer_guide/Build_Instructions/Incremental_builds_with_filesystem_watching')  # noqa
            return 1

        from mozbuild.faster_daemon import Daemon
        daemon = Daemon(self.config_environment)

        try:
            return daemon.watch()
        except KeyboardInterrupt:
            # Suppress ugly stack trace when user hits Ctrl-C.
            sys.exit(3)
135
136
137
@CommandProvider
class Build(MachCommandBase):
    """Interface to build the tree."""

    @Command('build', category='build', description='Build the tree.')
    @CommandArgument('--jobs', '-j', default='0', metavar='jobs', type=int,
                     help='Number of concurrent jobs to run. Default is the number of CPUs.')
    @CommandArgument('-C', '--directory', default=None,
                     help='Change to a subdirectory of the build directory first.')
    @CommandArgument('what', default=None, nargs='*', help=BUILD_WHAT_HELP)
    @CommandArgument('-X', '--disable-extra-make-dependencies',
                     default=False, action='store_true',
                     help='Do not add extra make dependencies.')
    @CommandArgument('-v', '--verbose', action='store_true',
                     help='Verbose output for what commands the build is running.')
    @CommandArgument('--keep-going', action='store_true',
                     help='Keep building after an error has occurred')
    def build(self, what=None, disable_extra_make_dependencies=None, jobs=0,
              directory=None, verbose=False, keep_going=False):
        """Build the source tree.

        With no arguments, this will perform a full build.

        Positional arguments define targets to build. These can be make targets
        or patterns like "<dir>/<target>" to indicate a make target within a
        directory.

        There are a few special targets that can be used to perform a partial
        build faster than what `mach build` would perform:

        * binaries - compiles and links all C/C++ sources and produces shared
          libraries and executables (binaries).

        * faster - builds JavaScript, XUL, CSS, etc files.

        "binaries" and "faster" almost fully complement each other. However,
        there are build actions not captured by either. If things don't appear to
        be rebuilding, perform a vanilla `mach build` to rebuild the world.
        """
        from mozbuild.controller.building import (
            BuildDriver,
        )

        self.log_manager.enable_all_structured_loggers()

        # All real work is delegated to the BuildDriver; this method only
        # forwards the CLI arguments.
        driver = self._spawn(BuildDriver)
        return driver.build(
            what=what,
            disable_extra_make_dependencies=disable_extra_make_dependencies,
            jobs=jobs,
            directory=directory,
            verbose=verbose,
            keep_going=keep_going,
            mach_context=self._mach_context)

    @Command('configure', category='build',
             description='Configure the tree (run configure and config.status).')
    @CommandArgument('options', default=None, nargs=argparse.REMAINDER,
                     help='Configure options')
    def configure(self, options=None, buildstatus_messages=False, line_handler=None):
        """Run configure/config.status via the BuildDriver."""
        from mozbuild.controller.building import (
            BuildDriver,
        )

        self.log_manager.enable_all_structured_loggers()
        driver = self._spawn(BuildDriver)

        return driver.configure(
            options=options,
            buildstatus_messages=buildstatus_messages,
            line_handler=line_handler)

    @Command('resource-usage', category='post-build',
             description='Show information about system resource usage for a build.')
    @CommandArgument('--address', default='localhost',
                     help='Address the HTTP server should listen on.')
    @CommandArgument('--port', type=int, default=0,
                     help='Port number the HTTP server should listen on.')
    @CommandArgument('--browser', default='firefox',
                     help='Web browser to automatically open. See webbrowser Python module.')
    @CommandArgument('--url',
                     help='URL of JSON document to display')
    def resource_usage(self, address=None, port=None, browser=None, url=None):
        """Serve the most recent build-resource JSON in a local web viewer."""
        import webbrowser
        from mozbuild.html_build_viewer import BuildViewerServer

        server = BuildViewerServer(address, port)

        if url:
            server.add_resource_json_url('url', url)
        else:
            # Default to the resource log written by the last build.
            last = self._get_state_filename('build_resources.json')
            if not os.path.exists(last):
                print('Build resources not available. If you have performed a '
                      'build and receive this message, the psutil Python package '
                      'likely failed to initialize properly.')
                return 1

            server.add_resource_json_file('last', last)
        try:
            webbrowser.get(browser).open_new_tab(server.url)
        except Exception:
            print('Cannot get browser specified, trying the default instead.')
            try:
                browser = webbrowser.get().open_new_tab(server.url)
            except Exception:
                print('Please open %s in a browser.' % server.url)

        print('Hit CTRL+c to stop server.')
        server.run()

    @Command('build-backend', category='build',
             description='Generate a backend used to build the tree.')
    @CommandArgument('-d', '--diff', action='store_true',
                     help='Show a diff of changes.')
    # It would be nice to filter the choices below based on
    # conditions, but that is for another day.
    @CommandArgument('-b', '--backend', nargs='+', choices=sorted(backends),
                     help='Which backend to build.')
    @CommandArgument('-v', '--verbose', action='store_true',
                     help='Verbose output.')
    @CommandArgument('-n', '--dry-run', action='store_true',
                     help='Do everything except writing files out.')
    def build_backend(self, backend, diff=False, verbose=False, dry_run=False):
        """Re-run config.status to (re)generate the requested build backend(s)."""
        python = self.virtualenv_manager.python_path
        config_status = os.path.join(self.topobjdir, 'config.status')

        if not os.path.exists(config_status):
            print('config.status not found. Please run |mach configure| '
                  'or |mach build| prior to building the %s build backend.'
                  % backend)
            return 1

        args = [python, config_status]
        if backend:
            args.append('--backend')
            args.extend(backend)
        if diff:
            args.append('--diff')
        if verbose:
            args.append('--verbose')
        if dry_run:
            args.append('--dry-run')

        return self._run_command_in_objdir(args=args, pass_thru=True,
                                           ensure_exit_code=False)
283
284
285
@CommandProvider
class CargoProvider(MachCommandBase):
    """Invoke cargo in useful ways."""

    @Command('cargo', category='build',
             description='Invoke cargo in useful ways.')
    def cargo(self):
        # Bare `mach cargo` is not a command; show usage and fail.
        self.parser.print_usage()
        return 1

    @SubCommand('cargo', 'check',
                description='Run `cargo check` on a given crate. Defaults to gkrust.')
    @CommandArgument('--all-crates', default=None, action='store_true',
                     help='Check all of the crates in the tree.')
    @CommandArgument('crates', default=None, nargs='*', help='The crate name(s) to check.')
    def check(self, all_crates=None, crates=None):
        """Run `cargo check` for the named crates (or gkrust by default)."""
        # XXX duplication with `mach vendor rust`
        crates_and_roots = {
            'gkrust': 'toolkit/library/rust',
            'gkrust-gtest': 'toolkit/library/gtest/rust',
            'js': 'js/rust',
            'mozjs_sys': 'js/src',
            'baldrdash': 'js/src/wasm/cranelift',
            'geckodriver': 'testing/geckodriver',
        }

        if all_crates:
            crates = crates_and_roots.keys()
        elif crates is None or crates == []:
            crates = ['gkrust']

        for crate in crates:
            root = crates_and_roots.get(crate, None)
            if not root:
                print('Cannot locate crate %s. Please check your spelling or '
                      'add the crate information to the list.' % crate)
                return 1

            check_targets = [
                'force-cargo-library-check',
                'force-cargo-host-library-check',
                'force-cargo-program-check',
                'force-cargo-host-program-check',
            ]

            # The make machinery in the crate's root directory drives cargo.
            ret = self._run_make(srcdir=False, directory=root,
                                 ensure_exit_code=0, silent=True,
                                 print_directory=False, target=check_targets)
            if ret != 0:
                return ret

        return 0
337
338
339
@CommandProvider
class Doctor(MachCommandBase):
    """Provide commands for diagnosing common build environment problems"""

    @Command('doctor', category='devenv',
             description='')
    @CommandArgument('--fix', default=None, action='store_true',
                     help='Attempt to fix found problems.')
    def doctor(self, fix=None):
        """Run all build-environment checks (optionally fixing problems)."""
        self._activate_virtualenv()
        from mozbuild.doctor import Doctor
        doctor = Doctor(self.topsrcdir, self.topobjdir, fix)
        return doctor.check_all()
351
352
353
@CommandProvider
class Clobber(MachCommandBase):
    NO_AUTO_LOG = True
    # Valid values for the positional `what` argument below.
    CLOBBER_CHOICES = ['objdir', 'python']

    @Command('clobber', category='build',
             description='Clobber the tree (delete the object directory).')
    @CommandArgument('what', default=['objdir'], nargs='*',
                     help='Target to clobber, must be one of {{{}}} (default objdir).'.format(
                         ', '.join(CLOBBER_CHOICES)))
    @CommandArgument('--full', action='store_true',
                     help='Perform a full clobber')
    def clobber(self, what, full=False):
        """Clean up the source and object directories.

        Performing builds and running various commands generate various files.

        Sometimes it is necessary to clean up these files in order to make
        things work again. This command can be used to perform that cleanup.

        By default, this command removes most files in the current object
        directory (where build output is stored). Some files (like Visual
        Studio project files) are not removed by default. If you would like
        to remove the object directory in its entirety, run with `--full`.

        The `python` target will clean up various generated Python files from
        the source directory and will remove untracked files from well-known
        directories containing Python packages. Run this to remove .pyc files,
        compiled C extensions, etc. Note: all files not tracked or ignored by
        version control in well-known Python package directories will be
        deleted. Run the `status` command of your VCS to see if any untracked
        files you haven't committed yet will be deleted.
        """
        invalid = set(what) - set(self.CLOBBER_CHOICES)
        if invalid:
            print('Unknown clobber target(s): {}'.format(', '.join(invalid)))
            return 1

        ret = 0
        if 'objdir' in what:
            from mozbuild.controller.clobber import Clobberer
            try:
                Clobberer(self.topsrcdir, self.topobjdir, self.substs).remove_objdir(full)
            except OSError as e:
                if sys.platform.startswith('win'):
                    # winerror 5 (access denied) / 32 (sharing violation):
                    # a file in the objdir is still open by another process.
                    if isinstance(e, WindowsError) and e.winerror in (5, 32):
                        self.log(logging.ERROR, 'file_access_error', {'error': e},
                                 "Could not clobber because a file was in use. If the "
                                 "application is running, try closing it. {error}")
                        return 1
                raise

        if 'python' in what:
            if conditions.is_hg(self):
                cmd = ['hg', 'purge', '--all', '-I', 'glob:**.py[cdo]',
                       '-I', 'path:python/', '-I', 'path:third_party/python/']
            elif conditions.is_git(self):
                cmd = ['git', 'clean', '-f', '-x', '*.py[cdo]', 'python/',
                       'third_party/python/']
            else:
                # We don't know what is tracked/untracked if we don't have VCS.
                # So we can't clean python/ and third_party/python/.
                cmd = ['find', '.', '-type', 'f', '-name', '*.py[cdo]',
                       '-delete']
            ret = subprocess.call(cmd, cwd=self.topsrcdir)
        return ret

    @property
    def substs(self):
        # Clobbering must work even without a configured build environment;
        # fall back to an empty substs dict in that case.
        try:
            return super(Clobber, self).substs
        except BuildEnvironmentNotFoundException:
            return {}
426
427
428
@CommandProvider
class Logs(MachCommandBase):
    """Provide commands to read mach logs."""
    NO_AUTO_LOG = True

    @Command('show-log', category='post-build',
             description='Display mach logs')
    @CommandArgument('log_file', nargs='?', type=argparse.FileType('rb'),
                     help='Filename to read log data from. Defaults to the log of the last '
                          'mach command.')
    def show_log(self, log_file=None):
        """Replay a structured mach log file through the terminal logger,
        paging through `less` when stdout is a TTY."""
        if not log_file:
            path = self._get_state_filename('last_log.json')
            log_file = open(path, 'rb')

        if os.isatty(sys.stdout.fileno()):
            env = dict(os.environ)
            if 'LESS' not in env:
                # Sensible default flags if none have been set in the user
                # environment.
                env[b'LESS'] = b'FRX'
            less = subprocess.Popen(['less'], stdin=subprocess.PIPE, env=env)
            # Various objects already have a reference to sys.stdout, so we
            # can't just change it, we need to change the file descriptor under
            # it to redirect to less's input.
            # First keep a copy of the sys.stdout file descriptor.
            output_fd = os.dup(sys.stdout.fileno())
            os.dup2(less.stdin.fileno(), sys.stdout.fileno())

        startTime = 0
        for line in log_file:
            created, action, params = json.loads(line)
            if not startTime:
                # Anchor relative timestamps to the first record.
                startTime = created
                self.log_manager.terminal_handler.formatter.start_time = \
                    created
            if 'line' in params:
                record = logging.makeLogRecord({
                    'created': created,
                    'name': self._logger.name,
                    'levelno': logging.INFO,
                    'msg': '{line}',
                    'params': params,
                    'action': action,
                })
                self._logger.handle(record)

        # NOTE(review): `less`/`output_fd` are created under os.isatty() above
        # but torn down under self.log_manager.terminal here -- presumably the
        # two conditions coincide in practice; confirm.
        if self.log_manager.terminal:
            # Close less's input so that it knows that we're done sending data.
            less.stdin.close()
            # Since the less's input file descriptor is now also the stdout
            # file descriptor, we still actually have a non-closed system file
            # descriptor for less's input. Replacing sys.stdout's file
            # descriptor with what it was before we replaced it will properly
            # close less's input.
            os.dup2(output_fd, sys.stdout.fileno())
            less.wait()
485
486
487
@CommandProvider
class Warnings(MachCommandBase):
    """Provide commands for inspecting warnings."""

    @property
    def database_path(self):
        # Warnings are persisted in the mach state directory.
        return self._get_state_filename('warnings.json')

    @property
    def database(self):
        """Load the warnings database from disk (empty if none exists)."""
        from mozbuild.compilation.warnings import WarningsDatabase

        path = self.database_path

        database = WarningsDatabase()

        if os.path.exists(path):
            database.load_from_file(path)

        return database

    @Command('warnings-summary', category='post-build',
             description='Show a summary of compiler warnings.')
    @CommandArgument('-C', '--directory', default=None,
                     help='Change to a subdirectory of the build directory first.')
    @CommandArgument('report', default=None, nargs='?',
                     help='Warnings report to display. If not defined, show the most '
                          'recent report.')
    def summary(self, directory=None, report=None):
        """Print per-warning-type counts, ascending, followed by a total."""
        database = self.database

        if directory:
            dirpath = self.join_ensure_dir(self.topsrcdir, directory)
            if not dirpath:
                return 1
        else:
            dirpath = None

        type_counts = database.type_counts(dirpath)
        # Was `iteritems()`, which only exists on Python 2; `items()` works on
        # both and this file targets py2/py3 via __future__ imports.
        sorted_counts = sorted(type_counts.items(),
                               key=operator.itemgetter(1))

        total = 0
        for k, v in sorted_counts:
            print('%d\t%s' % (v, k))
            total += v

        print('%d\tTotal' % total)

    @Command('warnings-list', category='post-build',
             description='Show a list of compiler warnings.')
    @CommandArgument('-C', '--directory', default=None,
                     help='Change to a subdirectory of the build directory first.')
    @CommandArgument('--flags', default=None, nargs='+',
                     help='Which warnings flags to match.')
    @CommandArgument('report', default=None, nargs='?',
                     help='Warnings report to display. If not defined, show the most '
                          'recent report.')
    def list(self, directory=None, flags=None, report=None):
        """Print each warning, optionally filtered by directory and/or flags."""
        database = self.database

        by_name = sorted(database.warnings)

        topsrcdir = mozpath.normpath(self.topsrcdir)

        if directory:
            directory = mozpath.normsep(directory)
            dirpath = self.join_ensure_dir(topsrcdir, directory)
            if not dirpath:
                return 1

        if flags:
            # Flatten lists of flags.
            flags = set(itertools.chain(*[flaglist.split(',') for flaglist in flags]))

        for warning in by_name:
            filename = mozpath.normsep(warning['filename'])

            # Render paths relative to the source root when possible.
            if filename.startswith(topsrcdir):
                filename = filename[len(topsrcdir) + 1:]

            if directory and not filename.startswith(directory):
                continue

            if flags and warning['flag'] not in flags:
                continue

            if warning['column'] is not None:
                print('%s:%d:%d [%s] %s' % (
                    filename, warning['line'], warning['column'],
                    warning['flag'], warning['message']))
            else:
                print('%s:%d [%s] %s' % (filename, warning['line'],
                                         warning['flag'], warning['message']))

    def join_ensure_dir(self, dir1, dir2):
        """Join two path fragments and return the result only if it is an
        existing directory; otherwise print an error and return None."""
        dir1 = mozpath.normpath(dir1)
        dir2 = mozpath.normsep(dir2)
        joined_path = mozpath.join(dir1, dir2)
        if os.path.isdir(joined_path):
            return joined_path
        print('Specified directory not found.')
        return None
590
591
592
@CommandProvider
class GTestCommands(MachCommandBase):
    @Command('gtest', category='testing',
             description='Run GTest unit tests (C++ tests).')
    @CommandArgument('gtest_filter', default=b"*", nargs='?', metavar='gtest_filter',
                     help="test_filter is a ':'-separated list of wildcard patterns "
                          "(called the positive patterns), optionally followed by a '-' "
                          "and another ':'-separated pattern list (called the negative patterns).")
    @CommandArgument('--jobs', '-j', default='1', nargs='?', metavar='jobs', type=int,
                     help='Run the tests in parallel using multiple processes.')
    @CommandArgument('--tbpl-parser', '-t', action='store_true',
                     help='Output test results in a format that can be parsed by TBPL.')
    @CommandArgument('--shuffle', '-s', action='store_true',
                     help='Randomize the execution order of tests.')
    @CommandArgument('--package',
                     default='org.mozilla.geckoview.test',
                     help='(Android only) Package name of test app.')
    @CommandArgument('--adbpath',
                     dest='adb_path',
                     help='(Android only) Path to adb binary.')
    @CommandArgument('--deviceSerial',
                     dest='device_serial',
                     help="(Android only) adb serial number of remote device. "
                          "Required when more than one device is connected to the host. "
                          "Use 'adb devices' to see connected devices.")
    @CommandArgument('--remoteTestRoot',
                     dest='remote_test_root',
                     help='(Android only) Remote directory to use as test root '
                          '(eg. /mnt/sdcard/tests or /data/local/tests).')
    @CommandArgument('--libxul',
                     dest='libxul_path',
                     help='(Android only) Path to gtest libxul.so.')
    @CommandArgumentGroup('debugging')
    @CommandArgument('--debug', action='store_true', group='debugging',
                     help='Enable the debugger. Not specifying a --debugger option will result in '
                          'the default debugger being used.')
    @CommandArgument('--debugger', default=None, type=str, group='debugging',
                     help='Name of debugger to use.')
    @CommandArgument('--debugger-args', default=None, metavar='params', type=str,
                     group='debugging',
                     help='Command-line arguments to pass to the debugger itself; '
                          'split as the Bourne shell would.')
    def gtest(self, shuffle, jobs, gtest_filter, tbpl_parser,
              package, adb_path, device_serial, remote_test_root, libxul_path,
              debug, debugger, debugger_args):
        """Build (if needed) and run the GTest binary, locally or on Android."""

        # We lazy build gtest because it's slow to link
        try:
            config = self.config_environment
        except Exception:
            print("Please run |./mach build| before |./mach gtest|.")
            return 1

        active_backend = config.substs.get('BUILD_BACKENDS', [None])[0]
        if 'Tup' in active_backend:
            gtest_build_target = mozpath.join(self.topobjdir, '<gtest>')
        else:
            gtest_build_target = 'recurse_gtest'

        res = self._mach_context.commands.dispatch('build', self._mach_context,
                                                   what=[gtest_build_target])
        if res:
            print("Could not build xul-gtest")
            return res

        if self.substs.get('MOZ_WIDGET_TOOLKIT') == 'cocoa':
            self._run_make(directory='browser/app', target='repackage',
                           ensure_exit_code=True)

        cwd = os.path.join(self.topobjdir, '_tests', 'gtest')

        if not os.path.isdir(cwd):
            os.makedirs(cwd)

        if conditions.is_android(self):
            if jobs != 1:
                print("--jobs is not supported on Android and will be ignored")
            if debug or debugger or debugger_args:
                print("--debug options are not supported on Android and will be ignored")
            return self.android_gtest(cwd, shuffle, gtest_filter,
                                      package, adb_path, device_serial,
                                      remote_test_root, libxul_path)

        if package or adb_path or device_serial or remote_test_root or libxul_path:
            print("One or more Android-only options will be ignored")

        app_path = self.get_binary_path('app')
        args = [app_path, '-unittest', '--gtest_death_test_style=threadsafe']

        if sys.platform.startswith('win') and \
           'MOZ_LAUNCHER_PROCESS' in self.defines:
            args.append('--wait-for-browser')

        if debug or debugger or debugger_args:
            args = self.prepend_debugger_args(args, debugger, debugger_args)

        # Use GTest environment variable to control test execution
        # For details see:
        # https://github.com/google/googletest/blob/master/googletest/docs/advanced.md
        # NOTE(review): the URL comment line above was lost in transcription --
        # confirm against the original source.
        gtest_env = {b'GTEST_FILTER': gtest_filter}

        # Note: we must normalize the path here so that gtest on Windows sees
        # a MOZ_GMP_PATH which has only Windows dir seperators, because
        # nsIFile cannot open the paths with non-Windows dir seperators.
        xre_path = os.path.join(os.path.normpath(self.topobjdir), "dist", "bin")
        gtest_env["MOZ_XRE_DIR"] = xre_path
        gtest_env["MOZ_GMP_PATH"] = os.pathsep.join(
            os.path.join(xre_path, p, "1.0")
            for p in ('gmp-fake', 'gmp-fakeopenh264')
        )

        gtest_env[b"MOZ_RUN_GTEST"] = b"True"

        if shuffle:
            gtest_env[b"GTEST_SHUFFLE"] = b"True"

        if tbpl_parser:
            gtest_env[b"MOZ_TBPL_PARSER"] = b"True"

        if jobs == 1:
            return self.run_process(args=args,
                                    append_env=gtest_env,
                                    cwd=cwd,
                                    ensure_exit_code=False,
                                    pass_thru=True)

        from mozprocess import ProcessHandlerMixin
        import functools

        def handle_line(job_id, line):
            # Prepend the jobId
            line = '[%d] %s' % (job_id + 1, line.strip())
            self.log(logging.INFO, "GTest", {'line': line}, '{line}')

        # Shard the test suite across |jobs| concurrent processes.
        gtest_env["GTEST_TOTAL_SHARDS"] = str(jobs)
        processes = {}
        for i in range(0, jobs):
            gtest_env["GTEST_SHARD_INDEX"] = str(i)
            processes[i] = ProcessHandlerMixin([app_path, "-unittest"],
                                               cwd=cwd,
                                               env=gtest_env,
                                               processOutputLine=[
                                                   functools.partial(handle_line, i)],
                                               universal_newlines=True)
            processes[i].run()

        exit_code = 0
        for process in processes.values():
            status = process.wait()
            if status:
                exit_code = status

        # Clamp error code to 255 to prevent overflowing multiple of
        # 256 into 0
        if exit_code > 255:
            exit_code = 255

        return exit_code

    def android_gtest(self, test_dir, shuffle, gtest_filter,
                      package, adb_path, device_serial, remote_test_root, libxul_path):
        """Run the gtest suite on a connected Android device via remotegtests.py."""
        # setup logging for mozrunner
        from mozlog.commandline import setup_logging
        format_args = {'level': self._mach_context.settings['test']['level']}
        default_format = self._mach_context.settings['test']['format']
        setup_logging('mach-gtest', {}, {default_format: sys.stdout}, format_args)

        # ensure that a device is available and test app is installed
        from mozrunner.devices.android_device import (verify_android_device, get_adb_path)
        verify_android_device(self, install=True, app=package, device_serial=device_serial)

        if not adb_path:
            adb_path = get_adb_path(self)
        if not libxul_path:
            libxul_path = os.path.join(self.topobjdir, "dist", "bin", "gtest", "libxul.so")

        # run gtest via remotegtests.py
        import imp
        path = os.path.join('testing', 'gtest', 'remotegtests.py')
        with open(path, 'r') as fh:
            imp.load_module('remotegtests', fh, path,
                            ('.py', 'r', imp.PY_SOURCE))
        import remotegtests
        tester = remotegtests.RemoteGTests()
        tester.run_gtest(test_dir, shuffle, gtest_filter, package, adb_path, device_serial,
                         remote_test_root, libxul_path, None)
        tester.cleanup()

        return 0

    def prepend_debugger_args(self, args, debugger, debugger_args):
        '''
        Given an array with program arguments, prepend arguments to run it under a
        debugger.

        :param args: The executable and arguments used to run the process normally.
        :param debugger: The debugger to use, or empty to use the default debugger.
        :param debugger_args: Any additional parameters to pass to the debugger.
        '''

        import mozdebug

        if not debugger:
            # No debugger name was provided. Look for the default ones on
            # current OS.
            debugger = mozdebug.get_default_debugger_name(mozdebug.DebuggerSearch.KeepLooking)

        if debugger:
            debuggerInfo = mozdebug.get_debugger_info(debugger, debugger_args)
            if not debuggerInfo:
                print("Could not find a suitable debugger in your PATH.")
                return 1

        # Parameters come from the CLI. We need to convert them before
        # their use.
        if debugger_args:
            from mozbuild import shellutil
            try:
                debugger_args = shellutil.split(debugger_args)
            except shellutil.MetaCharacterException as e:
                print("The --debugger_args you passed require a real shell to parse them.")
                print("(We can't handle the %r character.)" % e.char)
                return 1

        # Prepend the debugger args.
        args = [debuggerInfo.path] + debuggerInfo.args + args
        return args
819
820
821
@CommandProvider
class ClangCommands(MachCommandBase):
    @Command('clang-complete', category='devenv',
             description='Generate a .clang_complete file.')
    def clang_complete(self):
        """Print -I/-D/-include flags derived from `make showbuild` output,
        suitable for a .clang_complete file."""
        import shlex

        build_vars = {}

        def on_line(line):
            # Collect `NAME=value` pairs emitted by the showbuild target.
            elements = [s.strip() for s in line.split('=', 1)]

            if len(elements) != 2:
                return

            build_vars[elements[0]] = elements[1]

        try:
            # Silence normal terminal logging while make runs; on_line
            # captures the variable dump instead.
            old_logger = self.log_manager.replace_terminal_handler(None)
            self._run_make(target='showbuild', log=False, line_handler=on_line)
        finally:
            self.log_manager.replace_terminal_handler(old_logger)

        def print_from_variable(name):
            if name not in build_vars:
                return

            value = build_vars[name]

            # Rewrite objdir-relative include paths to absolute ones.
            value = value.replace('-I.', '-I%s' % self.topobjdir)
            value = value.replace(' .', ' %s' % self.topobjdir)
            value = value.replace('-I..', '-I%s/..' % self.topobjdir)
            value = value.replace(' ..', ' %s/..' % self.topobjdir)

            args = shlex.split(value)
            for i in range(0, len(args) - 1):
                arg = args[i]

                if arg.startswith(('-I', '-D')):
                    print(arg)
                    continue

                if arg.startswith('-include'):
                    # -include takes the next argument as its operand.
                    print(arg + ' ' + args[i + 1])
                    continue

        print_from_variable('COMPILE_CXXFLAGS')

        print('-I%s/ipc/chromium/src' % self.topsrcdir)
        print('-I%s/ipc/glue' % self.topsrcdir)
        print('-I%s/ipc/ipdl/_ipdlheaders' % self.topobjdir)
872
873
874
@CommandProvider
class Package(MachCommandBase):
    """Package the built product for distribution."""

    @Command('package', category='post-build',
             description='Package the built product for distribution as an APK, DMG, etc.')
    @CommandArgument('-v', '--verbose', action='store_true',
                     help='Verbose output for what commands the packaging process is running.')
    def package(self, verbose=False):
        """Run the `package` make target and notify on success."""
        ret = self._run_make(directory=".", target='package',
                             silent=not verbose, ensure_exit_code=False)
        if ret == 0:
            self.notify('Packaging complete')
        return ret
888
889
890
@CommandProvider
class Install(MachCommandBase):
    """Install a package."""

    @Command('install-desktop', category='post-build',
             conditional_name='install',
             conditions=[conditions.is_not_android],
             description='Install the package on the machine.')
    @CommandArgument('--verbose', '-v', action='store_true',
                     help='Print verbose output when installing to an Android emulator.')
    def install(self, verbose=False):
        """Run the `install` make target and notify on success."""
        ret = self._run_make(directory=".", target='install', ensure_exit_code=False)
        if ret == 0:
            self.notify('Install complete')
        return ret
905
906
907
@SettingsProvider
class RunSettings():
    # Settings consumed by `mach run`: wildcard prefs under [runprefs].
    config_settings = [
        ('runprefs.*', 'string', """
Pass a pref into Firefox when using `mach run`, of the form `foo.bar=value`.
Prefs will automatically be cast into the appropriate type. Integers can be
single quoted to force them to be strings.
""".strip()),
    ]
916
917
918
@CommandProvider
class RunProgram(MachCommandBase):
    """Run the compiled program."""

    # Argument-group label shared by the pass-through program arguments.
    prog_group = 'the compiled program'

    @Command('run-desktop', category='post-build',
             conditional_name='run',
             conditions=[conditions.is_not_android],
             description='Run the compiled program, possibly under a debugger or DMD.')
    @CommandArgument('params', nargs='...', group=prog_group,
                     help='Command-line arguments to be passed through to the program. Not '
                     'specifying a --profile or -P option will result in a temporary profile '
                     'being used.')
    @CommandArgumentGroup(prog_group)
    @CommandArgument('--remote', '-r', action='store_true', group=prog_group,
                     help='Do not pass the --no-remote argument by default.')
    @CommandArgument('--background', '-b', action='store_true', group=prog_group,
                     help='Do not pass the --foreground argument by default on Mac.')
    @CommandArgument('--noprofile', '-n', action='store_true', group=prog_group,
                     help='Do not pass the --profile argument by default.')
    @CommandArgument('--disable-e10s', action='store_true', group=prog_group,
                     help='Run the program with electrolysis disabled.')
    @CommandArgument('--enable-crash-reporter', action='store_true', group=prog_group,
                     help='Run the program with the crash reporter enabled.')
    @CommandArgument('--setpref', action='append', default=[], group=prog_group,
                     help='Set the specified pref before starting the program. Can be set '
                     'multiple times. Prefs can also be set in ~/.mozbuild/machrc in the '
                     '[runprefs] section - see `./mach settings` for more information.')
    @CommandArgument('--temp-profile', action='store_true', group=prog_group,
                     help='Run the program using a new temporary profile created inside '
                     'the objdir.')
    @CommandArgument('--macos-open', action='store_true', group=prog_group,
                     help="On macOS, run the program using the open(1) command. Per open(1), "
                     "the browser is launched \"just as if you had double-clicked the file's "
                     "icon\". The browser can not be launched under a debugger with this option.")
    @CommandArgumentGroup('debugging')
    @CommandArgument('--debug', action='store_true', group='debugging',
                     help='Enable the debugger. Not specifying a --debugger option will result '
                     'in the default debugger being used.')
    @CommandArgument('--debugger', default=None, type=str, group='debugging',
                     help='Name of debugger to use.')
    @CommandArgument('--debugger-args', default=None, metavar='params', type=str,
                     group='debugging',
                     help='Command-line arguments to pass to the debugger itself; '
                     'split as the Bourne shell would.')
    @CommandArgument('--debugparams', action=StoreDebugParamsAndWarnAction,
                     default=None, type=str, dest='debugger_args', group='debugging',
                     help=argparse.SUPPRESS)
    @CommandArgumentGroup('DMD')
    @CommandArgument('--dmd', action='store_true', group='DMD',
                     help='Enable DMD. The following arguments have no effect without this.')
    @CommandArgument('--mode', choices=['live', 'dark-matter', 'cumulative', 'scan'], group='DMD',
                     help='Profiling mode. The default is \'dark-matter\'.')
    @CommandArgument('--stacks', choices=['partial', 'full'], group='DMD',
                     help='Allocation stack trace coverage. The default is \'partial\'.')
    @CommandArgument('--show-dump-stats', action='store_true', group='DMD',
                     help='Show stats when doing dumps.')
    def run(self, params, remote, background, noprofile, disable_e10s,
            enable_crash_reporter, setpref, temp_profile, macos_open, debug,
            debugger, debugger_args, dmd, mode, stacks, show_dump_stats):
        """Assemble the full command line (binary, pass-through params,
        profile, optional debugger prefix) and environment, then launch the
        built application, returning its exit code."""
        from mozprofile import Profile, Preferences

        # The binary only exists after a build; fail with guidance otherwise.
        try:
            binpath = self.get_binary_path('app')
        except Exception as e:
            print("It looks like your program isn't built.",
                  "You can run |mach build| to build it.")
            print(e)
            return 1

        args = []
        if macos_open:
            # open(1) launches the .app bundle itself, which is incompatible
            # with wrapping the process in a debugger.
            if debug:
                print("The browser can not be launched in the debugger "
                      "when using the macOS open command.")
                return 1
            try:
                # Derive the .app bundle path from the binary path inside it.
                m = re.search(r'^.+\.app', binpath)
                apppath = m.group(0)
                args = ['open', apppath, '--args']
            except Exception as e:
                print("Couldn't get the .app path from the binary path. "
                      "The macOS open option can only be used on macOS")
                print(e)
                return 1
        else:
            args = [binpath]

        if params:
            args.extend(params)

        # By default run a standalone instance (-no-remote) in the
        # foreground (Mac); both behaviors can be opted out of above.
        if not remote:
            args.append('-no-remote')

        if not background and sys.platform == 'darwin':
            args.append('-foreground')

        # With the Windows launcher process enabled, keep mach attached
        # until the real browser process is up.
        if sys.platform.startswith('win') and \
           'MOZ_LAUNCHER_PROCESS' in self.defines:
            args.append('-wait-for-browser')

        # Detect whether the user already selects a profile via params; if
        # not (and --noprofile wasn't given), supply a development profile.
        no_profile_option_given = \
            all(p not in params for p in ['-profile', '--profile', '-P'])
        if no_profile_option_given and not noprofile:
            # Baseline prefs for a development profile, overridable by
            # machrc [runprefs] and then by --setpref.
            prefs = {
                'browser.aboutConfig.showWarning': False,
                'browser.shell.checkDefaultBrowser': False,
                'general.warnOnAboutConfig': False,
            }
            prefs.update(self._mach_context.settings.runprefs)
            prefs.update([p.split('=', 1) for p in setpref])
            for pref in prefs:
                prefs[pref] = Preferences.cast(prefs[pref])

            tmpdir = os.path.join(self.topobjdir, 'tmp')
            if not os.path.exists(tmpdir):
                os.makedirs(tmpdir)

            # Either a throwaway profile dir per run, or a reused
            # 'profile-default' inside the objdir.
            if (temp_profile):
                path = tempfile.mkdtemp(dir=tmpdir, prefix='profile-')
            else:
                path = os.path.join(tmpdir, 'profile-default')

            profile = Profile(path, preferences=prefs)
            args.append('-profile')
            args.append(profile.profile)

        # --setpref only makes sense when we create the profile ourselves.
        if not no_profile_option_given and setpref:
            print("setpref is only supported if a profile is not specified")
            return 1

        if not no_profile_option_given:
            # The profile name may be non-ascii, but come from the
            # commandline as str, so convert here with a better guess at
            # an encoding than the default.
            # NOTE: `unicode` exists on Python 2 only; this branch assumes a
            # Python 2 interpreter (consistent with the file's __future__
            # imports).
            encoding = (sys.getfilesystemencoding() or
                        sys.getdefaultencoding())
            args = [unicode(a, encoding) if not isinstance(a, unicode) else a
                    for a in args]

        # Environment for a local development build.
        extra_env = {
            'MOZ_DEVELOPER_REPO_DIR': self.topsrcdir,
            'MOZ_DEVELOPER_OBJ_DIR': self.topobjdir,
            'RUST_BACKTRACE': 'full',
        }

        # Crash reporter is off by default for local runs.
        if not enable_crash_reporter:
            extra_env['MOZ_CRASHREPORTER_DISABLE'] = '1'
        else:
            extra_env['MOZ_CRASHREPORTER'] = '1'

        if disable_e10s:
            extra_env['MOZ_FORCE_DISABLE_E10S'] = '1'

        if debug or debugger or debugger_args:
            # Under Emacs, quiet mach's own terminal output so the debugger
            # interaction stays readable.
            if 'INSIDE_EMACS' in os.environ:
                self.log_manager.terminal_handler.setLevel(logging.WARNING)

            import mozdebug
            if not debugger:
                # No debugger name was provided. Look for the default ones on
                # current OS.
                debugger = mozdebug.get_default_debugger_name(mozdebug.DebuggerSearch.KeepLooking)

            if debugger:
                self.debuggerInfo = mozdebug.get_debugger_info(debugger, debugger_args)

            if not debugger or not self.debuggerInfo:
                print("Could not find a suitable debugger in your PATH.")
                return 1

            # Parameters come from the CLI. We need to convert them before
            # their use.
            if debugger_args:
                from mozbuild import shellutil
                try:
                    debugger_args = shellutil.split(debugger_args)
                except shellutil.MetaCharacterException as e:
                    print("The --debugger-args you passed require a real shell to parse them.")
                    print("(We can't handle the %r character.)" % e.char)
                    return 1

            # Prepend the debugger args.
            args = [self.debuggerInfo.path] + self.debuggerInfo.args + args

        if dmd:
            # DMD options are communicated through the DMD environment
            # variable: '1' enables defaults, otherwise a space-joined
            # option string.
            dmd_params = []

            if mode:
                dmd_params.append('--mode=' + mode)
            if stacks:
                dmd_params.append('--stacks=' + stacks)
            if show_dump_stats:
                dmd_params.append('--show-dump-stats=yes')

            if dmd_params:
                extra_env['DMD'] = ' '.join(dmd_params)
            else:
                extra_env['DMD'] = '1'

        # Hand the terminal to the child and return its exit code.
        return self.run_process(args=args, ensure_exit_code=False,
                                pass_thru=True, append_env=extra_env)
1122
1123
1124
@CommandProvider
class Buildsymbols(MachCommandBase):
    """Produce a package of debug symbols suitable for use with Breakpad."""

    @Command('buildsymbols', category='post-build',
             description='Produce a package of Breakpad-format symbols.')
    def buildsymbols(self):
        # Delegate entirely to the top-level `buildsymbols` make target and
        # surface its exit code to the caller rather than raising on failure.
        make_kwargs = {'directory': ".",
                       'target': 'buildsymbols',
                       'ensure_exit_code': False}
        return self._run_make(**make_kwargs)
1132
1133
1134
@CommandProvider
class Makefiles(MachCommandBase):
    @Command('empty-makefiles', category='build-dev',
             description='Find empty Makefile.in in the tree.')
    def empty(self):
        """Print the tree-relative path of every Makefile.in that contains
        nothing beyond the standard boilerplate assignments and includes."""
        import pymake.parser
        import pymake.parserdata

        IGNORE_VARIABLES = {
            'DEPTH': ('@DEPTH@',),
            'topsrcdir': ('@top_srcdir@',),
            'srcdir': ('@srcdir@',),
            'relativesrcdir': ('@relativesrcdir@',),
            'VPATH': ('@srcdir@',),
        }

        IGNORE_INCLUDES = [
            'include $(DEPTH)/config/autoconf.mk',
            'include $(topsrcdir)/config/config.mk',
            'include $(topsrcdir)/config/rules.mk',
        ]

        def is_boilerplate(statement):
            # A variable assignment is boilerplate only when a statically
            # named, well-known variable receives its canonical @substitution@
            # value; anything else is a real statement.
            if isinstance(statement, pymake.parserdata.SetVariable):
                exp = statement.vnameexp
                if exp.is_static_string and exp.s in IGNORE_VARIABLES:
                    return statement.value in IGNORE_VARIABLES[exp.s]
                return False

            # An include is boilerplate when it pulls in one of the standard
            # config makefiles.
            if isinstance(statement, pymake.parserdata.Include):
                return statement.to_source() in IGNORE_INCLUDES

            # Any other statement kind makes the makefile non-empty.
            return False

        for path in self._makefile_ins():
            relpath = os.path.relpath(path, self.topsrcdir)
            try:
                if all(is_boilerplate(s) for s in pymake.parser.parsefile(path)):
                    print(relpath)
            except pymake.parser.SyntaxError:
                print('Warning: Could not parse %s' % relpath, file=sys.stderr)

    def _makefile_ins(self):
        """Yield the absolute path of every Makefile.in under topsrcdir."""
        for root, _dirs, filenames in os.walk(self.topsrcdir):
            if 'Makefile.in' in filenames:
                yield os.path.join(root, 'Makefile.in')
1189
1190
1191
@CommandProvider
class MachDebug(MachCommandBase):
    """Introspection commands for the mach and build environment."""

    @Command('environment', category='build-dev',
             description='Show info about the mach and build environment.')
    @CommandArgument('--format', default='pretty',
                     choices=['pretty', 'configure', 'json'],
                     help='Print data in the given format.')
    @CommandArgument('--output', '-o', type=str,
                     help='Output to the given file.')
    @CommandArgument('--verbose', '-v', action='store_true',
                     help='Print verbose output.')
    def environment(self, format, output=None, verbose=False):
        """Dispatch to the `_environment_<format>` writer, targeting either
        stdout or --output."""
        # NOTE(review): only the 'pretty' and 'json' handlers are defined in
        # this class; confirm a `_environment_configure` exists elsewhere,
        # otherwise `--format configure` would raise AttributeError here.
        func = getattr(self, '_environment_%s' % format.replace('.', '_'))

        if output:
            # We want to preserve mtimes if the output file already exists
            # and the content hasn't changed.
            from mozbuild.util import FileAvoidWrite
            with FileAvoidWrite(output) as out:
                return func(out, verbose)
        return func(sys.stdout, verbose)

    def _environment_pretty(self, out, verbose):
        """Write a human-readable summary of the mach/build environment to
        `out`; `verbose` additionally dumps config substs and defines."""
        state_dir = self._mach_context.state_dir
        import platform
        print('platform:\n\t%s' % platform.platform(), file=out)
        print('python version:\n\t%s' % sys.version, file=out)
        print('python prefix:\n\t%s' % sys.prefix, file=out)
        print('mach cwd:\n\t%s' % self._mach_context.cwd, file=out)
        print('os cwd:\n\t%s' % os.getcwd(), file=out)
        print('mach directory:\n\t%s' % self._mach_context.topdir, file=out)
        print('state directory:\n\t%s' % state_dir, file=out)

        print('object directory:\n\t%s' % self.topobjdir, file=out)

        # mozconfig details are optional; print each section only when set.
        if self.mozconfig['path']:
            print('mozconfig path:\n\t%s' % self.mozconfig['path'], file=out)
            if self.mozconfig['configure_args']:
                print('mozconfig configure args:', file=out)
                for arg in self.mozconfig['configure_args']:
                    print('\t%s' % arg, file=out)

            if self.mozconfig['make_extra']:
                print('mozconfig extra make args:', file=out)
                for arg in self.mozconfig['make_extra']:
                    print('\t%s' % arg, file=out)

            if self.mozconfig['make_flags']:
                print('mozconfig make flags:', file=out)
                for arg in self.mozconfig['make_flags']:
                    print('\t%s' % arg, file=out)

        config = None

        # config_environment is unavailable before configure has run; treat
        # that (or any failure to load it) as "no config to print".
        try:
            config = self.config_environment

        except Exception:
            pass

        if config:
            print('config topsrcdir:\n\t%s' % config.topsrcdir, file=out)
            print('config topobjdir:\n\t%s' % config.topobjdir, file=out)

            if verbose:
                print('config substitutions:', file=out)
                for k in sorted(config.substs):
                    print('\t%s: %s' % (k, config.substs[k]), file=out)

                print('config defines:', file=out)
                for k in sorted(config.defines):
                    print('\t%s' % k, file=out)

    def _environment_json(self, out, verbose):
        """Serialize this MozbuildObject's environment to `out` as JSON;
        `verbose` includes substs and defines."""
        import json

        class EnvironmentEncoder(json.JSONEncoder):
            # Teach the encoder about MozbuildObject (reduced to its core
            # directories/mozconfig) and about sets (emitted as lists).
            def default(self, obj):
                if isinstance(obj, MozbuildObject):
                    result = {
                        'topsrcdir': obj.topsrcdir,
                        'topobjdir': obj.topobjdir,
                        'mozconfig': obj.mozconfig,
                    }
                    if verbose:
                        result['substs'] = obj.substs
                        result['defines'] = obj.defines
                    return result
                elif isinstance(obj, set):
                    return list(obj)
                return json.JSONEncoder.default(self, obj)
        # Serialize the command object itself; the encoder above turns it
        # into a plain dict.
        json.dump(self, cls=EnvironmentEncoder, sort_keys=True, fp=out)
1283
1284
1285
class ArtifactSubCommand(SubCommand):
    """SubCommand variant that stacks the arguments shared by every
    `mach artifact` subcommand (--tree, --job, --verbose) onto the
    decorated function."""

    def __call__(self, func):
        # Apply the base SubCommand decoration first, then layer the common
        # arguments on top of the result.
        decorated = SubCommand.__call__(self, func)
        common_arguments = (
            CommandArgument('--tree', metavar='TREE', type=str,
                            help='Firefox tree.'),
            CommandArgument('--job', metavar='JOB', choices=JOB_CHOICES,
                            help='Build job.'),
            CommandArgument('--verbose', '-v', action='store_true',
                            help='Print verbose output.'),
        )
        for argument in common_arguments:
            decorated = argument(decorated)
        return decorated
1299
1300
1301
class SymbolsAction(argparse.Action):
    """argparse action for --symbols: a bare flag stores True, while an
    explicit value is stored as-is."""

    def __call__(self, parser, namespace, values, option_string=None):
        # If this function is called, it means the --symbols option was given,
        # so we want to store the value `True` if no explicit value was given
        # to the option.
        setattr(namespace, self.dest, values if values else True)
1307
1308
1309
@CommandProvider
class PackageFrontend(MachCommandBase):
    """Fetch and install binary artifacts from Mozilla automation."""

    @Command('artifact', category='post-build',
             description='Use pre-built artifacts to build Firefox.')
    def artifact(self):
        '''Download, cache, and install pre-built binary artifacts to build Firefox.

        Use |mach build| as normal to freshen your installed binary libraries:
        artifact builds automatically download, cache, and install binary
        artifacts from Mozilla automation, replacing whatever may be in your
        object directory. Use |mach artifact last| to see what binary artifacts
        were last used.

        Never build libxul again!

        '''
        # Parent command: all real behavior lives in the subcommands below.
        pass

    def _make_artifacts(self, tree=None, job=None, skip_cache=False,
                        download_tests=True, download_symbols=False,
                        download_host_bins=False,
                        download_maven_zip=False,
                        no_process=False):
        """Construct a configured mozbuild.artifacts.Artifacts instance for
        the subcommands, validating mutually-exclusive download options."""
        state_dir = self._mach_context.state_dir
        cache_dir = os.path.join(state_dir, 'package-frontend')

        # Version-control handles are passed through so Artifacts can
        # inspect the local checkout.
        hg = None
        if conditions.is_hg(self):
            hg = self.substs['HG']

        git = None
        if conditions.is_git(self):
            git = self.substs['GIT']

        # If we're building Thunderbird, we should be checking for comm-central artifacts.
        topsrcdir = self.substs.get('commtopsrcdir', self.topsrcdir)

        # --maven-zip is incompatible with every other download flavor.
        if download_maven_zip:
            if download_tests:
                raise ValueError('--maven-zip requires --no-tests')
            if download_symbols:
                raise ValueError('--maven-zip requires no --symbols')
            if download_host_bins:
                raise ValueError('--maven-zip requires no --host-bins')
            if not no_process:
                raise ValueError('--maven-zip requires --no-process')

        from mozbuild.artifacts import Artifacts
        artifacts = Artifacts(tree, self.substs, self.defines, job,
                              log=self.log, cache_dir=cache_dir,
                              skip_cache=skip_cache, hg=hg, git=git,
                              topsrcdir=topsrcdir,
                              download_tests=download_tests,
                              download_symbols=download_symbols,
                              download_host_bins=download_host_bins,
                              download_maven_zip=download_maven_zip,
                              no_process=no_process)
        return artifacts

    @ArtifactSubCommand('artifact', 'install',
                        'Install a good pre-built artifact.')
    @CommandArgument('source', metavar='SRC', nargs='?', type=str,
                     help='Where to fetch and install artifacts from. Can be omitted, in '
                     'which case the current hg repository is inspected; an hg revision; '
                     'a remote URL; or a local file.',
                     default=None)
    @CommandArgument('--skip-cache', action='store_true',
                     help='Skip all local caches to force re-fetching remote artifacts.',
                     default=False)
    @CommandArgument('--no-tests', action='store_true', help="Don't install tests.")
    @CommandArgument('--symbols', nargs='?', action=SymbolsAction, help='Download symbols.')
    @CommandArgument('--host-bins', action='store_true', help='Download host binaries.')
    @CommandArgument('--distdir', help='Where to install artifacts to.')
    @CommandArgument('--no-process', action='store_true',
                     help="Don't process (unpack) artifact packages, just download them.")
    @CommandArgument('--maven-zip', action='store_true', help="Download Maven zip (Android-only).")
    def artifact_install(self, source=None, skip_cache=False, tree=None, job=None, verbose=False,
                         no_tests=False, symbols=False, host_bins=False, distdir=None,
                         no_process=False, maven_zip=False):
        """Fetch artifacts for `source` and install them into the distdir."""
        self._set_log_level(verbose)
        artifacts = self._make_artifacts(tree=tree, job=job, skip_cache=skip_cache,
                                         download_tests=not no_tests,
                                         download_symbols=symbols,
                                         download_host_bins=host_bins,
                                         download_maven_zip=maven_zip,
                                         no_process=no_process)

        return artifacts.install_from(source, distdir or self.distdir)

    @ArtifactSubCommand('artifact', 'clear-cache',
                        'Delete local artifacts and reset local artifact cache.')
    def artifact_clear_cache(self, tree=None, job=None, verbose=False):
        """Wipe the local artifact cache; always returns 0."""
        self._set_log_level(verbose)
        artifacts = self._make_artifacts(tree=tree, job=job)
        artifacts.clear_cache()
        return 0

    @SubCommand('artifact', 'toolchain')
    @CommandArgument('--verbose', '-v', action='store_true',
                     help='Print verbose output.')
    @CommandArgument('--cache-dir', metavar='DIR',
                     help='Directory where to store the artifacts cache')
    @CommandArgument('--skip-cache', action='store_true',
                     help='Skip all local caches to force re-fetching remote artifacts.',
                     default=False)
    @CommandArgument('--from-build', metavar='BUILD', nargs='+',
                     help='Download toolchains resulting from the given build(s); '
                     'BUILD is a name of a toolchain task, e.g. linux64-clang')
    @CommandArgument('--tooltool-manifest', metavar='MANIFEST',
                     help='Explicit tooltool manifest to process')
    @CommandArgument('--authentication-file', metavar='FILE',
                     help='Use the RelengAPI token found in the given file to authenticate')
    @CommandArgument('--tooltool-url', metavar='URL',
                     help='Use the given url as tooltool server')
    @CommandArgument('--no-unpack', action='store_true',
                     help='Do not unpack any downloaded file')
    @CommandArgument('--retry', type=int, default=4,
                     help='Number of times to retry failed downloads')
    @CommandArgument('--artifact-manifest', metavar='FILE',
                     help='Store a manifest about the downloaded taskcluster artifacts')
    @CommandArgument('files', nargs='*',
                     help='A list of files to download, in the form path@task-id, in '
                     'addition to the files listed in the tooltool manifest.')
    def artifact_toolchain(self, verbose=False, cache_dir=None,
                           skip_cache=False, from_build=(),
                           tooltool_manifest=None, authentication_file=None,
                           tooltool_url=None, no_unpack=False, retry=None,
                           artifact_manifest=None, files=()):
        '''Download, cache and install pre-built toolchains.
        '''
        from mozbuild.artifacts import ArtifactCache
        from mozbuild.action.tooltool import (
            FileRecord,
            open_manifest,
            unpack_file,
        )
        from requests.adapters import HTTPAdapter
        import redo
        import requests

        from taskgraph.util.taskcluster import (
            get_artifact_url,
        )

        self._set_log_level(verbose)
        # Normally, we'd use self.log_manager.enable_unstructured(),
        # but that enables all logging, while we only really want tooltool's
        # and it also makes structured log output twice.
        # So we manually do what it does, and limit that to the tooltool
        # logger.
        if self.log_manager.terminal_handler:
            logging.getLogger('mozbuild.action.tooltool').addHandler(
                self.log_manager.terminal_handler)
            logging.getLogger('redo').addHandler(
                self.log_manager.terminal_handler)
            self.log_manager.terminal_handler.addFilter(
                self.log_manager.structured_filter)
        if not cache_dir:
            cache_dir = os.path.join(self._mach_context.state_dir, 'toolchains')

        # NOTE(review): the continuation line of this statement was lost in
        # extraction; reconstructed from upstream mozilla-central — verify.
        tooltool_url = (tooltool_url or
                        'https://tooltool.mozilla-releng.net').rstrip('/')

        cache = ArtifactCache(cache_dir=cache_dir, log=self.log,
                              skip_cache=skip_cache)

        # Optional bearer-token authentication toward the tooltool server.
        if authentication_file:
            with open(authentication_file, 'rb') as f:
                token = f.read().strip()

            class TooltoolAuthenticator(HTTPAdapter):
                def send(self, request, *args, **kwargs):
                    request.headers['Authorization'] = \
                        'Bearer {}'.format(token)
                    return super(TooltoolAuthenticator, self).send(
                        request, *args, **kwargs)

            cache._download_manager.session.mount(
                tooltool_url, TooltoolAuthenticator())

        # FileRecord that knows its download URL and fetches through the
        # artifact cache, remembering both the cache path and the original
        # base name.
        class DownloadRecord(FileRecord):
            def __init__(self, url, *args, **kwargs):
                super(DownloadRecord, self).__init__(*args, **kwargs)
                self.url = url
                self.basename = self.filename

            def fetch_with(self, cache):
                self.filename = cache.fetch(self.url)
                return self.filename

            def validate(self):
                # Without a size or digest there is nothing to check.
                if self.size is None and self.digest is None:
                    return True
                return super(DownloadRecord, self).validate()

        # DownloadRecord built from a taskcluster task: resolves the
        # artifact URL and pulls the expected digest from the task's
        # chain-of-trust data (retrying on 5xx).
        class ArtifactRecord(DownloadRecord):
            def __init__(self, task_id, artifact_name):
                for _ in redo.retrier(attempts=retry+1, sleeptime=60):
                    cot = cache._download_manager.session.get(
                        get_artifact_url(task_id, 'public/chain-of-trust.json'))
                    if cot.status_code >= 500:
                        continue
                    cot.raise_for_status()
                    break
                else:
                    # Retries exhausted: surface the last HTTP error.
                    cot.raise_for_status()

                digest = algorithm = None
                data = json.loads(cot.content)
                # Take the (last) algorithm/digest pair recorded for this
                # artifact, if any.
                for algorithm, digest in (data.get('artifacts', {})
                                          .get(artifact_name, {}).items()):
                    pass

                name = os.path.basename(artifact_name)
                artifact_url = get_artifact_url(task_id, artifact_name,
                                                use_proxy=not artifact_name.startswith('public/'))
                super(ArtifactRecord, self).__init__(
                    artifact_url, name,
                    None, digest, algorithm, unpack=True)

        records = OrderedDict()
        downloaded = []

        # Records from an explicit tooltool manifest.
        if tooltool_manifest:
            manifest = open_manifest(tooltool_manifest)
            for record in manifest.file_records:
                url = '{}/{}/{}'.format(tooltool_url, record.algorithm,
                                        record.digest)
                records[record.filename] = DownloadRecord(
                    url, record.filename, record.size, record.digest,
                    record.algorithm, unpack=record.unpack,
                    version=record.version, visibility=record.visibility)

        # Records resolved from toolchain task names via the task graph.
        if from_build:
            if 'MOZ_AUTOMATION' in os.environ:
                self.log(logging.ERROR, 'artifact', {},
                         'Do not use --from-build in automation; all dependencies '
                         'should be determined in the decision task.')
                return 1
            from taskgraph.optimize import IndexSearch
            from taskgraph.parameters import Parameters
            from taskgraph.generator import load_tasks_for_kind
            params = Parameters(
                level=os.environ.get('MOZ_SCM_LEVEL', '3'),
                strict=False,
            )

            root_dir = mozpath.join(self.topsrcdir, 'taskcluster/ci')
            toolchains = load_tasks_for_kind(params, 'toolchain', root_dir=root_dir)

            # Map 'toolchain-<alias>' to the canonical task name.
            aliases = {}
            for t in toolchains.values():
                alias = t.attributes.get('toolchain-alias')
                if alias:
                    aliases['toolchain-{}'.format(alias)] = \
                        t.task['metadata']['name']

            for b in from_build:
                # Keep the user's spelling for error messages.
                user_value = b

                if not b.startswith('toolchain-'):
                    b = 'toolchain-{}'.format(b)

                task = toolchains.get(aliases.get(b, b))
                if not task:
                    self.log(logging.ERROR, 'artifact', {'build': user_value},
                             'Could not find a toolchain build named `{build}`')
                    return 1

                # Resolve the concrete task id through the index; True/False
                # means no replacement task was found.
                task_id = IndexSearch().should_replace_task(
                    task, {}, task.optimization.get('index-search', []))
                artifact_name = task.attributes.get('toolchain-artifact')
                if task_id in (True, False) or not artifact_name:
                    self.log(logging.ERROR, 'artifact', {'build': user_value},
                             'Could not find artifacts for a toolchain build '
                             'named `{build}`. Local commits and other changes '
                             'in your checkout may cause this error. Try '
                             'updating to a fresh checkout of mozilla-central '
                             'to use artifact builds.')
                    return 1

                record = ArtifactRecord(task_id, artifact_name)
                records[record.filename] = record

        # Handle the list of files of the form path@task-id on the command
        # line. Each of those give a path to an artifact to download.
        for f in files:
            if '@' not in f:
                self.log(logging.ERROR, 'artifact', {},
                         'Expected a list of files of the form path@task-id')
                return 1
            name, task_id = f.rsplit('@', 1)
            record = ArtifactRecord(task_id, name)
            records[record.filename] = record

        # Download each record with retries, validating digests when known.
        # (itervalues: this file targets Python 2.)
        for record in records.itervalues():
            self.log(logging.INFO, 'artifact', {'name': record.basename},
                     'Downloading {name}')
            valid = False
            # sleeptime is 60 per retry.py, used by tooltool_wrapper.sh
            for attempt, _ in enumerate(redo.retrier(attempts=retry+1,
                                                     sleeptime=60)):
                try:
                    record.fetch_with(cache)
                except (requests.exceptions.HTTPError,
                        requests.exceptions.ChunkedEncodingError,
                        requests.exceptions.ConnectionError) as e:

                    if isinstance(e, requests.exceptions.HTTPError):
                        # The relengapi proxy likes to return error 400 bad request
                        # which seems improbably to be due to our (simple) GET
                        # being borked.
                        status = e.response.status_code
                        should_retry = status >= 500 or status == 400
                    else:
                        should_retry = True

                    if should_retry or attempt < retry:
                        level = logging.WARN
                    else:
                        level = logging.ERROR
                    # e.message is not always a string, so convert it first.
                    self.log(level, 'artifact', {}, str(e.message))
                    if not should_retry:
                        break
                    if attempt < retry:
                        self.log(logging.INFO, 'artifact', {},
                                 'Will retry in a moment...')
                    continue
                try:
                    valid = record.validate()
                except Exception:
                    pass
                if not valid:
                    # Discard the corrupt download before retrying.
                    os.unlink(record.filename)
                    if attempt < retry:
                        self.log(logging.INFO, 'artifact', {},
                                 'Corrupt download. Will retry in a moment...')
                    continue

                downloaded.append(record)
                break

            if not valid:
                self.log(logging.ERROR, 'artifact', {'name': record.basename},
                         'Failed to download {name}')
                return 1

        artifacts = {} if artifact_manifest else None

        for record in downloaded:
            local = os.path.join(os.getcwd(), record.basename)
            if os.path.exists(local):
                os.unlink(local)
            # unpack_file needs the file with its final name to work
            # (it picks the unpacker from the file name), so we
            # need to copy it, even though we remove it later. Use hard links
            # when possible.
            try:
                os.link(record.filename, local)
            except Exception:
                shutil.copy(record.filename, local)
            # Keep a sha256 of each downloaded file, for the chain-of-trust
            # validation.
            if artifact_manifest is not None:
                with open(local) as fh:
                    h = hashlib.sha256()
                    while True:
                        data = fh.read(1024 * 1024)
                        if not data:
                            break
                        h.update(data)
                artifacts[record.url] = {
                    'sha256': h.hexdigest(),
                }
            if record.unpack and not no_unpack:
                unpack_file(local)
                os.unlink(local)

        if not downloaded:
            self.log(logging.ERROR, 'artifact', {}, 'Nothing to download')
            # Only an error when explicit files were requested.
            if files:
                return 1

        if artifacts:
            ensureParentDir(artifact_manifest)
            with open(artifact_manifest, 'w') as fh:
                json.dump(artifacts, fh, indent=4, sort_keys=True)

        return 0
1701
1702
1703
class StaticAnalysisSubCommand(SubCommand):
    """SubCommand variant that also registers the shared --verbose flag on
    the decorated function."""

    def __call__(self, func):
        decorated = SubCommand.__call__(self, func)
        verbose_argument = CommandArgument('--verbose', '-v', action='store_true',
                                           help='Print verbose output.')
        return verbose_argument(decorated)
1713
1714
1715
class StaticAnalysisMonitor(object):
    """Track progress of a clang-tidy run: counts processed files, records
    the file currently being analyzed, and collects warnings (tagged with a
    reliability level from the clang-tidy config) into a WarningsDatabase."""

    def __init__(self, srcdir, objdir, clang_tidy_config, total):
        self._total = total
        self._processed = 0
        self._current = None
        self._srcdir = srcdir

        self._clang_tidy_config = clang_tidy_config['clang_checkers']
        # Transform the configuration to support Regex: turn glob-style
        # checker names ('mozilla-*') into regex patterns ('mozilla-.*').
        for item in self._clang_tidy_config:
            if item['name'] == '-*':
                continue
            # BUGFIX: str.replace returns a new string; the original code
            # discarded the result, so the glob->regex transform never
            # happened and regex matching in get_reliability() could not
            # match glob-named checkers.
            item['name'] = item['name'].replace('*', '.*')

        from mozbuild.compilation.warnings import (
            WarningsCollector,
            WarningsDatabase,
        )

        self._warnings_database = WarningsDatabase()

        def on_warning(warning):
            self._warnings_database.insert(warning)

        self._warnings_collector = WarningsCollector(on_warning, objdir=objdir)

    @property
    def num_files(self):
        # Total number of files scheduled for analysis.
        return self._total

    @property
    def num_files_processed(self):
        # Number of files clang-tidy has started on so far.
        return self._processed

    @property
    def current_file(self):
        # srcdir-relative path of the file being analyzed, or None.
        return self._current

    @property
    def warnings_db(self):
        return self._warnings_database

    def on_line(self, line):
        """Process one line of clang-tidy output.

        Returns a (warning, relevant) tuple: `warning` is the parsed warning
        dict (or None), and `relevant` is False for progress lines naming
        the next file, True otherwise.
        """
        warning = None

        # The collector raises on lines it cannot parse; those are simply
        # not warnings.
        try:
            warning = self._warnings_collector.process_line(line)
        except Exception:
            pass

        if line.find('clang-tidy') != -1:
            # Progress line: the last token is the file being analyzed.
            filename = line.split(' ')[-1]
            if os.path.isfile(filename):
                self._current = os.path.relpath(filename, self._srcdir)
            else:
                self._current = None
            self._processed = self._processed + 1
            return (warning, False)
        if warning is not None:
            def get_reliability(checker_name):
                # get the matcher from self._clang_tidy_config that is the 'name' field
                reliability = None
                for item in self._clang_tidy_config:
                    if item['name'] == checker_name:
                        reliability = item.get('reliability', 'low')
                        break
                    else:
                        # We are using a regex in order to also match 'mozilla-.* like checkers'
                        matcher = re.match(item['name'], checker_name)
                        if matcher is not None and matcher.group(0) == checker_name:
                            reliability = item.get('reliability', 'low')
                            break
                return reliability
            reliability = get_reliability(warning['flag'])
            if reliability is not None:
                warning['reliability'] = reliability
        return (warning, True)
1792
1793
1794
@CommandProvider
1795
class StaticAnalysis(MachCommandBase):
1796
"""Utilities for running C++ static analysis checks and format."""
1797
1798
# List of file extension to consider (should start with dot)
1799
_format_include_extensions = ('.cpp', '.c', '.cc', '.h', '.m', '.mm')
1800
# File contaning all paths to exclude from formatting
1801
_format_ignore_file = '.clang-format-ignore'
1802
1803
_clang_tidy_config = None
1804
_cov_config = None
1805
1806
@Command('static-analysis', category='testing',
1807
description='Run C++ static analysis checks')
1808
def static_analysis(self):
1809
# If not arguments are provided, just print a help message.
1810
mach = Mach(os.getcwd())
1811
mach.run(['static-analysis', '--help'])
1812
1813
@StaticAnalysisSubCommand('static-analysis', 'check',
                          'Run the checks using the helper tool')
@CommandArgument('source', nargs='*', default=['.*'],
                 help='Source files to be analyzed (regex on path). '
                      'Can be omitted, in which case the entire code base '
                      'is analyzed. The source argument is ignored if '
                      'there is anything fed through stdin, in which case '
                      'the analysis is only performed on the files changed '
                      'in the patch streamed through stdin. This is called '
                      'the diff mode.')
@CommandArgument('--checks', '-c', default='-*', metavar='checks',
                 help='Static analysis checks to enable. By default, this enables only '
                      'checks that are published here: https://mzl.la/2DRHeTh, but can be any '
                      'clang-tidy checks syntax.')
@CommandArgument('--jobs', '-j', default='0', metavar='jobs', type=int,
                 help='Number of concurrent jobs to run. Default is the number of CPUs.')
@CommandArgument('--strip', '-p', default='1', metavar='NUM',
                 help='Strip NUM leading components from file names in diff mode.')
@CommandArgument('--fix', '-f', default=False, action='store_true',
                 help='Try to autofix errors detected by clang-tidy checkers.')
@CommandArgument('--header-filter', '-h-f', default='', metavar='header_filter',
                 help='Regular expression matching the names of the headers to '
                      'output diagnostics from. Diagnostics from the main file '
                      'of each translation unit are always displayed')
@CommandArgument('--output', '-o', default=None,
                 help='Write clang-tidy output in a file')
@CommandArgument('--format', default='text', choices=('text', 'json'),
                 help='Output format to write in a file')
@CommandArgument('--outgoing', default=False, action='store_true',
                 help='Run static analysis checks on outgoing files from mercurial repository')
def check(self, source=None, jobs=2, strip=1, verbose=False, checks='-*',
          fix=False, header_filter='', output=None, format='text', outgoing=False):
    """Run clang-tidy over the files of the compilation database matching
    ``source``.

    Ensures the clang tools and the compilation database are available,
    counts the matching compilation units, then drives the helper tool and
    reports a warning summary.  Returns a non-zero exit code on failure.
    """
    from mozbuild.controller.building import (
        StaticAnalysisFooter,
        StaticAnalysisOutputManager,
    )

    self._set_log_level(verbose)
    self.log_manager.enable_all_structured_loggers()

    rc = self._get_clang_tools(verbose=verbose)
    if rc != 0:
        return rc

    if self._is_version_eligible() is False:
        self.log(logging.ERROR, 'static-analysis', {},
                 "You're using an old version of clang-format binary."
                 " Please update to a more recent one by running: './mach bootstrap'")
        return 1

    rc = self._build_compile_db(verbose=verbose)
    rc = rc or self._build_export(jobs=jobs, verbose=verbose)
    if rc != 0:
        return rc

    # Use outgoing files instead of source files
    if outgoing:
        repo = get_repository_object(self.topsrcdir)
        files = repo.get_outgoing_files()
        # Materialize the list: `source` is sliced and len()-ed below, which
        # a lazy map() object does not support on Python 3.
        source = [os.path.abspath(f) for f in files]

    # Close the compilation database after reading it (open() without a
    # context manager leaked the handle).
    with open(self._compile_db, 'r') as db:
        compile_db = json.load(db)

    # Split in several chunks to avoid hitting Python's limit of 100 groups in re
    total = 0
    chunk_size = 50
    for offset in range(0, len(source), chunk_size):
        source_chunks = source[offset:offset + chunk_size]
        name_re = re.compile('(' + ')|('.join(source_chunks) + ')')
        for f in compile_db:
            if name_re.search(f['file']):
                total += 1

    if not total:
        self.log(logging.INFO, 'static-analysis', {},
                 "There are no files eligible for analysis. Please note that 'header' files "
                 "cannot be used for analysis since they do not consist compilation units.")
        return 0

    cwd = self.topobjdir
    self._compilation_commands_path = self.topobjdir
    if self._clang_tidy_config is None:
        self._clang_tidy_config = self._get_clang_tidy_config()
    args = self._get_clang_tidy_command(
        checks=checks, header_filter=header_filter, sources=source, jobs=jobs, fix=fix)

    monitor = StaticAnalysisMonitor(
        self.topsrcdir, self.topobjdir, self._clang_tidy_config, total)

    footer = StaticAnalysisFooter(self.log_manager.terminal, monitor)
    with StaticAnalysisOutputManager(self.log_manager, monitor, footer) as output_manager:
        rc = self.run_process(args=args, ensure_exit_code=False,
                              line_handler=output_manager.on_line, cwd=cwd)

        self.log(logging.WARNING, 'warning_summary',
                 {'count': len(monitor.warnings_db)},
                 '{count} warnings present.')

        # Write output file
        if output is not None:
            output_manager.write(output, format)

    if rc != 0:
        return rc
    # if we are building firefox for android it might be nice to
    # also analyze the java code base
    if self.substs['MOZ_BUILD_APP'] == 'mobile/android':
        rc = self.check_java(source, jobs, strip, verbose, skip_export=True)
    return rc
1922
1923
@StaticAnalysisSubCommand('static-analysis', 'check-coverity',
                          'Run coverity static-analysis tool on the given files. '
                          'Can only be run by automation! '
                          'It\'s result is stored as an json file on the artifacts server.')
@CommandArgument('source', nargs='*', default=[],
                 help='Source files to be analyzed by Coverity Static Analysis Tool. '
                      'This is ran only in automation.')
@CommandArgument('--output', '-o', default=None,
                 help='Write coverity output translated to json output in a file')
@CommandArgument('--coverity_output_path', '-co', default=None,
                 help='Path where to write coverity results as cov-results.json. '
                      'If no path is specified the default path from the coverity working '
                      'directory, ~./mozbuild/coverity is used.')
@CommandArgument('--outgoing', default=False, action='store_true',
                 help='Run coverity on outgoing files from mercurial or git repository')
def check_coverity(self, source=None, output=None, coverity_output_path=None,
                   outgoing=False, verbose=False):
    """Run the Coverity desktop analysis over ``source`` (automation only).

    Downloads and configures Coverity, runs cov-translate for every matching
    compilation command, performs the analysis and optionally dumps the
    translated results to ``output``.  Returns a non-zero value on failure.
    """
    self._set_log_level(verbose)
    self.log_manager.enable_all_structured_loggers()

    # None (rather than a mutable []) as the default avoids the shared
    # mutable-default pitfall; normalize it here.
    if source is None:
        source = []

    if 'MOZ_AUTOMATION' not in os.environ:
        self.log(logging.INFO, 'static-analysis', {},
                 'Coverity based static-analysis cannot be ran outside automation.')
        return

    # Use outgoing files instead of source files
    if outgoing:
        repo = get_repository_object(self.topsrcdir)
        files = repo.get_outgoing_files()
        # Materialize the list: len() below would fail on a lazy map()
        # object on Python 3.
        source = [os.path.abspath(f) for f in files]

    if len(source) == 0:
        self.log(logging.ERROR, 'static-analysis', {},
                 'There are no files that coverity can use to scan.')
        return 0

    rc = self._build_compile_db(verbose=verbose)
    rc = rc or self._build_export(jobs=2, verbose=verbose)

    if rc != 0:
        return rc

    commands_list = self.get_files_with_commands(source)
    if len(commands_list) == 0:
        self.log(logging.INFO, 'static-analysis', {},
                 'There are no files that need to be analyzed.')
        return 0

    # Load the configuration file for coverity static-analysis
    # For the moment we store only the reliability index for each checker
    # as the rest is managed on the https://github.com/mozilla/release-services side.
    self._cov_config = self._get_cov_config()

    rc = self.setup_coverity()
    if rc != 0:
        return rc

    # First run cov-run-desktop --setup in order to setup the analysis env
    cmd = [self.cov_run_desktop, '--setup']
    self.log(logging.INFO, 'static-analysis', {},
             'Running {} --setup'.format(self.cov_run_desktop))

    rc = self.run_process(args=cmd, cwd=self.cov_path, pass_thru=True)

    if rc != 0:
        self.log(logging.ERROR, 'static-analysis', {},
                 'Running {} --setup failed!'.format(self.cov_run_desktop))
        return rc

    # Run cov-configure for clang
    cmd = [self.cov_configure, '--clang']
    self.log(logging.INFO, 'static-analysis', {},
             'Running {} --clang'.format(self.cov_configure))

    rc = self.run_process(args=cmd, cwd=self.cov_path, pass_thru=True)

    if rc != 0:
        self.log(logging.ERROR, 'static-analysis', {},
                 'Running {} --clang failed!'.format(self.cov_configure))
        return rc

    # For each element in commands_list run `cov-translate`
    for element in commands_list:
        cmd = [self.cov_translate, '--dir', self.cov_idir_path] + element['command'].split(' ')
        self.log(logging.INFO, 'static-analysis', {},
                 'Running Coverity Translate for {}'.format(cmd))
        rc = self.run_process(args=cmd, cwd=element['directory'], pass_thru=True)
        if rc != 0:
            self.log(logging.ERROR, 'static-analysis', {},
                     'Running Coverity Translate failed for {}'.format(cmd))
            # Propagate the exit code (the previous code returned the
            # command list itself here, which is not a valid status).
            return rc

    if coverity_output_path is None:
        cov_result = mozpath.join(self.cov_state_path, 'cov-results.json')
    else:
        cov_result = mozpath.join(coverity_output_path, 'cov-results.json')

    # Once the capture is performed we need to do the actual Coverity Desktop analysis
    cmd = [self.cov_run_desktop, '--json-output-v6', cov_result, '--analyze-captured-source']
    self.log(logging.INFO, 'static-analysis', {},
             'Running Coverity Analysis for {}'.format(cmd))
    rc = self.run_process(cmd, cwd=self.cov_state_path, pass_thru=True)
    if rc != 0:
        self.log(logging.ERROR, 'static-analysis', {}, 'Coverity Analysis failed!')

    if output is not None:
        self.dump_cov_artifact(cov_result, source, output)
2030
2031
def get_reliability_index_for_cov_checker(self, checker_name):
    """Return the configured reliability index for a Coverity checker.

    Falls back to 'medium' when the config is missing, the checker is not
    listed, or the checker entry has no 'reliability' field.  (Also fixes
    the misspelled 'reliablity'/'cosinder' wording in the log messages.)
    """
    if self._cov_config is None:
        self.log(logging.INFO, 'static-analysis', {}, 'Coverity config file not found, '
                 'using default-value \'reliability\' = medium. for checker {}'.format(
                    checker_name))
        return 'medium'

    checkers = self._cov_config['coverity_checkers']
    if checker_name not in checkers:
        self.log(logging.INFO, 'static-analysis', {},
                 'Coverity checker {} not found to determine reliability index. '
                 'For the moment we shall use the default \'reliability\' = medium.'.format(
                    checker_name))
        return 'medium'

    if 'reliability' not in checkers[checker_name]:
        # This checker doesn't have a reliability index
        self.log(logging.INFO, 'static-analysis', {},
                 'Coverity checker {} doesn\'t have a reliability index set, '
                 'field \'reliability\' is missing, please consider adding it. '
                 'For the moment we shall use the default \'reliability\' = medium.'.format(
                    checker_name))
        return 'medium'

    return checkers[checker_name]['reliability']
2056
2057
def dump_cov_artifact(self, cov_results, source, output):
    """Translate Coverity's JSON results into our standard warnings JSON.

    Reads ``cov_results``, keeps only issues whose file belongs to
    ``source`` and writes ``{'files': {path: {'warnings': [...]}}}``
    to ``output``.
    """
    with open(cov_results) as results_file:
        results = json.load(results_file)

    def to_warning(issue):
        # Only the 'main' event carries the primary description.
        main_event = next(
            (evt for evt in issue['events'] if evt['main'] is True), None)

        return {
            'line': issue['mainEventLineNumber'],
            'flag': issue['checkerName'],
            'message': main_event['eventDescription'],
            'reliability': self.get_reliability_index_for_cov_checker(
                issue['checkerName']
            ),
            'extra': {
                'category': issue['checkerProperties']['category'],
                'stateOnServer': issue['stateOnServer'],
                # Embed every event so the full path/stack is preserved.
                'stack': [
                    {'file_path': evt['strippedFilePathname'],
                     'line_number': evt['lineNumber'],
                     'path_type': evt['eventTag'],
                     'description': evt['eventDescription']}
                    for evt in issue['events']
                ],
            },
        }

    files_list = {}
    for issue in results['issues']:
        path = self.cov_is_file_in_source(issue['strippedMainEventFilePathname'], source)
        if path is None:
            # Since we skip a result we should log it
            self.log(logging.INFO, 'static-analysis', {},
                     'Skipping CID: {0} from file: {1} since it\'s not related '
                     'with the current patch.'.format(
                        issue['stateOnServer']['cid'],
                        issue['strippedMainEventFilePathname'])
                     )
            continue
        files_list.setdefault(path, {'warnings': []})['warnings'].append(to_warning(issue))

    issues_dict = {'files': files_list}
    with open(output, 'w') as out_file:
        json.dump(issues_dict, out_file)
2114
2115
def get_coverity_secrets(self):
    """Fetch the Coverity configuration from the Taskcluster secrets service
    and cache the individual settings on the instance.

    Returns 0 on success, 1 when the secret payload is malformed.
    """
    from taskgraph.util.taskcluster import get_root_url

    secret_name = 'project/relman/coverity'
    secrets_url = '{}/secrets/v1/secret/{}'.format(get_root_url(True), secret_name)

    self.log(logging.INFO, 'static-analysis', {},
             'Using symbol upload token from the secrets service: "{}"'.format(secrets_url))

    import requests
    response = requests.get(secrets_url)
    response.raise_for_status()
    cov_config = response.json().get('secret')

    if cov_config is None:
        self.log(logging.ERROR, 'static-analysis', {},
                 'Ill formatted secret for Coverity. Aborting analysis.')
        return 1

    self.cov_analysis_url = cov_config.get('package_url')
    self.cov_package_name = cov_config.get('package_name')
    self.cov_url = cov_config.get('server_url')
    # In case we don't have a port in the secret we use the default one,
    # for a default coverity deployment.
    self.cov_port = cov_config.get('server_port', 8443)
    self.cov_auth = cov_config.get('auth_key')
    self.cov_package_ver = cov_config.get('package_ver')
    self.cov_full_stack = cov_config.get('full_stack', False)

    return 0
2146
2147
def download_coverity(self):
    """Download and unpack the Coverity bundle into the state directory and
    generate the ``coverity.conf`` / ``auth`` files it needs.

    Returns 1 when the secrets required to reach the server are missing.
    """
    if self.cov_url is None or self.cov_port is None or \
            self.cov_analysis_url is None or \
            self.cov_auth is None:
        self.log(logging.ERROR, 'static-analysis', {}, 'Missing Coverity secret on try job!')
        return 1

    COVERITY_CONFIG = '''
    {
        "type": "Coverity configuration",
        "format_version": 1,
        "settings": {
            "server": {
                "host": "%s",
                "ssl" : true,
                "port": %s,
                "on_new_cert" : "trust",
                "auth_key_file": "%s"
            },
            "stream": "Firefox",
            "cov_run_desktop": {
                "build_cmd": [],
                "clean_cmd": []
            }
        }
    }
    '''
    # Generate the coverity.conf and auth files
    cov_auth_path = mozpath.join(self.cov_state_path, 'auth')
    cov_setup_path = mozpath.join(self.cov_state_path, 'coverity.conf')
    cov_conf = COVERITY_CONFIG % (self.cov_url, self.cov_port, cov_auth_path)

    def fetch_and_extract(artifact_url, target):
        import requests
        # NOTE(review): verify=False disables TLS certificate validation and
        # extractall() trusts member paths inside the archive; both assume
        # the private artifact server is fully trusted.
        response = requests.get(artifact_url, verify=False, stream=True)
        response.raise_for_status()

        # Extract archive into destination
        with tarfile.open(fileobj=io.BytesIO(response.content)) as bundle:
            bundle.extractall(target)

    fetch_and_extract(self.cov_analysis_url, self.cov_state_path)

    with open(cov_auth_path, 'w') as auth_file:
        auth_file.write(self.cov_auth)

    # Restrict the auth key to owner read/write (600).
    os.chmod(cov_auth_path, 0o600)

    with open(cov_setup_path, 'a') as conf_file:
        conf_file.write(cov_conf)
2198
2199
def setup_coverity(self, force_download=True):
    """Fetch the Coverity secrets, download the package into the mach state
    directory and compute the tool paths.

    :param force_download: when True (default) any existing state directory
        is wiped and the package is downloaded again.
    :return: 0 on success, non-zero on failure.
    """
    rc, _, _ = self._get_config_environment()
    rc = rc or self.get_coverity_secrets()

    if rc != 0:
        return rc

    # Create a directory in mozbuild where we setup coverity
    self.cov_state_path = mozpath.join(self._mach_context.state_dir, "coverity")

    if force_download is True and os.path.exists(self.cov_state_path):
        shutil.rmtree(self.cov_state_path)

    # Guard the mkdir: with force_download=False the directory may already
    # exist, and a bare os.mkdir would raise in that case.
    if not os.path.exists(self.cov_state_path):
        os.mkdir(self.cov_state_path)

    # Download everything that we need for Coverity from out private instance;
    # propagate a failure (e.g. missing secrets) instead of ignoring it.
    rc = self.download_coverity()
    if rc:
        return rc

    self.cov_path = mozpath.join(self.cov_state_path, self.cov_package_name)
    self.cov_run_desktop = mozpath.join(self.cov_path, 'bin', 'cov-run-desktop')
    self.cov_translate = mozpath.join(self.cov_path, 'bin', 'cov-translate')
    self.cov_configure = mozpath.join(self.cov_path, 'bin', 'cov-configure')
    self.cov_work_path = mozpath.join(self.cov_state_path, 'data-coverity')
    self.cov_idir_path = mozpath.join(self.cov_work_path, self.cov_package_ver, 'idir')

    if not os.path.exists(self.cov_path):
        self.log(logging.ERROR, 'static-analysis', {},
                 'Missing Coverity in {}'.format(self.cov_path))
        return 1

    return 0
2230
2231
def cov_is_file_in_source(self, abs_path, source):
    """Return ``abs_path`` when it belongs to ``source``, otherwise None.

    If ``abs_path`` is a symlink it is resolved first, and the resolved
    path is both matched against ``source`` and returned.
    """
    assert isinstance(source, list)
    resolved = os.path.realpath(abs_path) if os.path.islink(abs_path) else abs_path
    return resolved if resolved in source else None
2241
2242
def get_files_with_commands(self, source):
    '''
    Returns an array of dictionaries having file_path with build command
    '''

    # Close the compilation database after reading it (the previous bare
    # open() leaked the file handle).
    with open(self._compile_db, 'r') as db:
        compile_db = json.load(db)

    # Index the compilation database once instead of rescanning the whole
    # list for every source file; per-file entries keep their original
    # order, so results are identical to the previous double loop.  This
    # also removes the inner loop variable that shadowed the outer `f`.
    entries_by_file = {}
    for entry in compile_db:
        entries_by_file.setdefault(entry['file'], []).append(entry)

    commands_list = []

    for path in source:
        # It must be a C/C++ file
        _, ext = os.path.splitext(path)

        if ext.lower() not in self._format_include_extensions:
            self.log(logging.INFO, 'static-analysis', {}, 'Skipping {}'.format(path))
            continue
        file_with_abspath = os.path.join(self.topsrcdir, path)
        # Collect every compilation command recorded for this file.
        commands_list.extend(entries_by_file.get(file_with_abspath, []))

    return commands_list
2265
2266
@StaticAnalysisSubCommand('static-analysis', 'check-java',
2267
'Run infer on the java codebase.')
2268
@CommandArgument('source', nargs='*', default=['mobile'],
2269
help='Source files to be analyzed. '
2270
'Can be omitted, in which case the entire code base '
2271
'is analyzed. The source argument is ignored if '
2272
'there is anything fed through stdin, in which case '
2273
'the analysis is only performed on the files changed '
2274
'in the patch streamed through stdin. This is called '
2275
'the diff mode.')
2276
@CommandArgument('--checks', '-c', default=[], metavar='checks', nargs='*',
2277
help='Static analysis checks to enable.')
2278
@CommandArgument('--jobs', '-j', default='0', metavar='jobs', type=int,
2279
help='Number of concurrent jobs to run.'
2280
' Default is the number of CPUs.')
2281
@CommandArgument('--task', '-t', type=str,
2282
default='compileWithGeckoBinariesDebugSources',
2283
help='Which gradle tasks to use to compile the java codebase.')
2284
@CommandArgument('--outgoing', default=False, action='store_true',
2285
help='Run infer checks on outgoing files from repository')
2286
@CommandArgument('--output', default=None,
2287
help='Write infer json output in a file')
2288
def check_java(self, source=['mobile'], jobs=2, strip=1, verbose=False, checks=[],
2289
task='compileWithGeckoBinariesDebugSources',
2290
skip_export=False, outgoing=False, output=None):
2291
self._set_log_level(verbose)
2292
self.log_manager.enable_all_structured_loggers()
2293
if self.substs['MOZ_BUILD_APP'] != 'mobile/android':
2294
self.log(logging.WARNING, 'static-analysis', {},
2295
'Cannot check java source code unless you are building for android!')
2296
return 1
2297
rc = self._check_for_java()
2298
if rc != 0:
2299
return