# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Environment variables that impact the compilation step
# ==============================================================
option(
env="HOST_CPPFLAGS",
help="Extra flags for Preprocessing host sources",
nargs=1,
default="",
)
option(
env="HOST_CFLAGS",
help="Extra flags for compiling host C sources",
nargs=1,
default="",
)
option(
env="HOST_CXXFLAGS",
help="Extra flags for compiling host C++ sources",
nargs=1,
default="",
)
option(
env="HOST_LDFLAGS",
help="Extra flags for linking host object files",
nargs=1,
default="",
)
option(
env="CPPFLAGS",
help="Extra flags for preprocessing sources",
nargs=1,
default="",
)
option(
env="CFLAGS",
help="Extra flags for compiling C sources",
nargs=1,
default="",
)
option(
env="CXXFLAGS",
help="Extra flags for compiling C++ sources",
nargs=1,
default="",
)
option(
env="ASFLAGS",
help="Extra flags for assembling sources",
nargs=1,
default="",
)
option(
env="LDFLAGS",
help="Extra flags for linking object files",
nargs=1,
default="",
)
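# These options are only read from the environment (or from a mozconfig, e.g.
# `export CFLAGS="-g -O2"`, an illustrative value); configure records them here
# and forwards them to the build in addition to the flags it computes itself.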
option(
env="MOZ_OPTIMIZE_FLAGS",
help="Extra optimization flags",
nargs=1,
)
# Code optimization
# ==============================================================
option("--disable-optimize", nargs="?", help="Disable optimizations via compiler flags")
@depends(target, when="MOZ_PGO")
def forced_pgo_optimization_level(target):
if target.kernel == "Linux" and target.os != "Android":
return "-O3"
@imports(_from="mozbuild.shellutil", _import="quote")
def check_optimize_flags(src, flags):
for flag in reversed(flags):
if flag.startswith(("-O", "/O")):
if flag[2:] == "0":
die(
f"Optimization enabled through {src} but last optimization flag is {flag} which disables optimizations"
)
break
else:
die(
f"Optimization enabled through {src} but no optimization flag found in {quote(*flags)}"
)
return flags
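# For illustration: flags like ["-O2", "-g"] pass the scan above (the last -O
# flag found from the end is "-O2"), ["-O2", "-O0"] dies because the last -O
# flag disables optimizations, and ["-g"] alone dies because no optimization
# flag is present at all.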
@depends("--enable-optimize", "MOZ_OPTIMIZE_FLAGS")
@imports(_from="mozbuild.shellutil", _import="split")
def configured_moz_optimize_flags(enable_optimize, env_flags):
if len(enable_optimize):
return check_optimize_flags("--enable-optimize", split(enable_optimize[0]))
if len(env_flags):
return check_optimize_flags("MOZ_OPTIMIZE_FLAGS", split(env_flags[0]))
@depends("--enable-optimize", "MOZ_OPTIMIZE_FLAGS")
def moz_optimize(enable_optimize, env_flags):
return "1" if enable_optimize or env_flags else None
set_config("MOZ_OPTIMIZE", moz_optimize)
@depends(
target, moz_optimize, configured_moz_optimize_flags, forced_pgo_optimization_level
)
@imports(_from="mozbuild.shellutil", _import="split")
def moz_optimize_flags(
target, moz_optimize, configured_moz_optimize_flags, forced_pgo_optimization_level
):
if configured_moz_optimize_flags:
return configured_moz_optimize_flags
if moz_optimize and forced_pgo_optimization_level:
return [forced_pgo_optimization_level]
if target.kernel == "Darwin":
return ["-O3"]
elif target.kernel in ("Linux", "WINNT"):
return ["-O2"]
else:
return ["-O"]
# Android NDK
# ==============================================================
@depends("--disable-compile-environment", target)
def compiling_android(compile_env, target):
return compile_env and target.os == "Android"
include("android-ndk.configure", when=compiling_android)
with only_when(target_is_osx):
# MacOS deployment target version
# ==============================================================
# This needs to happen before any compilation test is done.
option(
"--enable-macos-target",
env="MACOSX_DEPLOYMENT_TARGET",
nargs=1,
default=depends(target, developer_options)
# We continue to target 10.15 on Intel, but can target 11.0 for
# aarch64 since the earliest hardware was released alongside 11.0.
# For local builds, we want to target 10.15 regardless of the
# underlying platform to catch any errors or warnings that wouldn't
# show up when targeting 11.0, since these would later show up on
# CI for Intel builds.
(lambda t, d: "11.0" if (t.cpu == "aarch64" and not d) else "10.15"),
help="Set the minimum MacOS version needed at runtime{|}",
)
@depends_if("--enable-macos-target", developer_options)
def macos_target(value, _):
return value[0]
@imports("plistlib")
@imports(_from="__builtin__", _import="open")
@imports(_from="__builtin__", _import="Exception")
def get_sdk_settings(sdk):
with open(os.path.join(sdk, "SDKSettings.plist"), "rb") as plist:
obj = plistlib.load(plist)
if not obj:
raise Exception(
"Error parsing SDKSettings.plist in the SDK directory: %s" % sdk
)
return obj
@imports(_from="__builtin__", _import="Exception")
def get_sdk_version_from_settings(sdk, settings):
if "Version" not in settings:
raise Exception(
"Error finding Version information in SDKSettings.plist from the SDK: %s"
% sdk
)
return Version(settings["Version"])
def get_sdk_version(sdk):
return get_sdk_version_from_settings(sdk, get_sdk_settings(sdk))
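# For example, for a hypothetical /path/to/MacOSX14.4.sdk whose
# SDKSettings.plist carries Version "14.4", this returns Version("14.4").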
with only_when(host_is_osx | target_is_osx):
# MacOS SDK
# =========
option(
"--with-macos-sdk",
env="MACOS_SDK_DIR",
nargs=1,
help="Location of platform SDK to use",
)
def mac_sdk_min_version():
return "14.4"
@depends(
"--with-macos-sdk",
host,
bootstrap_path(
"MacOSX{}.sdk".format(mac_sdk_min_version()),
when=depends("--with-macos-sdk")(lambda x: not x),
allow_failure=True,
),
)
@imports(_from="__builtin__", _import="Exception")
@imports(_from="os.path", _import="isdir")
@imports(_from="os", _import="listdir")
def macos_sdk(sdk, host, bootstrapped):
if bootstrapped:
sdk = [bootstrapped]
if sdk:
sdk = sdk[0]
try:
version = get_sdk_version(sdk)
except Exception as e:
die(e)
elif host.os == "OSX":
sdk = check_cmd_output(
"xcrun", "--show-sdk-path", onerror=lambda: ""
).rstrip()
if not sdk:
die(
"Could not find the macOS SDK. Please use --with-macos-sdk to give "
"the path to a macOS SDK."
)
# Scan the parent directory xcrun returns for the most recent SDK.
sdk_dir = os.path.dirname(sdk)
versions = []
for d in listdir(sdk_dir):
if d.lower().startswith("macos"):
try:
sdk = os.path.join(sdk_dir, d)
versions.append((get_sdk_version(sdk), sdk))
except Exception:
pass
version, sdk = max(versions)
else:
die(
"Need a macOS SDK when targeting macOS. Please use --with-macos-sdk "
"to give the path to a macOS SDK."
)
if not isdir(sdk):
die(
"SDK not found in %s. When using --with-macos-sdk, you must specify a "
"valid SDK. SDKs are installed when the optional cross-development "
"tools are selected during the Xcode/Developer Tools installation."
% sdk
)
if version < Version(mac_sdk_min_version()):
die(
'SDK version "%s" is too old. Please upgrade to at least %s. Try '
"updating your system Xcode." % (version, mac_sdk_min_version())
)
return sdk
set_config("MACOS_SDK_DIR", macos_sdk)
with only_when(target_is_osx):
with only_when(cross_compiling):
option(
"--with-macos-private-frameworks",
env="MACOS_PRIVATE_FRAMEWORKS_DIR",
nargs=1,
help="Location of private frameworks to use",
)
@depends_if("--with-macos-private-frameworks")
@imports(_from="os.path", _import="isdir")
def macos_private_frameworks(value):
if value and not isdir(value[0]):
die(
"PrivateFrameworks not found not found in %s. When using "
"--with-macos-private-frameworks, you must specify a valid "
"directory",
value[0],
)
return value[0]
@depends(macos_private_frameworks, macos_sdk)
def macos_private_frameworks(value, sdk):
if value:
return value
return os.path.join(sdk or "/", "System/Library/PrivateFrameworks")
set_config("MACOS_PRIVATE_FRAMEWORKS_DIR", macos_private_frameworks)
with only_when(target_is_ios):
# iOS deployment target version
# ==============================================================
# This needs to happen before any compilation test is done.
option(
"--enable-ios-target",
env="IPHONEOS_DEPLOYMENT_TARGET",
nargs=1,
default="17.4",
help="Set the minimum iOS version needed at runtime",
)
@depends_if("--enable-ios-target")
def ios_target(value):
return value[0]
with only_when(target_is_ios):
# iOS SDK
# =======
option(
"--with-ios-sdk",
env="IPHONEOS_SDK_DIR",
nargs=1,
help="Location of platform SDK to use",
)
@depends(target)
def target_is_ios_simulator(target):
# x86_64-apple-ios is simulator
# aarch64-apple-ios is iphone
# aarch64-apple-ios-sim is simulator
return target.cpu == "x86_64" or target.raw_os == "ios-sim"
def ios_sdk_min_version():
return "17.4"
@depends(target_is_ios_simulator)
def ios_sdk_name(target_is_ios_simulator):
return "iPhone{}{}.sdk".format(
"Simulator" if target_is_ios_simulator else "OS",
ios_sdk_min_version(),
)
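# e.g. this evaluates to "iPhoneOS17.4.sdk" for device targets and
# "iPhoneSimulator17.4.sdk" for simulator targets.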
@depends(
"--with-ios-sdk",
host,
target,
target_is_ios_simulator,
bootstrap_path(ios_sdk_name, when=depends("--with-ios-sdk")(lambda x: not x)),
)
@imports(_from="__builtin__", _import="Exception")
@imports(_from="os.path", _import="isdir")
@imports(_from="os", _import="listdir")
def ios_sdk(sdk, host, target, target_is_ios_simulator, bootstrapped):
if bootstrapped:
sdk = [bootstrapped]
sdk_name = "iphonesimulator" if target_is_ios_simulator else "iphoneos"
if sdk:
sdk = sdk[0]
try:
settings = get_sdk_settings(sdk)
version = get_sdk_version_from_settings(sdk, settings)
except Exception as e:
die(e)
elif host.os == "OSX":
sdk = check_cmd_output(
"xcrun", "--show-sdk-path", "--sdk", sdk_name, onerror=lambda: ""
).rstrip()
if not sdk:
die(
"Could not find the iOS SDK. Please use --with-ios-sdk to give "
"the path to a iOS SDK."
)
# Scan the parent directory xcrun returns for the most recent SDK.
sdk_dir = os.path.dirname(sdk)
versions = []
for d in listdir(sdk_dir):
if d.lower().startswith(sdk_name):
try:
sdk = os.path.join(sdk_dir, d)
settings = get_sdk_settings(sdk)
version = get_sdk_version_from_settings(sdk, settings)
versions.append((version, sdk, settings))
except Exception:
pass
version, sdk, settings = max(versions)
else:
die(
"Need an iOS SDK when targeting iOS. Please use --with-ios-sdk "
"to give the path to a iOS SDK."
)
if not isdir(sdk):
die(
"SDK not found in %s. When using --with-ios-sdk, you must specify a "
"valid SDK. SDKs are installed when the optional cross-development "
"tools are selected during the Xcode installation." % sdk
)
supported_targets = settings.get("SupportedTargets")
if supported_targets:
supported_archs = supported_targets.get(sdk_name, {}).get("Archs", [])
cpu = {
"aarch64": "arm64",
}.get(target.cpu, str(target.cpu))
if cpu not in supported_archs:
die("The SDK in %s does not support target %s" % (sdk, target.alias))
else:
log.warning(
"Cannot check whether the iOS SDK is for the right target, "
"assuming it is."
)
if version < Version(ios_sdk_min_version()):
die(
'SDK version "%s" is too old. Please upgrade to at least %s. Try '
"updating your system Xcode." % (version, ios_sdk_min_version())
)
return sdk
set_config("IPHONEOS_SDK_DIR", ios_sdk)
# GC rooting and hazard analysis.
# ==============================================================
option(env="MOZ_HAZARD", help="Build for the GC rooting hazard analysis")
@depends("MOZ_HAZARD")
def hazard_analysis(value):
if value:
return True
# Cross-compilation related things.
# ==============================================================
option(
"--with-toolchain-prefix",
env="TOOLCHAIN_PREFIX",
nargs=1,
help="Prefix for the target toolchain",
)
@depends("--with-toolchain-prefix", host, target, cross_compiling)
def toolchain_prefix(value, host, target, cross_compiling):
if value:
return tuple(value)
# We don't want a toolchain prefix by default when building on mac for mac.
if cross_compiling and not (target.os == "OSX" and host.os == "OSX"):
return ("%s-" % target.toolchain, "%s-" % target.alias)
# Compilers
# ==============================================================
include("compilers-util.configure")
def try_preprocess(
configure_cache, compiler, language, source, onerror=None, wrapper=[]
):
return try_invoke_compiler(
configure_cache, compiler, language, source, ["-E"], onerror, wrapper
)
@imports(_from="mozbuild.configure.constants", _import="CompilerType")
@imports(_from="mozbuild.configure.constants", _import="CPU_preprocessor_checks")
@imports(_from="mozbuild.configure.constants", _import="kernel_preprocessor_checks")
@imports(_from="mozbuild.configure.constants", _import="OS_preprocessor_checks")
@imports(_from="textwrap", _import="dedent")
@imports(_from="__builtin__", _import="Exception")
def get_compiler_info(configure_cache, compiler, language):
"""Returns information about the given `compiler` (command line in the
form of a list or tuple), in the given `language`.
The returned information includes:
- the compiler type (clang-cl, clang or gcc)
- the compiler version
- the compiler supported language
- the compiler supported language version
"""
# Xcode clang versions are different from the underlying llvm version (they
# instead are aligned with the Xcode version). Fortunately, we can tell
# apart plain clang from Xcode clang, and convert the Xcode clang version
# into the more or less corresponding plain clang version.
check = dedent(
"""\
#if defined(_MSC_VER) && defined(__clang__) && defined(_MT)
%COMPILER "clang-cl"
%VERSION __clang_major__.__clang_minor__.__clang_patchlevel__
#elif defined(__clang__)
%COMPILER "clang"
%VERSION __clang_major__.__clang_minor__.__clang_patchlevel__
# ifdef __apple_build_version__
%XCODE 1
# endif
#elif defined(__GNUC__) && !defined(__MINGW32__)
%COMPILER "gcc"
%VERSION __GNUC__.__GNUC_MINOR__.__GNUC_PATCHLEVEL__
#endif
#if __cplusplus
%cplusplus __cplusplus
#elif __STDC_VERSION__
%STDC_VERSION __STDC_VERSION__
#endif
"""
)
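# When run through the preprocessor, the template above expands to lines such
# as (illustrative clang output; exact spacing varies and is stripped when
# parsed further down):
#   %COMPILER "clang"
#   %VERSION 17 . 0 . 6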
# While we're doing some preprocessing, we might as well do some more
# preprocessor-based tests at the same time, to check the toolchain
# matches what we want.
for name, preprocessor_checks in (
("CPU", CPU_preprocessor_checks),
("KERNEL", kernel_preprocessor_checks),
("OS", OS_preprocessor_checks),
):
for n, (value, condition) in enumerate(preprocessor_checks.items()):
check += dedent(
"""\
#%(if)s %(condition)s
%%%(name)s "%(value)s"
"""
% {
"if": "elif" if n else "if",
"condition": condition,
"name": name,
"value": value,
}
)
check += "#endif\n"
# Also check for endianness. The advantage of living in modern times is
# that all the modern compilers we support now have __BYTE_ORDER__ defined
# by the preprocessor.
check += dedent(
"""\
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
%ENDIANNESS "little"
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
%ENDIANNESS "big"
#endif
"""
)
result = try_preprocess(configure_cache, compiler, language, check)
if not result:
raise FatalCheckError("Unknown compiler or compiler not supported.")
# Metadata emitted by preprocessors such as GCC with LANG=ja_JP.utf-8 may
# have non-ASCII characters. Treat the output as bytearray.
data = {}
for line in result.splitlines():
if line.startswith("%"):
k, _, v = line.partition(" ")
k = k.lstrip("%")
data[k] = v.replace(" ", "").lstrip('"').rstrip('"')
log.debug("%s = %s", k, data[k])
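# At this point `data` looks something like (illustrative):
# {"COMPILER": "clang", "VERSION": "17.0.6", "CPU": "x86_64",
#  "KERNEL": "Linux", "ENDIANNESS": "little", "cplusplus": "201703L"}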
try:
type = CompilerType(data["COMPILER"])
except Exception:
raise FatalCheckError("Unknown compiler or compiler not supported.")
cplusplus = int(data.get("cplusplus", "0L").rstrip("L"))
stdc_version = int(data.get("STDC_VERSION", "0L").rstrip("L"))
version = data.get("VERSION")
if version:
version = Version(version)
if data.get("XCODE"):
# Convert the Xcode clang version to the more or less corresponding plain
# clang version, with enough granularity for major.minor version checks
# further down the line.
if version < "9.1":
version = Version("4.0.0.or.less")
elif version < "10.0":
version = Version("5.0.2")
elif version < "10.0.1":
version = Version("6.0.1")
elif version < "11.0":
version = Version("7.0.0")
elif version < "11.0.3":
version = Version("8.0.0")
elif version < "12.0":
version = Version("9.0.0")
elif version < "12.0.5":
version = Version("10.0.0")
elif version < "13.0":
version = Version("11.1.0")
elif version < "13.0.1":
version = Version("12.0.0")
elif version < "14.0":
version = Version("13.0.0")
elif version < "14.3":
version = Version("14.0.0")
elif version < "15.0":
version = Version("15.0.0")
elif version < "16.0":
version = Version("16.0.0")
else:
version = Version("17.0.6.or.more")
return namespace(
type=type,
version=version,
cpu=data.get("CPU"),
kernel=data.get("KERNEL"),
endianness=data.get("ENDIANNESS"),
os=data.get("OS"),
language="C++" if cplusplus else "C",
language_version=cplusplus if cplusplus else stdc_version,
xcode=bool(data.get("XCODE")),
)
def same_arch_different_bits():
return (
("x86", "x86_64"),
("ppc", "ppc64"),
("sparc", "sparc64"),
)
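# Pairs of (32-bit, 64-bit) CPU names for which the same compiler can target
# either width via -m32/-m64 (see the target checks in check_compiler below).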
@imports(_from="mozbuild.shellutil", _import="quote")
@imports(_from="mozbuild.configure.constants", _import="OS_preprocessor_checks")
def check_compiler(configure_cache, compiler, language, target, android_version):
info = get_compiler_info(configure_cache, compiler, language)
flags = []
# Check language standards
# --------------------------------------------------------------------
if language != info.language:
raise FatalCheckError(
"`%s` is not a %s compiler." % (quote(*compiler), language)
)
# Note: We do a strict version check because there sometimes are backwards
# incompatible changes in the standard, and not all code that compiles as
# C99 compiles as e.g. C11 (as of writing, this is true of libnestegg, for
# example)
if info.language == "C" and info.language_version != 199901:
if info.type == "clang-cl":
flags.append("-Xclang")
flags.append("-std=gnu99")
cxx17_version = 201703
if info.language == "C++":
if info.language_version != cxx17_version:
# MSVC headers include C++17 features, but don't guard them
# with appropriate checks.
if info.type == "clang-cl":
flags.append("-std:c++17")
else:
flags.append("-std=gnu++17")
# Check compiler target
# --------------------------------------------------------------------
has_target = False
if target.os == "Android" and android_version:
# This makes clang define __ANDROID_API__ and use versioned library
# directories from the NDK.
toolchain = "%s%d" % (target.toolchain, android_version)
else:
toolchain = target.toolchain
if info.type == "clang":
# Add the target explicitly when the target is aarch64 macosx, because
# the Xcode clang target is named differently, and we need to work around
# the target on the command line, even if the compiler would default to
# that.
if info.xcode and target.os == "OSX" and target.cpu == "aarch64":
if "--target=arm64-apple-darwin" not in compiler:
flags.append("--target=arm64-apple-darwin")
has_target = True
elif target.os == "iOS":
target_flag = "--target=%s" % toolchain
if target_flag not in compiler:
flags.append(target_flag)
has_target = True
elif (
not info.kernel
or info.kernel != target.kernel
or not info.endianness
or info.endianness != target.endianness
):
flags.append("--target=%s" % toolchain)
has_target = True
# Add target flag when there is an OS mismatch (e.g. building for Android on
# Linux). However, only do this if the target OS is in our whitelist, to
# keep things the same on other platforms.
elif target.os in OS_preprocessor_checks and (
not info.os or info.os != target.os
):
flags.append("--target=%s" % toolchain)
has_target = True
if not has_target and (not info.cpu or info.cpu != target.cpu):
same_arch = same_arch_different_bits()
if (target.cpu, info.cpu) in same_arch:
flags.append("-m32")
elif (info.cpu, target.cpu) in same_arch:
flags.append("-m64")
elif info.type == "clang-cl" and target.cpu == "aarch64":
flags.append("--target=%s" % toolchain)
elif info.type == "clang":
flags.append("--target=%s" % toolchain)
return namespace(
type=info.type,
version=info.version,
target_cpu=info.cpu,
target_kernel=info.kernel,
target_endianness=info.endianness,
target_os=info.os,
flags=flags,
)
@imports(_from="__builtin__", _import="open")
@imports("json")
@imports("os")
def get_vc_paths(host, topsrcdir):
def vswhere(args):
program_files = os.environ.get("PROGRAMFILES(X86)") or os.environ.get(
"PROGRAMFILES"
)
if not program_files:
return []
vswhere = os.path.join(
program_files, "Microsoft Visual Studio", "Installer", "vswhere.exe"
)
if not os.path.exists(vswhere):
return []
return json.loads(check_cmd_output(vswhere, "-format", "json", *args))
variant = "arm64" if host.cpu == "aarch64" else "x86.x64"
for install in vswhere(
[
"-products",
"*",
"-requires",
f"Microsoft.VisualStudio.Component.VC.Tools.{variant}",
]
):
path = install["installationPath"]
tools_version = (
open(
os.path.join(
path, r"VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt"
),
"r",
)
.read()
.strip()
)
tools_path = os.path.join(path, r"VC\Tools\MSVC", tools_version)
yield (Version(install["installationVersion"]), tools_path)
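# Each yielded entry pairs the Visual Studio installation version with its
# MSVC tools directory, e.g. (purely illustrative values):
# (Version("17.9.34723"),
#  r"C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.39.33519")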
@depends(target, host)
def is_windows(target, host):
return host.kernel == "WINNT" or target.kernel == "WINNT"
# Calling this a sysroot is a little weird, but it's the terminology clang
# went with for its -winsysroot flag.
option(
env="WINSYSROOT",
nargs=1,
when=is_windows,
help='Path to a Windows "sysroot" (directory containing MSVC, SDKs)',
)
@depends(
"WINSYSROOT",
bootstrap_path(
"vs",
when=depends("WINSYSROOT", when=is_windows)(lambda x: not x),
),
when=is_windows,
)
def winsysroot(winsysroot, bootstrapped):
if bootstrapped:
return bootstrapped
if winsysroot:
return winsysroot[0]
option(
env="VC_PATH",
nargs=1,
when=is_windows,
help="Path to the Microsoft Visual C/C++ compiler",
)
@depends(
host,
build_environment,
"VC_PATH",
winsysroot,
when=is_windows,
)
@imports("os")
@imports(_from="operator", _import="itemgetter")
def vc_compiler_paths_for_version(host, env, vc_path, winsysroot):
if winsysroot:
if vc_path:
die("WINSYSROOT and VC_PATH cannot be set together.")
base_vc_path = os.path.join(winsysroot, "VC", "Tools", "MSVC")
versions = os.listdir(base_vc_path)
vc_path = [os.path.join(base_vc_path, str(max(Version(v) for v in versions)))]
if vc_path:
# Use an arbitrary version, it doesn't matter.
all_versions = [(Version("15"), vc_path[0])]
elif host.kernel != "WINNT":
# Don't try to do anything when VC_PATH is not set on cross-compiles.
return
else:
all_versions = sorted(get_vc_paths(host, env.topsrcdir), key=itemgetter(0))
if not all_versions:
return
# Choose the newest version.
path = all_versions[-1][1]
host_dir = {
"x86_64": "Hostx64",
"x86": "Hostx86",
"aarch64": "Hostarm64",
}.get(host.cpu)
if host_dir:
path = os.path.join(path, "bin", host_dir)
return {
"x64": [os.path.join(path, "x64")],
# The cross toolchains require DLLs from the native x64 toolchain.
"x86": [os.path.join(path, "x86"), os.path.join(path, "x64")],
"arm64": [os.path.join(path, "arm64"), os.path.join(path, "x64")],
}
@depends(target, host, vc_compiler_paths_for_version, when=is_windows)
def vc_compiler_path(target, host, paths):
cpu = target.cpu if target.os == "WINNT" else host.cpu
vc_target = {
"x86": "x86",
"x86_64": "x64",
"arm": "arm",
"aarch64": "arm64",
}.get(cpu)
if not paths:
return
return paths.get(vc_target)
@depends(vc_compiler_path, original_path)
@imports("os")
@imports(_from="os", _import="environ")
def vc_toolchain_search_path(vc_compiler_path, original_path):
result = list(original_path)
if vc_compiler_path:
# The second item, if there is one, is necessary to have in $PATH for
# Windows to load the required DLLs from there.
if len(vc_compiler_path) > 1:
environ["PATH"] = os.pathsep.join(result + vc_compiler_path[1:])
# The first item is where the programs are going to be
result.append(vc_compiler_path[0])
return result
@depends_if(vc_compiler_path, when=is_windows)
def vc_compiler_version(vc_compiler_path):
version = Version(
os.path.basename(
os.path.dirname(os.path.dirname(os.path.dirname(vc_compiler_path[0])))
)
)
# MSVC path with version 14.x is actually version 19.x
if version.major == 14:
return Version(f"19.{version.minor}")
@depends_if(vc_compiler_version)
def msvs_version(vc_compiler_version):
# clang-cl emulates the same version scheme as cl. And MSVS_VERSION needs to
# be set for GYP on Windows.
if vc_compiler_version >= Version("19.30"):
return "2022"
configure_error("Only Visual Studio 2022 or newer are supported")
return ""
set_config("MSVS_VERSION", msvs_version)
clang_search_path = bootstrap_search_path("clang/bin")
@depends(
bootstrap_search_path("rustc/bin", when="MOZ_AUTOMATION"),
original_path,
)
@imports("os")
@imports(_from="os", _import="environ")
def rust_search_path(rust_path, original_path):
result = list(rust_path or original_path)
# Also add the rustup install directory for cargo/rustc.
cargo_home = environ.get("CARGO_HOME", "")
if cargo_home:
cargo_home = os.path.abspath(cargo_home)
else:
cargo_home = os.path.expanduser(os.path.join("~", ".cargo"))
rustup_path = os.path.join(cargo_home, "bin")
result.insert(0, rustup_path)
return result
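# The net effect is that a rustup-managed toolchain (typically ~/.cargo/bin)
# takes precedence over anything else on the search path.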
# Prepend the mozilla-build msys2 path, since otherwise we can get mismatched
# cygwin dll errors during configure if we get called from another msys2
# environment.
@depends(
mozillabuild_bin_paths, clang_search_path, rust_search_path, target, original_path
)
@imports("os")
def altered_path(
mozillabuild_bin_paths, clang_search_path, rust_search_path, target, original_path
):
altered_path = mozillabuild_bin_paths
if target.kernel == "Darwin":
# The rust compiler wants to execute dsymutil; to make sure it picks up the
# right one, we add the clang path.
path = clang_search_path
else:
path = original_path
# cargo needs the rust search path to find cargo-$subcommand.
path += rust_search_path
for p in path:
if p not in altered_path:
altered_path.append(p)
return os.pathsep.join(altered_path)
set_config("PATH", altered_path)
# Compiler wrappers
# ==============================================================
option(
"--with-compiler-wrapper",
env="COMPILER_WRAPPER",
nargs=1,
help="Enable compiling with wrappers such as distcc and ccache",
)
option("--with-ccache", env="CCACHE", nargs="?", help="Enable compiling with ccache")
@depends_if("--with-ccache")
def ccache(value):
if len(value):
return value
# If --with-ccache was given without an explicit value, we default to
# 'ccache'.
return "ccache"
ccache = check_prog(
"CCACHE",
progs=(),
input=ccache,
paths=bootstrap_search_path(
"sccache", when=depends("CCACHE")(lambda c: len(c) and c[0] == "sccache")
),
allow_missing=True,
)
option(env="CCACHE_PREFIX", nargs=1, help="Compiler prefix to use when using ccache")
ccache_prefix = depends_if("CCACHE_PREFIX")(lambda prefix: prefix[0])
set_config("CCACHE_PREFIX", ccache_prefix)
# Distinguish ccache from sccache.
@depends_if(ccache)
def ccache_is_sccache(ccache):
return check_cmd_output(ccache, "--version").startswith("sccache")
@depends(ccache, ccache_is_sccache)
def using_ccache(ccache, ccache_is_sccache):
return ccache and not ccache_is_sccache
@depends_if(ccache, ccache_is_sccache)
def using_sccache(ccache, ccache_is_sccache):
return ccache and ccache_is_sccache
option(env="RUSTC_WRAPPER", nargs=1, help="Wrap rust compilation with given tool")
@depends(ccache, ccache_is_sccache, "RUSTC_WRAPPER")
@imports(_from="textwrap", _import="dedent")
@imports("os")
def check_sccache_version(ccache, ccache_is_sccache, rustc_wrapper):
sccache_min_version = Version("0.2.13")
def check_version(path):
out = check_cmd_output(path, "--version")
version = Version(out.rstrip().split()[-1])
if version < sccache_min_version:
die(
dedent(
"""\
sccache %s or later is required. sccache in use at %s has
version %s.
Please upgrade or acquire a new version with |./mach bootstrap|.
"""
),
sccache_min_version,
path,
version,
)
if ccache and ccache_is_sccache:
check_version(ccache)
if rustc_wrapper and (
os.path.splitext(os.path.basename(rustc_wrapper[0]))[0].lower() == "sccache"
):
check_version(rustc_wrapper[0])
set_config("MOZ_USING_CCACHE", using_ccache)
set_config("MOZ_USING_SCCACHE", using_sccache)
option(env="SCCACHE_VERBOSE_STATS", help="Print verbose sccache stats after build")
@depends(using_sccache, "SCCACHE_VERBOSE_STATS")
def sccache_verbose_stats(using_sccache, verbose_stats):
return using_sccache and bool(verbose_stats)
set_config("SCCACHE_VERBOSE_STATS", sccache_verbose_stats)
@depends("--with-compiler-wrapper", ccache)
@imports(_from="mozbuild.shellutil", _import="split", _as="shell_split")
def compiler_wrapper(wrapper, ccache):
if wrapper:
raw_wrapper = wrapper[0]
wrapper = shell_split(raw_wrapper)
wrapper_program = find_program(wrapper[0])
if not wrapper_program:
die(
"Cannot find `%s` from the given compiler wrapper `%s`",
wrapper[0],
raw_wrapper,
)
wrapper[0] = wrapper_program
if ccache:
if wrapper:
return tuple([ccache] + wrapper)
else:
return (ccache,)
elif wrapper:
return tuple(wrapper)
@dependable
def wasm():
return split_triplet("wasm32-wasi", allow_wasi=True)
@template
def default_c_compilers(host_or_target, other_c_compiler=None):
"""Template defining the set of default C compilers for the host and
target platforms.
`host_or_target` is either `host` or `target` (the @depends functions
from init.configure).
`other_c_compiler` is the `target` C compiler when `host_or_target` is `host`.
"""
assert host_or_target in {host, target, wasm}
other_c_compiler = () if other_c_compiler is None else (other_c_compiler,)
@depends(host_or_target, target, toolchain_prefix, *other_c_compiler)
def default_c_compilers(
host_or_target, target, toolchain_prefix, *other_c_compiler
):
if host_or_target.kernel == "WINNT":
if host_or_target.abi:
if host_or_target.abi == "msvc":
supported = types = ("clang-cl",)
elif host_or_target.abi == "mingw":
supported = types = ("clang",)
else:
supported = types = ("clang-cl", "clang")
elif host_or_target.kernel == "Darwin":
types = ("clang",)
supported = ("clang", "gcc")
elif host_or_target.kernel == "WASI":
supported = types = ("clang",)
else:
supported = types = ("clang", "gcc")
info = other_c_compiler[0] if other_c_compiler else None
if info and info.type in supported:
# When getting default C compilers for the host, we prioritize the
# same compiler as the target C compiler.
prioritized = info.compiler
if info.type == "gcc":
same_arch = same_arch_different_bits()
if (
target.cpu != host_or_target.cpu
and (target.cpu, host_or_target.cpu) not in same_arch
and (host_or_target.cpu, target.cpu) not in same_arch
):
# If the target C compiler is GCC, and it can't be used with
# -m32/-m64 for the host, it's probably toolchain-prefixed,
# so we prioritize a raw 'gcc' instead.
prioritized = info.type
if target.os != "WINNT" and host_or_target.os == "WINNT":
# When cross-compiling on Windows, don't prioritize. We'll fallback
# to checking for clang-cl first.
pass
else:
types = [prioritized] + [t for t in types if t != info.type]
gcc = ("gcc",)
if toolchain_prefix and host_or_target is target:
gcc = tuple("%sgcc" % p for p in toolchain_prefix) + gcc
result = []
for type in types:
if type == "gcc":
result.extend(gcc)
else:
result.append(type)
return tuple(result)
return default_c_compilers
@template
def default_cxx_compilers(c_compiler, other_c_compiler=None, other_cxx_compiler=None):
"""Template defining the set of default C++ compilers for the host and
target platforms.
`c_compiler` is the @depends function returning a Compiler instance for
the desired platform.
Because the build system expects the C and C++ compilers to be from the
same compiler suite, we derive the default C++ compilers from the C
compiler that was found if none was provided.
We also factor in the target C++ compiler when getting the default host
C++ compiler, using the target C++ compiler if the host and target C
compilers are the same.
"""
assert (other_c_compiler is None) == (other_cxx_compiler is None)
if other_c_compiler is not None:
other_compilers = (other_c_compiler, other_cxx_compiler)
else:
other_compilers = ()
@depends(c_compiler, *other_compilers)
def default_cxx_compilers(c_compiler, *other_compilers):
if other_compilers:
other_c_compiler, other_cxx_compiler = other_compilers
if other_c_compiler.compiler == c_compiler.compiler:
return (other_cxx_compiler.compiler,)
dir = os.path.dirname(c_compiler.compiler)
file = os.path.basename(c_compiler.compiler)
if c_compiler.type == "gcc":
return (os.path.join(dir, file.replace("gcc", "g++")),)
if c_compiler.type == "clang":
return (os.path.join(dir, file.replace("clang", "clang++")),)
return (c_compiler.compiler,)
return default_cxx_compilers
@template
def provided_program(env_var, when=None):
"""Template handling cases where a program can be specified either as a
path or as a path with applicable arguments.
"""
@depends_if(env_var, when=when)
@imports(_from="itertools", _import="takewhile")
@imports(_from="mozbuild.shellutil", _import="split", _as="shell_split")
def provided(cmd):
# Assume the first dash-prefixed item (and any subsequent items) are
# command-line options, the item before the dash-prefixed item is
# the program we're looking for, and anything before that is a wrapper
# of some kind (e.g. sccache).
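# For instance (illustrative), CC="sccache clang -fcolor-diagnostics" is split
# into wrapper=["sccache"], program="clang", flags=["-fcolor-diagnostics"].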
cmd = shell_split(cmd[0])
without_flags = list(takewhile(lambda x: not x.startswith("-"), cmd))
return namespace(
wrapper=without_flags[:-1],
program=without_flags[-1],
flags=cmd[len(without_flags) :],
)
return provided
@template
def sysroot(host_or_target, target_sysroot=None):
assert target_sysroot or host_or_target is target
bootstrap_target_when = target_is_linux_or_wasi
if host_or_target is host:
host_or_target_str = "host"
opt = "--with-host-sysroot"
env = "HOST_SYSROOT"
when = depends(host)(lambda h: h.kernel == "Linux")
# Only bootstrap a host sysroot when using a bootstrapped target sysroot
# or when the target doesn't use a bootstrapped sysroot in the first place.
@depends(when, bootstrap_target_when, target_sysroot.bootstrapped)
def bootstrap_when(when, bootstrap_target_when, bootstrapped):
return when and (bootstrapped or not bootstrap_target_when)
else:
assert host_or_target is target
host_or_target_str = "target"
opt = "--with-sysroot"
env = "SYSROOT"
when = target_is_linux_or_wasi
bootstrap_when = bootstrap_target_when
option(
opt,
env=env,
nargs=1,
when=when,
help="Use the given sysroot directory for %s build" % host_or_target_str,
)
sysroot_input = depends(opt, when=when)(lambda x: x)
bootstrap_sysroot = depends(bootstrap_when, sysroot_input)(
# Only bootstrap when no flag was explicitly given (either --with or --without)
lambda bootstrap, input: bootstrap
and not input
and input.origin == "default"
)
@depends(
sysroot_input,
host_or_target,
macos_sdk,
ios_sdk,
bootstrap_path(
depends(host_or_target)(lambda t: "sysroot-{}".format(t.toolchain)),
when=bootstrap_sysroot,
),
)
@imports("os")
def sysroot(sysroot_input, host_or_target, macos_sdk, ios_sdk, path):
version = None
if sysroot_input:
path = sysroot_input[0]
elif host_or_target.os == "OSX" and macos_sdk:
path = macos_sdk
elif host_or_target.os == "iOS" and ios_sdk:
path = ios_sdk
if path:
# Find the version of libstdc++ headers in the sysroot
include = os.path.join(path, "usr/include/c++")
if os.path.isdir(include):
with os.scandir(include) as d:
version = max(Version(e.name) for e in d if e.is_dir())
log.info("Using %s sysroot in %s", host_or_target_str, path)
return namespace(
path=path,
bootstrapped=bool(path and not sysroot_input),
stdcxx_version=version,
)
return sysroot
target_sysroot = sysroot(target)
# Use `system_lib_option` instead of `option` for options that enable building
# with a system library for which the development headers are not available in
# the bootstrapped sysroots.
@template
def system_lib_option(name, *args, **kwargs):
option(name, *args, **kwargs)
@depends(
name,
target_sysroot.bootstrapped,
when=kwargs.get("when"),
)
def no_system_lib_in_sysroot(value, bootstrapped):
if bootstrapped and value:
die(
"%s is not supported with bootstrapped sysroot. "
"Drop the option, or use --without-sysroot or --disable-bootstrap",
value.format(name),
)
host_sysroot = sysroot(host, target_sysroot)
@template
def multiarch_dir(host_or_target):
sysroot = {
host: host_sysroot,
target: target_sysroot,
}[host_or_target]
@depends(host_or_target, when=sysroot.path)
def multiarch_dir(target):
if target.cpu == "x86":
# Turn e.g. i686-linux-gnu into i386-linux-gnu
return target.toolchain.replace(target.raw_cpu, "i386")
return target.toolchain
return multiarch_dir
target_multiarch_dir = multiarch_dir(target)
host_multiarch_dir = multiarch_dir(host)
def minimum_gcc_version():
return Version("8.1.0")
@template
def compiler(
language,
host_or_target,
c_compiler=None,
other_compiler=None,
other_c_compiler=None,
):
"""Template handling the generic base checks for the compiler for the
given `language` on the given platform (`host_or_target`).
`host_or_target` is either `host` or `target` (the @depends functions
from init.configure).
When the language is 'C++', `c_compiler` is the result of the `compiler`
template for the language 'C' for the same `host_or_target`.
When `host_or_target` is `host`, `other_compiler` is the result of the
`compiler` template for the same `language` for `target`.
When `host_or_target` is `host` and the language is 'C++',
`other_c_compiler` is the result of the `compiler` template for the
language 'C' for `target`.
"""
assert host_or_target in {host, target, wasm}
assert language in ("C", "C++")
assert language == "C" or c_compiler is not None
assert host_or_target is target or other_compiler is not None
assert language == "C" or host_or_target is target or other_c_compiler is not None
host_or_target_str = {
host: "host",
target: "target",
wasm: "wasm",
}[host_or_target]
sysroot = {
host: host_sysroot,
target: target_sysroot,
wasm: dependable(lambda: namespace(path=None)),
}[host_or_target]
multiarch_dir = {
host: host_multiarch_dir,
target: target_multiarch_dir,
wasm: never,
}[host_or_target]
var = {
("C", target): "CC",
("C++", target): "CXX",
("C", host): "HOST_CC",
("C++", host): "HOST_CXX",
("C", wasm): "WASM_CC",
("C++", wasm): "WASM_CXX",
}[language, host_or_target]
default_compilers = {
"C": lambda: default_c_compilers(host_or_target, other_compiler),
"C++": lambda: default_cxx_compilers(
c_compiler, other_c_compiler, other_compiler
),
}[language]()
what = "the %s %s compiler" % (host_or_target_str, language)
option(env=var, nargs=1, help="Path to %s" % what)
# Handle the compiler given by the user through one of the CC/CXX/HOST_CC/
# HOST_CXX variables.
provided_compiler = provided_program(var)
# Normally, we'd use `var` instead of `_var`, but the interaction with
# old-configure complicates things, and for now, we a) can't take the plain
# result from check_prog as CC/CXX/HOST_CC/HOST_CXX and b) have to let
# old-configure AC_SUBST it (because it's autoconf doing it, not us)
compiler = check_prog(
"_%s" % var,
what=what,
progs=default_compilers,
input=provided_compiler.program,
paths=clang_search_path,
)
@depends(
configure_cache,
compiler,
provided_compiler,
compiler_wrapper,
host_or_target,
sysroot,
macos_target,
ios_target,
android_version,
vc_compiler_version,
multiarch_dir,
winsysroot,
host,
)
@checking("whether %s can be used" % what, lambda x: bool(x))
@imports(_from="mozbuild.shellutil", _import="quote")
@imports("os")
def valid_compiler(
configure_cache,
compiler,
provided_compiler,
compiler_wrapper,
host_or_target,
sysroot,
macos_target,
ios_target,
android_version,
vc_compiler_version,
multiarch_dir,
winsysroot,
host,
):
wrapper = list(compiler_wrapper or ())
flags = []
if sysroot.path:
if host_or_target.kernel == "Darwin":
# While --sysroot and -isysroot are roughly equivalent, when not using
# -isysroot on mac, clang takes the SDKROOT environment variable into
# consideration, which may be set by python and break things.
flags.extend(("-isysroot", sysroot.path))
else:
flags.extend(("--sysroot", sysroot.path))
if provided_compiler:
wrapper.extend(provided_compiler.wrapper)
flags.extend(provided_compiler.flags)
info = check_compiler(
configure_cache,
wrapper + [compiler] + flags,
language,
host_or_target,
android_version,
)
if host_or_target.os == "OSX" and macos_target:
flags.append("-mmacosx-version-min=%s" % macos_target)
if host_or_target.os == "iOS" and ios_target:
flags.append("-mios-version-min=%s" % ios_target)
# When not given an explicit compatibility version, clang-cl tries
# to get one from MSVC, which might not even be the one used by the
# build. And when it can't find one, its default might also not match
# what the build is using. So if we were able to figure out the version
# we're building with, explicitly use that.
# This also means that, as a side effect, clang-cl will not try to find
# MSVC, which saves a little overhead.
if info.type == "clang-cl" and vc_compiler_version:
flags.append(f"-fms-compatibility-version={vc_compiler_version}")
if info.type == "clang" and language == "C++" and host_or_target.os == "OSX":
flags.append("-stdlib=libc++")
# Check that the additional flags we got are enough to not require any
# more flags. If we get an exception, just ignore it; it's liable to be
# invalid command-line flags, which means the compiler we're checking
# doesn't support those command-line flags and will fail one or more of
# the checks below.
try:
if info.flags:
flags += info.flags
info = check_compiler(
configure_cache,
wrapper + [compiler] + flags,
language,
host_or_target,
android_version,
)
except FatalCheckError:
pass
if not info.target_cpu or info.target_cpu != host_or_target.cpu:
raise FatalCheckError(
"%s %s compiler target CPU (%s) does not match --%s CPU (%s)"
% (
host_or_target_str.capitalize(),
language,
info.target_cpu or "unknown",
host_or_target_str,
host_or_target.raw_cpu,
)
)
if not info.target_kernel or (info.target_kernel != host_or_target.kernel):
raise FatalCheckError(
"%s %s compiler target kernel (%s) does not match --%s kernel (%s)"
% (
host_or_target_str.capitalize(),
language,
info.target_kernel or "unknown",
host_or_target_str,
host_or_target.kernel,
)
)
if not info.target_endianness or (
info.target_endianness != host_or_target.endianness
):
raise FatalCheckError(
"%s %s compiler target endianness (%s) does not match --%s "
"endianness (%s)"
% (
host_or_target_str.capitalize(),
language,
info.target_endianness or "unknown",
host_or_target_str,
host_or_target.endianness,
)
)
# Compiler version checks
# ===================================================
# Check the compiler version here instead of in `compiler_version` so
# that the `checking` message doesn't pretend the compiler can be used
# to then bail out one line later.
if info.type == "gcc":
if host_or_target.os == "Android":
raise FatalCheckError(
"GCC is not supported on Android.\n"
"Please use clang from the Android NDK instead."
)
gcc_version = minimum_gcc_version()
if info.version < gcc_version:
raise FatalCheckError(
"Only GCC %d.%d or newer is supported (found version %s)."
% (gcc_version.major, gcc_version.minor, info.version)
)
# Force GCC to use the C++ headers from the sysroot, and to prefer the
# sysroot system headers to /usr/include.
# Non-Debian GCC also doesn't look at headers in multiarch directory.
if sysroot.bootstrapped and sysroot.stdcxx_version:
version = sysroot.stdcxx_version
for path in (
"usr/include/c++/{}".format(version),
"usr/include/{}/c++/{}".format(multiarch_dir, version),
"usr/include/{}".format(multiarch_dir),
"usr/include",
):
flags.extend(("-isystem", os.path.join(sysroot.path, path)))
if info.type == "clang-cl":
if info.version < "9.0.0":
raise FatalCheckError(
"Only clang-cl 9.0 or newer is supported (found version %s)"
% info.version
)
if winsysroot and host.os != "WINNT":
overlay = os.path.join(winsysroot, "overlay.yaml")
if os.path.exists(overlay):
overlay_flags = ["-Xclang", "-ivfsoverlay", "-Xclang", overlay]
if info.version >= "16.0" or (
# clang-cl 15 normally doesn't support the root-relative
# overlay we use, but the bootstrapped clang-cl 15 is patched
# to support it, so check we're using a patched version.
info.version >= "15.0"
and try_preprocess(
configure_cache,
[compiler] + flags + overlay_flags,
language,
"",
onerror=lambda: False,
wrapper=wrapper,
)
):
flags.extend(overlay_flags)
if (info.type, host_or_target.abi) in (
("clang", "msvc"),
("clang-cl", "mingw"),
):
raise FatalCheckError("Unknown compiler or compiler not supported.")
# If you want to bump the version check here ensure the version
# is known for Xcode in get_compiler_info.
if info.type == "clang" and info.version < "8.0":
raise FatalCheckError(
"Only clang/llvm 8.0 or newer is supported (found version %s)."
% info.version
)
if host_or_target.kernel == "WASI":
if info.type != "clang":
raise FatalCheckError(
"Only clang is supported for %s" % host_or_target.alias
)
if info.version < "8.0":
raise FatalCheckError(
"Only clang/llvm 8.0 or newer is supported for %s (found version %s)."
% (host_or_target.alias, info.version)
)
if host_or_target.os == "Android":
# Need at least clang 13 for compiler-rt/libunwind being the default.
if info.type == "clang" and info.version < "13.0":
raise FatalCheckError(
"Only clang/llvm 13.0 or newer is supported for %s (found version %s)."
% (host_or_target.alias, info.version)
)
if host_or_target.os == "WINNT" and info.type == "gcc":
raise FatalCheckError(
"Firefox cannot be built with mingw-gcc and requires a mingw-clang toolchain to work."
)
if info.flags:
raise FatalCheckError("Unknown compiler or compiler not supported.")
return namespace(
wrapper=wrapper,
compiler=compiler,
flags=flags,
type=info.type,
version=info.version,
language=language,
)
@depends(valid_compiler)
@checking("%s version" % what)
def compiler_version(compiler):
return compiler.version
if language == "C++":
@depends(valid_compiler, c_compiler)
def valid_compiler(compiler, c_compiler):
if compiler.type != c_compiler.type:
die(
"The %s C compiler is %s, while the %s C++ compiler is "
"%s. Need to use the same compiler suite.",
host_or_target_str,
c_compiler.type,
host_or_target_str,
compiler.type,
)
if compiler.version != c_compiler.version:
die(
"The %s C compiler is version %s, while the %s C++ "
"compiler is version %s. Need to use the same compiler "
"version.",
host_or_target_str,
c_compiler.version,
host_or_target_str,
compiler.version,
)
return compiler
# This excludes WASM_CC from the list.
if var in ("CC", "CXX", "HOST_CC", "HOST_CXX"):
# FIXME: we should return a plain list here.
@depends_if(valid_compiler)
@imports(_from="mozbuild.shellutil", _import="quote")
def value(x):
return quote(*x.wrapper, x.compiler, *x.flags)
set_config(var, value)
# Set CC_TYPE/CC_VERSION/HOST_CC_TYPE/HOST_CC_VERSION to allow
# old-configure to do some of its still existing checks.
if language == "C":
set_config("%s_TYPE" % var, valid_compiler.type)
set_config(
"%s_VERSION" % var, depends(valid_compiler.version)(lambda v: str(v))
)
valid_compiler = compiler_class(valid_compiler, host_or_target)
def compiler_error():
raise FatalCheckError(
"Failed compiling a simple %s source with %s" % (language, what)
)
valid_compiler.try_compile(check_msg="%s works" % what, onerror=compiler_error)
set_config("%s_BASE_FLAGS" % var, valid_compiler.flags)
# Set CPP/CXXCPP for both the build system and old-configure. We don't
# need to check this works for preprocessing, because we already relied
# on $CC -E/$CXX -E doing preprocessing work to validate the compiler
# in the first place.
if host_or_target is target:
pp_var = {
"C": "CPP",
"C++": "CXXCPP",
}[language]
preprocessor = depends_if(valid_compiler)(
lambda x: list(x.wrapper) + [x.compiler, "-E"] + list(x.flags)
)
set_config(pp_var, preprocessor)
if language == "C":
linker_var = {
target: "LD",
host: "HOST_LD",
}.get(host_or_target)
if linker_var:
@deprecated_option(env=linker_var, nargs=1)
def linker(value):
if value:
return value[0]
@depends(linker)
def unused_linker(linker):
if linker:
log.warning(
"The value of %s is not used by this build system." % linker_var
)
return valid_compiler
c_compiler = compiler("C", target)
cxx_compiler = compiler("C++", target, c_compiler=c_compiler)
host_c_compiler = compiler("C", host, other_compiler=c_compiler)
host_cxx_compiler = compiler(
"C++",
host,
c_compiler=host_c_compiler,
other_compiler=cxx_compiler,
other_c_compiler=c_compiler,
)
@template
def windows_abi(host_or_target, c_compiler):
@depends(host_or_target)
def windows_abi(host_or_target):
if host_or_target.os == "WINNT":
return host_or_target.abi
@depends(host_or_target, windows_abi)
def need_windows_abi_from_compiler(host_or_target, windows_abi):
return host_or_target.os == "WINNT" and windows_abi is None
@depends(host_or_target, c_compiler, when=need_windows_abi_from_compiler)
def windows_abi_from_compiler(host_or_target, c_compiler):
if host_or_target.os == "WINNT":
if c_compiler.type == "clang-cl":
return "msvc"
return "mingw"
return windows_abi | windows_abi_from_compiler
target_windows_abi = windows_abi(target, c_compiler)
host_windows_abi = windows_abi(host, host_c_compiler)
# Generic compiler-based conditions.
building_with_gcc = depends(c_compiler)(lambda info: info.type == "gcc")
building_with_gnu_compatible_cc = depends(c_compiler)(
lambda info: info.type != "clang-cl"
)
@depends(cxx_compiler, ccache_prefix)
@imports("os")
def cxx_is_icecream(info, ccache_prefix):
if (
os.path.islink(info.compiler)
and os.path.basename(os.readlink(info.compiler)) == "icecc"
):
return True
if ccache_prefix and os.path.basename(ccache_prefix) == "icecc":
return True
set_config("CXX_IS_ICECREAM", cxx_is_icecream)
# Libstdc++ compatibility hacks
# ==============================================================
#
@depends(target, host)
def target_or_host_is_linux(target, host):
return any(t.os == "GNU" and t.kernel == "Linux" for t in (target, host))
option(
"--enable-stdcxx-compat",
env="MOZ_STDCXX_COMPAT",
help="Enable compatibility with older libstdc++",
when=target_or_host_is_linux,
)
@depends("--enable-stdcxx-compat", when=target_or_host_is_linux)
def stdcxx_compat(value):
if value:
return True
set_config("MOZ_STDCXX_COMPAT", True, when=stdcxx_compat)
# Linker detection
# ==============================================================
# The policy is as follows:
# For Windows:
# - the linker is picked via the LINKER environment variable per windows.configure,
# but ought to be lld-link in any case.
# For macOS:
# - the linker is lld if the clang used is >= 15 (per LLVM version, not Xcode version).
# - the linker is also lld on local developer builds if the clang used is >= 13 (per LLVM
# version, not Xcode version)
# - otherwise the linker is ld64, either from XCode on macOS, or from cctools-ports when
# cross-compiling.
# For other OSes:
# - on local developer builds: lld if present and the compiler is clang. Otherwise gold
# is used if present otherwise, whatever the compiler uses by default.
# - on release/official builds: whatever the compiler uses by default, except when the
# compiler is clang, in which case lld is preferred when it's new enough.
@template
def is_not_winnt_or_sunos(host_or_target):
@depends(host_or_target)
def is_not_winnt_or_sunos(host_or_target):
if host_or_target.kernel not in ("WINNT", "SunOS"):
return True
return is_not_winnt_or_sunos
is_linker_option_enabled = is_not_winnt_or_sunos(target)
@deprecated_option("--enable-gold", env="MOZ_FORCE_GOLD", when=is_linker_option_enabled)
def enable_gold(value):
if value:
die("--enable-gold is deprecated, use --enable-linker=gold instead")
else:
die("--disable-gold is deprecated, use --enable-linker=something_else instead")
option(
"--enable-linker",
nargs=1,
help="Select the linker {bfd, gold, ld64, lld, lld-*, mold}",
when=is_linker_option_enabled,
)
# No-op to enable depending on --enable-linker from default_elfhack in
# toolkit/moz.configure.
@depends("--enable-linker", when=is_linker_option_enabled)
def enable_linker(linker):
return linker
@template
def select_linker_tmpl(host_or_target):
if host_or_target is target:
deps = depends(
"--enable-linker",
c_compiler,
developer_options,
extra_toolchain_flags,
target,
stdcxx_compat,
when=is_linker_option_enabled,
)
host_or_target_str = "target"
else:
deps = depends(
dependable(None),
host_c_compiler,
developer_options,
dependable(None),
host,
stdcxx_compat,
when=is_not_winnt_or_sunos(host_or_target),
)
host_or_target_str = "host"
@deps
@checking(f"for {host_or_target_str} linker", lambda x: x.KIND)
@imports("os")
@imports("shutil")
def select_linker(
linker, c_compiler, developer_options, toolchain_flags, target, stdcxx_compat
):
if linker:
linker = linker[0]
else:
linker = None
def is_valid_linker(linker):
if target.kernel == "Darwin":
valid_linkers = ("ld64", "lld")
else:
valid_linkers = ("bfd", "gold", "lld", "mold")
if linker in valid_linkers:
return True
if "lld" in valid_linkers and linker.startswith("lld-"):
return True
return False
if linker and not is_valid_linker(linker):
# Check that we are trying to use a supported linker
die("Unsupported linker " + linker)
# Check the kind of linker
version_check = ["-Wl,--version"]
cmd_base = c_compiler.wrapper + [c_compiler.compiler] + c_compiler.flags
def try_linker(linker):
# Generate the compiler flag
if linker == "ld64":
linker_flag = ["-fuse-ld=ld"]
elif linker:
linker_flag = ["-fuse-ld=" + linker]
else:
linker_flag = []
cmd = cmd_base + linker_flag + version_check
if toolchain_flags:
cmd += toolchain_flags
# ld64 doesn't have anything to print out a version. It does print out
# "ld64: For information on command line options please use 'man ld'."
# but that would require doing two attempts, one with --version, that
# would fail, and another with --help.
# Instead, abuse its LD_PRINT_OPTIONS feature to detect a message
# specific to it on stderr when it fails to process --version.
env = dict(os.environ)
env["LD_PRINT_OPTIONS"] = "1"
# Some locales might not print out the strings we are looking for, so
# ensure consistent output.
env["LC_ALL"] = "C"
retcode, stdout, stderr = get_cmd_output(*cmd, env=env)
if retcode == 1 and "Logging ld64 options" in stderr:
kind = "ld64"
elif retcode != 0:
return None
elif "mold" in stdout:
kind = "mold"
elif "GNU ld" in stdout:
# We are using the normal linker
kind = "bfd"
elif "GNU gold" in stdout:
kind = "gold"
elif "LLD" in stdout:
kind = "lld"
else:
kind = "unknown"
if kind == "unknown" or is_valid_linker(kind):
return namespace(
KIND=kind,
LINKER_FLAG=linker_flag,
)
result = None
if linker:
result = try_linker(linker)
if result is None:
die("Could not use {} as linker".format(linker))
if (
result is None
and c_compiler.type == "clang"
and (
(
target.kernel != "Darwin"
and (
developer_options
or host_or_target_str == "host"
or c_compiler.version >= "15.0"
)
)
or (
target.kernel == "Darwin"
and (
(developer_options and c_compiler.version >= "13.0")
or c_compiler.version >= "15.0"
)
)
)
):
result = try_linker("lld")
if result is None and developer_options and not stdcxx_compat:
result = try_linker("gold")
if result is None:
result = try_linker(None)
if result is None:
die("Failed to find an adequate linker")
if stdcxx_compat and result.KIND == "gold":
die("--enable-stdcxx-compat is not compatible with the gold linker")
# If an explicit linker was given, error out if what we found is different.
if linker and not linker.startswith(result.KIND):
die("Could not use {} as linker".format(linker))
return result
return select_linker
select_linker = select_linker_tmpl(target)
@template
def linker_ldflags_tmpl(host_or_target):
if host_or_target is target:
deps = depends_if(
select_linker,
target,
target_sysroot,
target_multiarch_dir,
android_sysroot,
android_version,
c_compiler,
developer_options,
)
else:
deps = depends_if(
select_linker_tmpl(host),
host,
host_sysroot,
host_multiarch_dir,
dependable(None),
dependable(None),
host_c_compiler,
developer_options,
)
@deps
@imports("os")
def linker_ldflags(
linker,
target,
sysroot,
multiarch_dir,
android_sysroot,
android_version,
c_compiler,
developer_options,
):
flags = list((linker and linker.LINKER_FLAG) or [])
# rpath-link is irrelevant to wasm, see for more info https://github.com/emscripten-core/emscripten/issues/11076.
if sysroot.path and multiarch_dir and target.os != "WASI":
for d in ("lib", "usr/lib"):
multiarch_lib_dir = os.path.join(sysroot.path, d, multiarch_dir)
if os.path.exists(multiarch_lib_dir):
# Non-Debian-patched binutils linkers (both BFD and gold) don't lookup
# in multi-arch directories.
flags.append("-Wl,-rpath-link,%s" % multiarch_lib_dir)
# GCC also needs -L.
if c_compiler.type == "gcc":
flags.append("-L%s" % multiarch_lib_dir)
if (
c_compiler.type == "gcc"
and sysroot.bootstrapped
and sysroot.stdcxx_version
):
flags.append(
"-L{}/usr/lib/gcc/{}/{}".format(
sysroot.path, multiarch_dir, sysroot.stdcxx_version
)
)
if android_sysroot:
# BFD/gold linkers need a manual --rpath-link for indirect
# dependencies.
flags += [
"-Wl,--rpath-link={}/usr/lib/{}".format(
android_sysroot, target.toolchain
),
"-Wl,--rpath-link={}/usr/lib/{}/{}".format(
android_sysroot, target.toolchain, android_version
),
]
if (
developer_options
and linker
and linker.KIND == "lld"
and target.kernel != "WINNT"
):
flags.append("-Wl,-O0")
return flags
return linker_ldflags
linker_ldflags = linker_ldflags_tmpl(target)
host_linker_ldflags = linker_ldflags_tmpl(host)
# There's a wrinkle with MinGW: linker configuration is not enabled, so
# `select_linker` is never invoked. Hard-code around it.
@depends(select_linker, target, c_compiler)
def gcc_use_gnu_ld(select_linker, target, c_compiler):
if select_linker is not None and target.kernel != "Darwin":
return select_linker.KIND in ("bfd", "gold", "lld", "mold")
if target.kernel == "WINNT" and c_compiler.type == "clang":
return True
return None
# GCC_USE_GNU_LD=1 means the linker is command line compatible with GNU ld.
set_config("GCC_USE_GNU_LD", gcc_use_gnu_ld)
include("compile-checks.configure")
include("arm.configure", when=depends(target.cpu)(lambda cpu: cpu == "arm"))
# Libstdc++ feature detection
# ==============================================================
with only_when("--enable-debug"):
using_libstdcxx = try_compile(
language="C++",
includes=["new"],
body="#ifndef __GLIBCXX__\n#error 1\n#endif",
when=depends(c_compiler)(lambda c: c.type != "clang-cl"),
)
libstdcxx_assertions = depends(when=using_libstdcxx)(lambda: "_GLIBCXX_ASSERTIONS")
using_libcxx = try_compile(
language="C++",
includes=["new"],
body="#ifndef _LIBCPP_VERSION\n#error 1\n#endif",
when=~using_libstdcxx,
)
@depends(
try_compile(
language="C++",
includes=["new"],
body="#if !defined(_LIBCPP_VERSION) || _LIBCPP_VERSION < 180000\n#error 1\n#endif",
),
when=using_libcxx,
)
def libcxx_assertions(modern_libcxx):
# The hardening modes are only available starting with libc++ 18; older
# versions use _LIBCPP_ENABLE_ASSERTIONS instead.
if modern_libcxx:
return "_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_DEBUG"
else:
return "_LIBCPP_ENABLE_ASSERTIONS"
@depends(
libstdcxx_assertions,
libcxx_assertions,
when=libstdcxx_assertions | libcxx_assertions,
)
@checking("which standard c++ library assertions to use")
def stdlibcxx_assertions(libstdcxx, libcxx):
return libstdcxx or libcxx
@depends(
have_64_bit,
try_compile(
body='static_assert(sizeof(void *) == 8, "")', check_msg="for 64-bit OS"
),
)
def check_have_64_bit(have_64_bit, compiler_have_64_bit):
if have_64_bit != compiler_have_64_bit:
configure_error(
"The target compiler does not agree with configure "
"about the target bitness."
)
@depends(cxx_compiler, target)
def needs_libstdcxx_newness_check(cxx_compiler, target):
# We only have to care about this on Linux and MinGW.
if cxx_compiler.type == "clang-cl":
return
if target.kernel not in ("Linux", "WINNT"):
return
if target.os == "Android":
return
return True
def die_on_old_libstdcxx():
die(
"The libstdc++ in use is not new enough. Please run "
"./mach bootstrap to update your compiler, or update your system "
"libstdc++ installation."
)
try_compile(
includes=["cstddef"],
body="\n".join(
[
# _GLIBCXX_RELEASE showed up in libstdc++ 7.
"#if defined(__GLIBCXX__) && !defined(_GLIBCXX_RELEASE)",
"# error libstdc++ not new enough",
"#endif",
"#if defined(_GLIBCXX_RELEASE)",
"# if _GLIBCXX_RELEASE < %d" % minimum_gcc_version().major,
"# error libstdc++ not new enough",
"# else",
" (void) 0",
"# endif",
"#endif",
]
),
check_msg="for new enough STL headers from libstdc++",
when=needs_libstdcxx_newness_check,
onerror=die_on_old_libstdcxx,
)
@depends(c_compiler, target)
def default_debug_flags(compiler_info, target):
# Debug info is ON by default.
if compiler_info.type == "clang-cl":
return ("-Z7",)
elif target.kernel == "WINNT" and compiler_info.type == "clang":
return ("-g", "-gcodeview")
# The oldest versions of supported compilers default to DWARF-4, but
# newer versions may default to DWARF-5 or newer (e.g. clang 14), which
# Valgrind doesn't support. Force-use DWARF-4.
return ("-gdwarf-4",)
option(env="MOZ_DEBUG_FLAGS", nargs=1, help="Debug compiler flags")
imply_option("--enable-debug-symbols", depends_if("--enable-debug")(lambda v: v))
option(
"--disable-debug-symbols",
nargs="?",
help="Disable debug symbols using the given compiler flags",
)
set_config("MOZ_DEBUG_SYMBOLS", depends_if("--enable-debug-symbols")(lambda _: True))
@depends("MOZ_DEBUG_FLAGS", "--enable-debug-symbols", default_debug_flags)
@imports(_from="mozbuild.shellutil", _import="split")
def debug_flags(env_debug_flags, enable_debug_flags, default_debug_flags):
# If MOZ_DEBUG_FLAGS is set, and --enable-debug-symbols is set to a value,
# --enable-debug-symbols takes precedence. Note, the value of
# --enable-debug-symbols may be implied by --enable-debug.
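# For example (hypothetical values): with MOZ_DEBUG_FLAGS="-g3" in the
# environment and --enable-debug-symbols="-g1" on the command line, the
# result below would be ["-g1"].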
if len(enable_debug_flags):
return split(enable_debug_flags[0])
if env_debug_flags:
return split(env_debug_flags[0])
return default_debug_flags
set_config("MOZ_DEBUG_FLAGS", debug_flags)
@depends(c_compiler, host)
@imports(
_from="mach.logging", _import="enable_blessed", _as="_enable_ansi_escape_codes"
)
def color_cflags(info, host):
# We could test compiling with flags. But why incur the overhead when
# color support should always be present in a specific toolchain
# version?
# Code for auto-adding this flag to compiler invocations needs to
# determine if an existing flag isn't already present. That is likely
# using exact string matching on the returned value. So if the return
# value changes to e.g. "<x>=always", exact string match may fail and
# multiple color flags could be added. So examine downstream consumers
# before adding flags to return values.
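# For instance, if this were changed to return "-fdiagnostics-color=always",
# a consumer doing an exact string match on "-fdiagnostics-color" would not
# detect it and could end up appending a second, conflicting color flag.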
if info.type == "gcc":
return "-fdiagnostics-color"
elif info.type in ["clang", "clang-cl"]:
if host.os == "WINNT" and _enable_ansi_escape_codes():
return "-fcolor-diagnostics -fansi-escape-codes"
else:
return "-fcolor-diagnostics"
else:
return ""
set_config("COLOR_CFLAGS", color_cflags)
# Some standard library headers (notably bionic on Android) declare standard
# functions (e.g. getchar()) and also #define macros for those standard
# functions. libc++ deals with this by doing something like the following
# (explanatory comments added):
#
# #ifdef FUNC
# // Capture the definition of FUNC.
# inline _LIBCPP_INLINE_VISIBILITY int __libcpp_FUNC(...) { return FUNC(...); }
# #undef FUNC
# // Use a real inline definition.
# inline _LIBCPP_INLINE_VISIBILITY int FUNC(...) { return __libcpp_FUNC(...); }
# #endif
#
# _LIBCPP_INLINE_VISIBILITY is typically defined as:
#
# __attribute__((__visibility__("hidden"), __always_inline__))
#
# Unfortunately, this interacts badly with our system header wrappers, as the:
#
# #pragma GCC visibility push(default)
#
# that they do prior to including the actual system header is treated by the
# compiler as an explicit declaration of visibility on every function declared
# in the header. Therefore, when the libc++ code above is encountered, it is
# as though the compiler has effectively seen:
#
# int FUNC(...) __attribute__((__visibility__("default")));
# int FUNC(...) __attribute__((__visibility__("hidden")));
#
# and the compiler complains about the mismatched visibility declarations.
#
# However, libc++ will only define _LIBCPP_INLINE_VISIBILITY if there is no
# existing definition. We can therefore define it to the empty string (since
# we are properly managing visibility ourselves) and avoid this whole mess.
# Note that we don't need to do this with gcc, as libc++ detects gcc and
# effectively does the same thing we are doing here.
#
# _LIBCPP_ALWAYS_INLINE needs a similar workaround, since it too declares
# hidden visibility.
#
# _LIBCPP_HIDE_FROM_ABI is a macro in libc++ versions in NDKs >=r19. It too
# declares hidden visibility, but it also declares functions as excluded from
# explicit instantiation (roughly: the function can be unused in the current
# compilation, but does not then trigger an actual definition of the function;
# it is assumed the real definition comes from elsewhere). We need to replicate
# this setup.
@depends(c_compiler, target)
def libcxx_override_visibility(c_compiler, target):
if c_compiler.type == "clang" and target.os == "Android":
return namespace(
empty="",
hide_from_abi="__attribute__((__exclude_from_explicit_instantiation__))",
)
set_define("_LIBCPP_INLINE_VISIBILITY", libcxx_override_visibility.empty)
set_define("_LIBCPP_ALWAYS_INLINE", libcxx_override_visibility.empty)
set_define("_LIBCPP_HIDE_FROM_ABI", libcxx_override_visibility.hide_from_abi)
@depends(target, build_environment)
def visibility_flags(target, env):
if target.os != "WINNT":
if target.kernel == "Darwin":
return ("-fvisibility=hidden", "-fvisibility-inlines-hidden")
return (
"-I%s/system_wrappers" % os.path.join(env.dist),
"-include",
"%s/config/gcc_hidden.h" % env.topsrcdir,
)
@depends(target, visibility_flags)
def wrap_system_includes(target, visibility_flags):
if visibility_flags and target.kernel != "Darwin":
return True
set_define(
"HAVE_VISIBILITY_HIDDEN_ATTRIBUTE",
depends(visibility_flags)(lambda v: bool(v) or None),
)
set_define(
"HAVE_VISIBILITY_ATTRIBUTE", depends(visibility_flags)(lambda v: bool(v) or None)
)
set_config("WRAP_SYSTEM_INCLUDES", wrap_system_includes)
set_config("VISIBILITY_FLAGS", visibility_flags)
# We pass linker_optimize_flags to the linker because, with dead_strip
# enabled, the linker in Xcode 4.1 would otherwise crash when linking XUL.
@depends(target, c_compiler)
def check_thread(target, c_compiler):
if target.cpu in ("mips32", "mips64"):
# mips builds fail with TLS variables because of a binutils bug.
return False
if target.os == "Android":
# The custom dynamic linker doesn't support TLS variables
return False
if target.kernel == "OpenBSD":
# OpenBSD doesn't have TLS support, and the test succeeds with clang++
return False
return c_compiler.type != "clang-cl"
set_define(
"HAVE_THREAD_TLS_KEYWORD",
try_link(
body="static __thread bool tlsIsMainThread = false; return tlsIsMainThread;",
flags=linker_optimize_flags.ldflags,
check_msg="for __thread keyword for TLS variables",
when=check_thread,
),
)
@template
def depend_cflags(host_or_target_c_compiler):
@depends(host_or_target_c_compiler)
def depend_cflags(host_or_target_c_compiler):
if host_or_target_c_compiler.type != "clang-cl":
return ["-MD", "-MP", "-MF $(MDDEPDIR)/$(@F).pp"]
else:
# clang-cl doesn't accept the normal -MD -MP -MF options that clang
# does, but the underlying cc1 binary understands how to generate
# dependency files. These options are based on analyzing what the
# normal clang driver sends to cc1 when given the "correct"
# dependency options.
return [
"-Xclang",
"-MP",
"-Xclang",
"-dependency-file",
"-Xclang",
"$(MDDEPDIR)/$(@F).pp",
"-Xclang",
"-MT",
"-Xclang",
"$@",
]
return depend_cflags
set_config("_DEPEND_CFLAGS", depend_cflags(c_compiler))
set_config("_HOST_DEPEND_CFLAGS", depend_cflags(host_c_compiler))
@depends(c_compiler)
def preprocess_option(compiler):
# The uses of PREPROCESS_OPTION depend on the spacing for -o/-Fi.
if compiler.type in ("gcc", "clang"):
return "-E -o "
else:
return "-P -Fi"
set_config("PREPROCESS_OPTION", preprocess_option)
# On Power ISA, determine compiler flags for VMX, VSX and VSX-3.
set_config(
"PPC_VMX_FLAGS",
["-maltivec"],
when=depends(target.cpu)(lambda cpu: cpu.startswith("ppc")),
)
set_config(
"PPC_VSX_FLAGS",
["-mvsx"],
when=depends(target.cpu)(lambda cpu: cpu.startswith("ppc")),
)
set_config(
"PPC_VSX3_FLAGS",
["-mvsx", "-mcpu=power9"],
when=depends(target.cpu)(lambda cpu: cpu.startswith("ppc")),
)
# TARGET_XPCOM_ABI
# ==============================================================
is_arm_eabi = c_compiler.try_compile(
body="""
#if defined(__ARM_EABI__)
return 0;
#else
#error Not ARM EABI.
#endif""",
check_msg="for ARM EABI",
when=building_with_gnu_compatible_cc
& depends(target.cpu)(lambda cpu: cpu == "arm"),
)
@depends(target, is_arm_eabi, c_compiler)
def target_xpcom_abi(target, is_arm_eabi, compiler):
if compiler.type == "clang-cl":
return f"{target.cpu}-msvc"
elif target.cpu == "arm":
target_compiler_abi = "eabi" if is_arm_eabi else "oabi"
return f"{target.cpu}-{target_compiler_abi}-gcc3"
else:
return f"{target.cpu}-gcc3"
set_config("TARGET_XPCOM_ABI", target_xpcom_abi)
set_define("TARGET_XPCOM_ABI", depends(target_xpcom_abi)(lambda v: f'"{v}"'))
# ASAN
# ==============================================================
option("--enable-address-sanitizer", help="Enable Address Sanitizer")
@depends(when="--enable-address-sanitizer")
def asan():
return True
with only_when(asan):
option(
env="MOZ_CLANG_RT_ASAN_LIB_PATH",
nargs=1,
help="Path to clang runtime asan library",
)
@depends(
c_compiler,
target,
"MOZ_CLANG_RT_ASAN_LIB_PATH",
)
@imports("os")
@imports("glob")
def clang_rt_asan_lib_path(c_compiler, target, clang_rt_asan_lib):
if clang_rt_asan_lib:
if os.path.exists(clang_rt_asan_lib[0]):
return clang_rt_asan_lib[0]
else:
die(
f"Specified MOZ_CLANG_RT_ASAN_LIB_PATH value '{clang_rt_asan_lib}' doesn't exist. "
)
# Look for the ASan runtime binary
if c_compiler.type == "clang-cl":
cpu = {"x86": "i386"}.get(target.cpu, target.cpu)
clang_rt_asan_lib = f"clang_rt.asan_dynamic-{cpu}.dll"
subdir = "windows"
elif target.os == "Android":
cpu = {"x86": "i686"}.get(target.cpu, target.cpu)
clang_rt_asan_lib = f"libclang_rt.asan-{cpu}-android.so"
subdir = "linux"
else:
return
search_path = os.path.join(
os.path.dirname(c_compiler.compiler),
"..",
"lib",
"clang",
"*",
"lib",
subdir,
clang_rt_asan_lib,
)
if candidates := glob.glob(search_path):
return candidates[0]
die(
f"Couldn't find {clang_rt_asan_lib}. "
f"It should be available in the same location as {c_compiler.type}."
)
set_config("MOZ_CLANG_RT_ASAN_LIB_PATH", clang_rt_asan_lib_path)
@depends(
c_compiler,
target,
compilation_flags,
linker_flags,
build_environment,
when=asan,
)
def asan_flags(c_compiler, target, compilation_flags, linker_flags, build_env):
if c_compiler.type == "clang-cl":
# Suppressing errors in recompiled code.
if target.os == "WINNT":
flag = f"-fsanitize-blacklist={build_env.topsrcdir}/build/sanitizers/asan_blacklist_win.txt"
compilation_flags.cflags.append(flag)
compilation_flags.cxxflags.append(flag)
asan_flag = "-fsanitize=address"
compilation_flags.cflags.append(asan_flag)
compilation_flags.cxxflags.append(asan_flag)
if c_compiler.type != "clang-cl":
linker_flags.ldflags.extend([asan_flag, "-rdynamic"])
set_define("MOZ_ASAN", True, when=asan)
set_config("MOZ_ASAN", True, when=asan)
# MSAN
# ==============================================================
option("--enable-memory-sanitizer", help="Enable Memory Sanitizer")
@depends(when="--enable-memory-sanitizer")
def msan():
return True
@depends(c_compiler, compilation_flags, linker_flags, when=msan)
def msan_flags(c_compiler, compilation_flags, linker_flags):
flags = ["-fsanitize=memory", "-fsanitize-memory-track-origins"]
compilation_flags.cflags.extend(flags)
compilation_flags.cxxflags.extend(flags)
if c_compiler.type != "clang-cl":
linker_flags.ldflags.extend(flags + ["-rdynamic"])
set_define("MOZ_MSAN", True, when=msan)
set_config("MOZ_MSAN", True, when=msan)
# TSAN
# ==============================================================
option("--enable-thread-sanitizer", help="Enable Thread Sanitizer")
@depends(when="--enable-thread-sanitizer")
def tsan():
return True
@depends(c_compiler, compilation_flags, linker_flags, when=tsan)
def tsan_flags(c_compiler, compilation_flags, linker_flags):
flag = "-fsanitize=thread"
compilation_flags.cflags.append(flag)
compilation_flags.cxxflags.append(flag)
if c_compiler.type != "clang-cl":
linker_flags.ldflags.extend(["-fsanitize=thread", "-rdynamic"])
set_define("MOZ_TSAN", True, when=tsan)
set_config("MOZ_TSAN", True, when=tsan)
# UBSAN
# ==============================================================
option(
"--enable-undefined-sanitizer", nargs="*", help="Enable UndefinedBehavior Sanitizer"
)
@depends("--enable-undefined-sanitizer", moz_optimize)
def ubsan(options, optimize):
if not options:
return
default_checks = [
"bool",
"bounds",
"enum",
"function",
"integer-divide-by-zero",
"pointer-overflow",
"return",
"vla-bound",
]
# adding object-size generates a warning if -O0 is set
if optimize:
default_checks.append("object-size")
checks = options if len(options) else default_checks
return checks
@depends(
ubsan, c_compiler, compilation_flags, linker_flags, build_environment, when=ubsan
)
@imports(_from="__builtin__", _import="open")
@imports(_from="glob", _import="glob")
@imports("shutil")
def ubsan_flags(ubsan_checks, c_compiler, compilation_flags, linker_flags, build_env):
ubsan_txt = os.path.join(build_env.topobjdir, "ubsan_blacklist.txt")
with open(ubsan_txt, "w") as out_fd:
for blacklist in glob(
os.path.join(
build_env.topsrcdir, "build", "sanitizers", "ubsan_*_blacklist.txt"
)
):
with open(blacklist) as in_fd:
shutil.copyfileobj(in_fd, out_fd)
joined_ubsan_checks = ",".join(ubsan_checks)
flags = [
f"-fsanitize={joined_ubsan_checks}",
f"-fno-sanitize-recover={joined_ubsan_checks}",
f"-fsanitize-blacklist={ubsan_txt}",
]
compilation_flags.cflags.extend(flags)
compilation_flags.cxxflags.extend(flags)
if c_compiler.type != "clang-cl":
linker_flags.ldflags.extend(["-fsanitize=undefined", "-rdynamic"])
option(
"--enable-signed-overflow-sanitizer",
help="Enable UndefinedBehavior Sanitizer (Signed Integer Overflow Parts)",
)
@depends(when="--enable-signed-overflow-sanitizer")
def ub_signed_overflow_san():
return True
@depends(
c_compiler,
compilation_flags,
linker_flags,
build_environment,
when=ub_signed_overflow_san,
)
def ub_signed_overflow_san_flags(
c_compiler, compilation_flags, linker_flags, build_env
):
sanitizer_blacklist = os.path.join(
build_env.topsrcdir,
"build",
"sanitizers",
"ubsan_signed_overflow_blacklist.txt",
)
flags = [
f"-fsanitize=signed-integer-overflow",
f"-fsanitize-blacklist={sanitizer_blacklist}",
]
compilation_flags.cflags.extend(flags)
compilation_flags.cxxflags.extend(flags)
if c_compiler.type != "clang-cl":
linker_flags.ldflags.extend(["-fsanitize=signed-integer-overflow", "-rdynamic"])
option(
"--enable-unsigned-overflow-sanitizer",
help="Enable UndefinedBehavior Sanitizer (Unsigned Integer Overflow Parts)",
)
@depends(when="--enable-unsigned-overflow-sanitizer")
def ub_unsigned_overflow_san():
return True
@depends(
c_compiler,
compilation_flags,
linker_flags,
build_environment,
when=ub_unsigned_overflow_san,
)
def ub_unsigned_overflow_san_flags(
c_compiler, compilation_flags, linker_flags, build_env
):
sanitizer_blacklist = os.path.join(
build_env.topsrcdir,
"build",
"sanitizers",
"ubsan_unsigned_overflow_blacklist.txt",
)
flags = [
f"-fsanitize=unsigned-integer-overflow",
f"-fsanitize-blacklist={sanitizer_blacklist}",
]
compilation_flags.cflags.extend(flags)
compilation_flags.cxxflags.extend(flags)
if c_compiler.type != "clang-cl":
linker_flags.ldflags.extend(
["-fsanitize=unsigned-integer-overflow", "-rdynamic"]
)
#
any_ubsan = ubsan | ub_signed_overflow_san | ub_unsigned_overflow_san
set_define("MOZ_UBSAN", True, when=any_ubsan)
set_config("MOZ_UBSAN", any_ubsan)
# We only want to include windows.configure when we are compiling on
# Windows, or for Windows.
include("windows.configure", when=is_windows)
# Security Hardening
# ==============================================================
option(
"--enable-hardening",
env="MOZ_SECURITY_HARDENING",
help="Enables security hardening compiler options",
)
# This function is a bit confusing. It adds or removes hardening flags in
# three situations: if --enable-hardening is passed, if --disable-hardening
# is passed, and if no flag is passed.
#
# At time of this comment writing, all flags are actually added in the
# default no-flag case; making --enable-hardening the same as omitting the
# flag. --disable-hardening will omit the security flags. (However, not all
# possible security flags will be omitted by --disable-hardening, as many are
# compiler-default options we do not explicitly enable.)
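# For illustration: on a typical optimized, non-ASan Linux gcc/clang build,
# both the default (no flag) and --enable-hardening cases add flags below
# such as -D_FORTIFY_SOURCE=2 and -fstack-protector-strong, while
# --disable-hardening omits them.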
@depends(
"--enable-hardening",
"--enable-address-sanitizer",
"--enable-debug",
"--enable-optimize",
c_compiler,
target,
stdlibcxx_assertions,
)
def security_hardening_cflags(
hardening_flag, asan, debug, optimize, c_compiler, target, cxx_assert
):
compiler_is_gccish = c_compiler.type in ("gcc", "clang")
mingw_clang = c_compiler.type == "clang" and target.os == "WINNT"
flags = []
ldflags = []
trivial_auto_var_init = []
# WASI compiler doesn't support security hardening cflags
if target.os == "WASI":
return
# ----------------------------------------------------------
# If hardening is explicitly enabled, or not explicitly disabled
if hardening_flag.origin == "default" or hardening_flag:
# FORTIFY_SOURCE ------------------------------------
if compiler_is_gccish and optimize and not asan:
flags.append("-U_FORTIFY_SOURCE")
flags.append("-D_FORTIFY_SOURCE=2")
# Standard C++ library assertions / debug mode
if cxx_assert:
flags.append(f"-D{cxx_assert}")
# fstack-protector ------------------------------------
# Enable only if hardening is not disabled and ASAN is
# not on as ASAN will catch the crashes for us
if compiler_is_gccish and not asan:
flags.append("-fstack-protector-strong")
ldflags.append("-fstack-protector-strong")
if (
c_compiler.type == "clang"
and c_compiler.version >= "11.0.1"
and target.os not in ("WINNT", "OSX", "OpenBSD")
and target.cpu in ("x86", "x86_64", "ppc64", "s390x")
):
flags.append("-fstack-clash-protection")
ldflags.append("-fstack-clash-protection")
# ftrivial-auto-var-init ------------------------------
# Initialize local variables with a 0xAA pattern in clang builds.
# Linux32 fails some xpcshell tests with -ftrivial-auto-var-init
linux32 = target.kernel == "Linux" and target.cpu == "x86"
if (
(c_compiler.type == "clang" or c_compiler.type == "clang-cl")
and c_compiler.version >= "8"
and not linux32
):
if c_compiler.type == "clang-cl":
trivial_auto_var_init.append("-Xclang")
trivial_auto_var_init.append("-ftrivial-auto-var-init=pattern")
# Always enable on debug builds.
if debug:
flags.extend(trivial_auto_var_init)
if (c_compiler.type == "clang" and c_compiler.version >= "16") or (
c_compiler.type == "gcc" and c_compiler.version >= "13"
):
# Cannot use level 3 because we have many uses of the [0] GNU syntax.
# Cannot use level 2 because sqlite3 and icu use the [1] GNU syntax.
flags.append("-fstrict-flex-arrays=1")
# ASLR ------------------------------------------------
# ASLR (dynamicbase) is enabled by default in clang-cl; but the
# mingw-clang build requires it to be explicitly enabled
if mingw_clang:
ldflags.append("-Wl,--dynamicbase")
# Control Flow Guard (CFG) ----------------------------
if (
c_compiler.type == "clang-cl"
and c_compiler.version >= "8"
and (target.cpu != "aarch64" or c_compiler.version >= "8.0.1")
):
if target.cpu == "aarch64" and c_compiler.version >= "10.0.0":
flags.append("-guard:cf,nochecks")
else:
flags.append("-guard:cf")
# nolongjmp is needed because clang doesn't emit the CFG tables of
# longjmp targets.
ldflags.append("-guard:cf,nolongjmp")
# ----------------------------------------------------------
# If ASAN _is_ on, disable FORTIFY_SOURCE just to be safe
if asan:
flags.append("-D_FORTIFY_SOURCE=0")
# fno-common -----------------------------------------
# Do not merge variables for ASAN; can detect some subtle bugs
if asan:
# clang-cl does not recognize the flag, it must be passed down to clang
if c_compiler.type == "clang-cl":
flags.append("-Xclang")
flags.append("-fno-common")
return namespace(
flags=flags,
ldflags=ldflags,
trivial_auto_var_init=trivial_auto_var_init,
)
set_config("MOZ_HARDENING_CFLAGS", security_hardening_cflags.flags)
set_config("MOZ_HARDENING_LDFLAGS", security_hardening_cflags.ldflags)
set_config(
"MOZ_TRIVIAL_AUTO_VAR_INIT",
security_hardening_cflags.trivial_auto_var_init,
)
# Intel Control-flow Enforcement Technology
# ==============================================================
# We keep this separate from the hardening flags above, because we want to be
# able to easily remove the flags in the build files for certain executables.
@depends(c_compiler, target)
def cet_ldflags(c_compiler, target):
ldflags = []
if (
c_compiler.type == "clang-cl"
and c_compiler.version >= "11"
and target.cpu == "x86_64"
):
ldflags.append("-CETCOMPAT")
return ldflags
set_config("MOZ_CETCOMPAT_LDFLAGS", cet_ldflags)
# Frame pointers
# ==============================================================
@depends(c_compiler)
def frame_pointer_flags(compiler):
if compiler.type == "clang-cl":
return namespace(
enable=["-Oy-"],
disable=["-Oy"],
)
return namespace(
enable=["-fno-omit-frame-pointer", "-funwind-tables"],
disable=["-fomit-frame-pointer", "-funwind-tables"],
)
@depends(
moz_optimize,
moz_debug,
target,
"--enable-memory-sanitizer",
"--enable-address-sanitizer",
"--enable-undefined-sanitizer",
)
def frame_pointer_default(optimize, debug, target, msan, asan, ubsan):
return bool(
not optimize
or debug
or msan
or asan
or ubsan
or (target.os == "WINNT" and target.cpu in ("x86", "aarch64"))
or target.os == "OSX"
)
option(
"--enable-frame-pointers",
default=frame_pointer_default,
help="{Enable|Disable} frame pointers",
)
@depends("--enable-frame-pointers", frame_pointer_flags)
def frame_pointer_flags(enable, flags):
if enable:
return flags.enable
return flags.disable
set_config("MOZ_FRAMEPTR_FLAGS", frame_pointer_flags)
# Stack unwinding without frame pointers
# ==============================================================
have_unwind = check_symbol(
"_Unwind_Backtrace", when=check_header("unwind.h", when=target_is_unix)
)
# Code Coverage
# ==============================================================
option("--enable-coverage", env="MOZ_CODE_COVERAGE", help="Enable code coverage")
@depends("--enable-coverage")
def code_coverage(value):
if value:
return True
set_config("MOZ_CODE_COVERAGE", code_coverage)
set_define("MOZ_CODE_COVERAGE", code_coverage)
@depends(target, c_compiler, build_environment, when=code_coverage)
@imports("os")
@imports("re")
@imports(_from="__builtin__", _import="open")
def coverage_cflags(target, c_compiler, build_env):
cflags = ["--coverage"]
# clang 11 no longer accepts this flag (its behavior became the default)
if c_compiler.type in ("clang", "clang-cl") and c_compiler.version < "11.0.0":
cflags += [
"-Xclang",
"-coverage-no-function-names-in-data",
]
exclude = []
if target.os == "WINNT" and c_compiler.type == "clang-cl":
# VS files
exclude.append("^.*[vV][sS]20[0-9]{2}.*$")
# Files in fetches directory.
exclude.append("^.*[\\\\/]fetches[\\\\/].*$")
elif target.os == "OSX":
# Files in fetches directory.
exclude.append("^.*/fetches/.*$")
elif target.os == "GNU":
# Files in fetches directory.
exclude.append("^.*/fetches/.*$")
# Files in /usr/
exclude.append("^/usr/.*$")
if exclude:
exclude = ";".join(exclude)
cflags += [
f"-fprofile-exclude-files={exclude}",
]
response_file_path = os.path.join(build_env.topobjdir, "code_coverage_cflags")
with open(response_file_path, "w") as f:
f.write(" ".join(cflags))
return ["@{}".format(response_file_path)]
set_config("COVERAGE_CFLAGS", coverage_cflags)
# Assembler detection
# ==============================================================
option(env="AS", nargs=1, help="Path to the assembler")
@depends(target, c_compiler)
def as_info(target, c_compiler):
if c_compiler.type == "clang-cl":
ml = {
"x86": "ml.exe",
"x86_64": "ml64.exe",
"aarch64": "armasm64.exe",
}.get(target.cpu)
return namespace(type="masm", names=(ml,))
# When building with anything but clang-cl, we just use the C compiler as the assembler.
return namespace(type="gcc", names=(c_compiler.compiler,))
# One would expect the assembler to be specified merely as a program. But in
# cases where the assembler is passed down into js/, it can be specified in
# the same way as CC: a program + a list of argument flags. We might as well
# permit the same behavior in general, even though it seems somewhat unusual.
# So we have to do the same sort of dance as we did above with
# `provided_compiler`.
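# For example (hypothetical value): AS="clang -fno-integrated-as" is split
# into the program and its flags, in the same way CC is handled.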
provided_assembler = provided_program("AS")
assembler = check_prog(
"_AS",
input=provided_assembler.program,
what="the assembler",
progs=as_info.names,
paths=vc_toolchain_search_path,
)
@depends(as_info, assembler, provided_assembler, c_compiler)
def as_with_flags(as_info, assembler, provided_assembler, c_compiler):
if provided_assembler:
return provided_assembler.wrapper + [assembler] + provided_assembler.flags
if as_info.type == "masm":
return assembler
assert as_info.type == "gcc"
# Need to add compiler wrappers and flags as appropriate.
return c_compiler.wrapper + [assembler] + c_compiler.flags
set_config("AS", as_with_flags)
@depends(assembler, c_compiler, extra_toolchain_flags)
@imports("subprocess")
@imports(_from="os", _import="devnull")
def gnu_as(assembler, c_compiler, toolchain_flags):
# clang uses a compatible GNU assembler.
if c_compiler.type == "clang":
return True
if c_compiler.type == "gcc":
cmd = [assembler] + c_compiler.flags
if toolchain_flags:
cmd += toolchain_flags
cmd += ["-Wa,--version", "-c", "-o", devnull, "-x", "assembler", "-"]
# We don't actually have to provide any input on stdin; `Popen.communicate`
# will close the stdin pipe.
# clang will error if it uses its integrated assembler for this target,
# so handle failures gracefully.
if "GNU" in check_cmd_output(*cmd, stdin=subprocess.PIPE, onerror=lambda: ""):
return True
set_config("GNU_AS", gnu_as)
@depends(as_info, target)
def as_dash_c_flag(as_info, target):
# armasm64 doesn't understand -c.
if as_info.type == "masm" and target.cpu == "aarch64":
return ""
else:
return "-c"
set_config("AS_DASH_C_FLAG", as_dash_c_flag)
@depends(as_info, target)
def as_outoption(as_info, target):
# The uses of ASOUTOPTION depend on the spacing for -o/-Fo.
if as_info.type == "masm" and target.cpu != "aarch64":
return "-Fo"
return "-o "
set_config("ASOUTOPTION", as_outoption)
# clang plugin handling
# ==============================================================
option(
"--enable-clang-plugin",
env="ENABLE_CLANG_PLUGIN",
help="Enable building with the Clang plugin (gecko specific static analyzers)",
)
set_config("ENABLE_CLANG_PLUGIN", True, when="--enable-clang-plugin")
set_define("MOZ_CLANG_PLUGIN", True, when="--enable-clang-plugin")
@depends(host_c_compiler, c_compiler, when="--enable-clang-plugin")
def llvm_config(host_c_compiler, c_compiler):
clang = None
for compiler in (host_c_compiler, c_compiler):
if compiler and compiler.type == "clang":
clang = compiler.compiler
break
elif compiler and compiler.type == "clang-cl":
clang = os.path.join(os.path.dirname(compiler.compiler), "clang")
break
if not clang:
die("Cannot --enable-clang-plugin when not building with clang")
llvm_config = "llvm-config"
out = check_cmd_output(clang, "--print-prog-name=llvm-config", onerror=lambda: None)
if out:
llvm_config = out.rstrip()
return (llvm_config,)
llvm_config = check_prog(
"LLVM_CONFIG",
llvm_config,
what="llvm-config",
when="--enable-clang-plugin",
paths=clang_search_path,
)
@template
def llvm_tool(name):
@depends(host_c_compiler, c_compiler, bindgen_config_paths)
def llvm_tool(host_c_compiler, c_compiler, bindgen_config_paths):
clang = None
for compiler in (host_c_compiler, c_compiler):
if compiler and compiler.type == "clang":
clang = compiler.compiler
break
elif compiler and compiler.type == "clang-cl":
clang = os.path.join(os.path.dirname(compiler.compiler), "clang")
break
if not clang and bindgen_config_paths:
clang = bindgen_config_paths.clang_path
tool = name
if clang:
out = check_cmd_output(
clang, "--print-prog-name=%s" % tool, onerror=lambda: None
)
if out:
tool = out.rstrip()
return (tool,)
return llvm_tool
llvm_objdump = check_prog(
"LLVM_OBJDUMP",
llvm_tool("llvm-objdump"),
what="llvm-objdump",
when="--enable-compile-environment",
paths=clang_search_path,
)
# Force clang-cl compiler to treat input as C++
# ==============================================================
add_flag("-TP", cxx_compiler, when=target_is_windows & ~building_with_gnu_compatible_cc)
# Use the old libstdc++ ABI
# ==============================================================
add_flag(
"-D_GLIBCXX_USE_CXX11_ABI=0",
cxx_compiler,
when=stdcxx_compat,
)
add_flag(
"-D_GLIBCXX_USE_CXX11_ABI=0",
host_cxx_compiler,
when=stdcxx_compat,
)
# Always included configuration file
# ==============================================================
@depends(c_compiler, build_environment, build_project)
def defines_cpp_flags(c_compiler, build_environment, build_project):
if build_project == "js":
config_h = "js/src/js-confdefs.h"
else:
config_h = "mozilla-config.h"
if c_compiler.type == "clang-cl":
flag = "-FI"
else:
flag = "-include"
return ["-DMOZILLA_CLIENT", flag, f"{build_environment.topobjdir}/{config_h}"]
set_config("OS_COMPILE_CFLAGS", defines_cpp_flags)
set_config("OS_COMPILE_CXXFLAGS", defines_cpp_flags)
# Support various fuzzing options
# ==============================================================
option("--enable-fuzzing", help="Enable fuzzing support")
@depends(build_project)
def js_build(build_project):
return build_project == "js"
option(
"--enable-js-fuzzilli",
when=js_build,
help="Enable fuzzilli support for the JS engine",
)
option(
"--enable-snapshot-fuzzing",
help="Enable experimental snapshot fuzzing support",
)
imply_option("--enable-fuzzing", True, when="--enable-snapshot-fuzzing")
@depends("--enable-snapshot-fuzzing")
def enable_snapshot_fuzzing(value):
if value:
return True
@depends("--enable-fuzzing", enable_snapshot_fuzzing)
def enable_fuzzing(value, snapshot_fuzzing):
if value or snapshot_fuzzing:
return True
@depends("--enable-js-fuzzilli", when=js_build)
def enable_js_fuzzilli(value):
if value:
return True
@depends(enable_fuzzing, enable_snapshot_fuzzing)
def check_aflfuzzer(fuzzing, snapshot_fuzzing):
if fuzzing and not snapshot_fuzzing:
return True
@depends(
try_compile(
body="__AFL_COMPILER;", check_msg="for AFL compiler", when=check_aflfuzzer
)
)
def enable_aflfuzzer(afl):
if afl:
return True
@depends(enable_fuzzing, enable_aflfuzzer, enable_snapshot_fuzzing, c_compiler, target)
def enable_libfuzzer(fuzzing, afl, snapshot_fuzzing, c_compiler, target):
if (
fuzzing
and not afl
and not snapshot_fuzzing
and c_compiler.type == "clang"
and target.os != "Android"
):
return True
@depends(enable_fuzzing, enable_aflfuzzer, enable_libfuzzer, enable_js_fuzzilli)
def enable_fuzzing_interfaces(fuzzing, afl, libfuzzer, enable_js_fuzzilli):
if fuzzing and (afl or libfuzzer) and not enable_js_fuzzilli:
return True
set_config("FUZZING", enable_fuzzing)
set_define("FUZZING", enable_fuzzing)
set_config("LIBFUZZER", enable_libfuzzer)
set_define("LIBFUZZER", enable_libfuzzer)
set_config("AFLFUZZ", enable_aflfuzzer)
set_define("AFLFUZZ", enable_aflfuzzer)
set_config("FUZZING_INTERFACES", enable_fuzzing_interfaces)
set_define("FUZZING_INTERFACES", enable_fuzzing_interfaces)
set_config("FUZZING_JS_FUZZILLI", enable_js_fuzzilli)
set_define("FUZZING_JS_FUZZILLI", enable_js_fuzzilli)
set_config("FUZZING_SNAPSHOT", enable_snapshot_fuzzing)
set_define("FUZZING_SNAPSHOT", enable_snapshot_fuzzing)
@depends(
c_compiler.try_compile(
flags=["-fsanitize=fuzzer-no-link"],
when=enable_fuzzing,
check_msg="whether the C compiler supports -fsanitize=fuzzer-no-link",
),
tsan,
enable_js_fuzzilli,
)
def libfuzzer_flags(value, tsan, enable_js_fuzzilli):
if tsan:
# With ThreadSanitizer, we should not use any libFuzzer instrumentation because
# it is incompatible (e.g. there are races on global sanitizer coverage counters).
# Instead we use an empty set of flags here but still build the fuzzing targets.
# With this setup, we can still run files through these targets in TSan builds,
# e.g. those obtained from regular fuzzing.
# This code can be removed once libFuzzer has been made compatible with TSan.
#
# Also, this code needs to be kept in sync with certain gyp files, currently:
# - dom/media/webrtc/transport/third_party/nICEr/nicer.gyp
return namespace(no_link_flag_supported=False, use_flags=[])
if enable_js_fuzzilli:
# Fuzzilli comes with its own trace-pc interceptors and flag requirements.
no_link_flag_supported = False
use_flags = ["-fsanitize-coverage=trace-pc-guard", "-g"]
elif value:
no_link_flag_supported = True
# recommended for (and only supported by) clang >= 6
use_flags = ["-fsanitize=fuzzer-no-link"]
else:
no_link_flag_supported = False
use_flags = ["-fsanitize-coverage=trace-pc-guard,trace-cmp"]
return namespace(
no_link_flag_supported=no_link_flag_supported,
use_flags=use_flags,
)
@depends(libfuzzer_flags, when=enable_libfuzzer)
def sancov(libfuzzer_flags):
return any(
flag.startswith(head)
for head in ("-fsanitize-coverage", "-fsanitize=fuzzer")
for flag in libfuzzer_flags.use_flags
)
set_config("HAVE_LIBFUZZER_FLAG_FUZZER_NO_LINK", libfuzzer_flags.no_link_flag_supported)
set_config("LIBFUZZER_FLAGS", libfuzzer_flags.use_flags)
# Required for stand-alone (sanitizer-less) libFuzzer.
# ==============================================================
@depends(libfuzzer_flags, linker_flags, when=enable_libfuzzer)
def add_libfuzzer_flags(libfuzzer_flags, linker_flags):
linker_flags.ldflags.extend(libfuzzer_flags.use_flags)
linker_flags.ldflags.append("-rdynamic")
# The LLVM symbolizer is used by all sanitizers
check_prog(
"LLVM_SYMBOLIZER",
("llvm-symbolizer",),
allow_missing=True,
paths=clang_search_path,
when=asan | msan | tsan | any_ubsan | enable_fuzzing,
)
# Shared library building
# ==============================================================
# XXX: The use of makefile constructs in these variables is awful.
@depends(target, c_compiler)
def make_shared_library(target, compiler):
if target.os == "WINNT":
if compiler.type == "gcc":
return namespace(
mkshlib=["$(CXX)", "$(DSO_LDOPTS)", "-o", "$@"],
mkcshlib=["$(CC)", "$(DSO_LDOPTS)", "-o", "$@"],
)
elif compiler.type == "clang":
return namespace(
mkshlib=[
"$(CXX)",
"$(DSO_LDOPTS)",
"-Wl,-pdb,$(LINK_PDBFILE)",
"-o",
"$@",
],
mkcshlib=[
"$(CC)",
"$(DSO_LDOPTS)",
"-Wl,-pdb,$(LINK_PDBFILE)",
"-o",
"$@",
],
)
else:
linker = [
"$(LINKER)",
"-NOLOGO",
"-DLL",
"-OUT:$@",
"-PDB:$(LINK_PDBFILE)",
"$(DSO_LDOPTS)",
]
return namespace(
mkshlib=linker,
mkcshlib=linker,
)
cc = ["$(CC)", "$(COMPUTED_C_LDFLAGS)"]
cxx = ["$(CXX)", "$(COMPUTED_CXX_LDFLAGS)"]
flags = ["$(DSO_LDOPTS)"]
output = ["-o", "$@"]
if target.kernel == "Darwin":
soname = []
elif target.os == "NetBSD":
soname = ["-Wl,-soname,$(DSO_SONAME)"]
else:
assert compiler.type in ("gcc", "clang")
soname = ["-Wl,-h,$(DSO_SONAME)"]
return namespace(
mkshlib=cxx + flags + soname + output,
mkcshlib=cc + flags + soname + output,
)
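# For illustration (hypothetical target): on a Linux/clang build the namespace
# above yields roughly
#   MKSHLIB = $(CXX) $(COMPUTED_CXX_LDFLAGS) $(DSO_LDOPTS) -Wl,-h,$(DSO_SONAME) -o $@
# with the Darwin/NetBSD branches differing only in the soname flags.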
set_config("MKSHLIB", make_shared_library.mkshlib)
set_config("MKCSHLIB", make_shared_library.mkcshlib)
@depends(c_compiler, toolchain_prefix, when=target_is_windows)
def rc_names(c_compiler, toolchain_prefix):
if c_compiler.type in ("gcc", "clang"):
return tuple("%s%s" % (p, "windres") for p in ("",) + (toolchain_prefix or ()))
return ("llvm-rc",)
check_prog("RC", rc_names, paths=clang_search_path, when=target_is_windows)
@template
def ar_config(c_compiler, toolchain_prefix=None):
if not toolchain_prefix:
toolchain_prefix = dependable(None)
@depends(toolchain_prefix, c_compiler)
def ar_config(toolchain_prefix, c_compiler):
if c_compiler.type == "clang-cl":
return namespace(
names=("llvm-lib",),
flags=("-llvmlibthin", "-out:$@"),
)
names = tuple("%s%s" % (p, "ar") for p in (toolchain_prefix or ()) + ("",))
if c_compiler.type == "clang":
# Get the llvm-ar path as per the output from clang --print-prog-name=llvm-ar
# so that we directly get the one under the clang directory, rather than one
# that might be in /usr/bin and that might point to one from a different version
# of clang.
out = check_cmd_output(
c_compiler.compiler, "--print-prog-name=llvm-ar", onerror=lambda: None
)
llvm_ar = out.rstrip() if out else "llvm-ar"
names = (llvm_ar,) + names
return namespace(
names=names,
flags=("crs", "$@"),
)
return ar_config
target_ar_config = ar_config(c_compiler, toolchain_prefix)
target_ar = check_prog("AR", target_ar_config.names, paths=clang_search_path)
set_config("AR_FLAGS", target_ar_config.flags)
@depends(c_compiler, extra_toolchain_flags, target_ar, target_ar_config)
@checking("whether ar supports response files")
@imports("os")
@imports(_from="__builtin__", _import="FileNotFoundError")
@imports(_from="__builtin__", _import="open")
@imports(_from="mozbuild.configure.util", _import="LineIO")
def ar_supports_response_files(c_compiler, extra_toolchain_flags, ar, ar_config):
lib_path = list_path = None
with create_temporary_file(suffix=".o") as obj_path:
if (
try_invoke_compiler(
# No configure_cache because it would not create the
# expected output file.
None,
[c_compiler.compiler] + c_compiler.flags,
c_compiler.language,
"void foo() {}",
["-c", "-o", obj_path] + (extra_toolchain_flags or []),
wrapper=c_compiler.wrapper,
onerror=lambda: None,
)
is not None
):
with create_temporary_file(suffix=".list") as list_path:
with open(list_path, "w") as fd:
fd.write(obj_path)
log.debug("Creating `%s` with content:", list_path)
log.debug("| %s", obj_path)
with create_temporary_file(suffix=".a") as lib_path:
os.remove(lib_path)
ar_command = (
[ar]
+ [x.replace("$@", lib_path) for x in ar_config.flags]
+ ["@" + list_path]
)
result = check_cmd_output(*ar_command, onerror=lambda: None)
return result is not None
set_config("AR_SUPPORTS_RESPONSE_FILE", True, when=ar_supports_response_files)
host_ar_config = ar_config(host_c_compiler)
check_prog("HOST_AR", host_ar_config.names, paths=clang_search_path)
@depends(toolchain_prefix, c_compiler)
def nm_names(toolchain_prefix, c_compiler):
names = tuple("%s%s" % (p, "nm") for p in (toolchain_prefix or ()) + ("",))
if c_compiler.type == "clang":
# Get the llvm-nm path as per the output from clang --print-prog-name=llvm-nm
# so that we directly get the one under the clang directory, rather than one
# that might be in /usr/bin and that might point to one from a different version
# of clang.
out = check_cmd_output(
c_compiler.compiler, "--print-prog-name=llvm-nm", onerror=lambda: None
)
llvm_nm = out.rstrip() if out else "llvm-nm"
names = (llvm_nm,) + names
return names
check_prog("NM", nm_names, paths=clang_search_path, when=target_has_linux_kernel)
# We don't use it in the code, but it can be useful for debugging, so give
# the user the option of enabling it.
option("--enable-cpp-rtti", help="Enable C++ RTTI")
@depends(compilation_flags, c_compiler, "--enable-cpp-rtti")
def enable_cpp_rtti(compilation_flags, c_compiler, enable_rtti):
if enable_rtti:
return
if c_compiler.type == "clang-cl":
compilation_flags.cxxflags.append("-GR-")
else:
compilation_flags.cxxflags.append("-fno-rtti")
option(
"--enable-path-remapping",
nargs="*",
choices=("c", "rust"),
help="Enable remapping source and object paths in compiled outputs",
)
@depends("--enable-path-remapping")
def path_remapping(value):
if len(value):
return value
if bool(value):
return ["c", "rust"]
return []
@depends(
target,
build_environment,
target_sysroot.path,
valid_windows_sdk_dir,
vc_path,
when="--enable-path-remapping",
)
def path_remappings(target, build_env, sysroot_path, windows_sdk_dir, vc_path):
win = target.kernel == "WINNT"
# The prefix maps are processed in the order they're specified on the
# command line. Therefore, to accommodate object directories in the source
# directory, it's important that we map the topobjdir before the topsrcdir,
# 'cuz we might have /src/obj/=/o/ and /src/=/s/. The various other
# directories might be subdirectories of topsrcdir as well, so they come
# earlier still.
path_remappings = []
# We will have only one sysroot or SDK, so all can have the same mnemonic: K
# for "kit" (since S is taken for "source"). See
# for how to use the Windows `subst` command to map these in debuggers and
# IDEs.
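# For example (hypothetical local paths), something like
#   subst S: C:\mozilla-source\mozilla-unified
# makes the remapped s:/ source paths resolve again on a developer machine.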
if sysroot_path:
path_remappings.append((sysroot_path, "k:/" if win else "/sysroot/"))
if windows_sdk_dir:
path_remappings.append(
(windows_sdk_dir.path, "k:/" if win else "/windows_sdk/")
)
if vc_path:
path_remappings.append((vc_path, "v:/" if win else "/vc/"))
path_remappings += [
(build_env.topobjdir, "o:/" if win else "/topobjdir/"),
(build_env.topsrcdir, "s:/" if win else "/topsrcdir/"),
]
path_remappings = [
(normsep(old).rstrip("/") + "/", new) for old, new in path_remappings
]
# It is tempting to sort these, but we want the order to be the same across
# machines so that we can share cache hits. Therefore we reject bad
# configurations rather than trying to make the configuration good.
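# For example, a (hypothetical) sysroot path that happened to be an ancestor
# of topsrcdir would trip the check below and die, rather than silently
# depend on the mapping order.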
for i in range(len(path_remappings) - 1):
p = path_remappings[i][0]
for q, _ in path_remappings[i + 1 :]:
if q.startswith(p):
die(f"Cannot remap paths because {p} is an ancestor of {q}")
return path_remappings
@depends(target)
def is_intel_target(target):
return target.cpu in ("x86", "x86_64")
@depends(target)
def is_aarch64_target(target):
return target.cpu == "aarch64"
set_config("MMX_FLAGS", ["-mmmx"])
set_config("SSE_FLAGS", ["-msse"])
set_config("SSE2_FLAGS", ["-msse2"])
set_config("SSSE3_FLAGS", ["-mssse3"])
set_config("SSE4_2_FLAGS", ["-msse4.2"])
set_config("FMA_FLAGS", ["-mfma"])
set_config("AVX2_FLAGS", ["-mavx2"])
set_config(
"AVXVNNI_FLAGS",
["-mavxvnni"],
try_compile(
check_msg="for -mavxvnni support", flags=["-mavxvnni"], when=is_intel_target
),
)
set_config(
"AVX512BW_FLAGS",
["-mavx512bw", "-mavx512f", "-mavx512dq", "-mavx512cd"],
try_compile(
check_msg="for -mavx512bw support",
flags=["-mavx512bw", "-mavx512f", "-mavx512dq", "-mavx512cd"],
when=is_intel_target,
),
)
# AVX512VNNI can be based on either avx512bw or avx512vbmi. We choose the
# former.
set_config(
"AVX512VNNI_FLAGS",
["-mavx512vnni", "-mavx512bw", "-mavx512f", "-mavx512dq", "-mavx512cd"],
try_compile(
check_msg="for -mavx512vnni support",
flags=["-mavx512vnni", "-mavx512bw", "-mavx512f", "-mavx512dq", "-mavx512cd"],
when=is_intel_target,
),
)
set_config(
"NEON_I8MM_FLAGS",
["-march=armv8.2-a+i8mm"],
try_compile(
check_msg="for i8mm target feature",
flags=["-march=armv8.2-a+i8mm"],
when=is_aarch64_target,
),
)
set_config(
"SVE2_FLAGS",
["-march=armv9-a+sve2"],
try_compile(
check_msg="for ARM SVE2 target feature",
flags=["-march=armv9-a+sve2"],
when=is_aarch64_target,
),
)
set_config(
"DOTPROD_FLAGS",
["-march=armv8.2-a+dotprod"],
try_compile(
check_msg="for ARM dotprod target feature",
flags=["-march=armv8.2-a+dotprod"],
when=is_aarch64_target,
),
)
# dtrace support
##
option("--enable-dtrace", help="Build with dtrace support")
dtrace = check_header(
"sys/sdt.h",
when="--enable-dtrace",
onerror=lambda: die("dtrace enabled but sys/sdt.h not found"),
)
set_config("HAVE_DTRACE", True, when=dtrace)
set_define("INCLUDE_MOZILLA_DTRACE", True, when=dtrace)