/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
*
* Copyright 2015 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wasm/WasmGenerator.h"
#include "mozilla/EnumeratedRange.h"
#include "mozilla/SHA1.h"
#include <algorithm>
#include "jit/Assembler.h"
#include "jit/JitOptions.h"
#include "js/Printf.h"
#include "threading/Thread.h"
#include "util/Memory.h"
#include "util/Text.h"
#include "vm/HelperThreads.h"
#include "vm/Time.h"
#include "wasm/WasmBaselineCompile.h"
#include "wasm/WasmCompile.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmIonCompile.h"
#include "wasm/WasmStubs.h"
#include "wasm/WasmSummarizeInsn.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::EnumeratedArray;
using mozilla::MakeEnumeratedRange;
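// Transfer the assembled code bytes and all per-code metadata (call sites,
// trap sites, try notes, code labels, etc.) out of the given MacroAssembler
// and into this CompiledCode.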
bool CompiledCode::swap(MacroAssembler& masm) {
MOZ_ASSERT(bytes.empty());
if (!masm.swapBuffer(bytes)) {
return false;
}
callSites.swap(masm.callSites());
callSiteTargets.swap(masm.callSiteTargets());
trapSites.swap(masm.trapSites());
symbolicAccesses.swap(masm.symbolicAccesses());
tryNotes.swap(masm.tryNotes());
codeRangeUnwindInfos.swap(masm.codeRangeUnwindInfos());
callRefMetricsPatches.swap(masm.callRefMetricsPatches());
codeLabels.swap(masm.codeLabels());
return true;
}
// ****************************************************************************
// ModuleGenerator
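// Default chunk sizes for the LifoAllocs used by the generator itself and by
// the per-task compilation allocators, respectively.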
static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
ModuleGenerator::MacroAssemblerScope::MacroAssemblerScope(LifoAlloc& lifo)
: masmAlloc(&lifo), masm(masmAlloc, /* limitedSize= */ false) {}
ModuleGenerator::ModuleGenerator(const CodeMetadata& codeMeta,
const CompilerEnvironment& compilerEnv,
CompileState compileState,
const mozilla::Atomic<bool>* cancelled,
UniqueChars* error,
UniqueCharsVector* warnings)
: compileArgs_(codeMeta.compileArgs.get()),
compileState_(compileState),
error_(error),
warnings_(warnings),
cancelled_(cancelled),
codeMeta_(&codeMeta),
compilerEnv_(&compilerEnv),
featureUsage_(FeatureUsage::None),
codeBlock_(nullptr),
linkData_(nullptr),
lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE, js::MallocArena),
masm_(nullptr),
debugStubCodeOffset_(0),
requestTierUpStubCodeOffset_(0),
lastPatchedCallSite_(0),
startOfUnpatchedCallsites_(0),
numCallRefMetrics_(0),
parallel_(false),
outstanding_(0),
currentTask_(nullptr),
batchedBytecode_(0),
finishedFuncDefs_(false) {
MOZ_ASSERT(codeMeta_->isPreparedForCompile());
}
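// The destructor must drain any still-outstanding parallel compilation tasks
// before the task state is destroyed, and propagates any error recorded by a
// helper thread back to the caller via 'error_'.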
ModuleGenerator::~ModuleGenerator() {
MOZ_ASSERT_IF(finishedFuncDefs_, !batchedBytecode_);
MOZ_ASSERT_IF(finishedFuncDefs_, !currentTask_);
if (parallel_) {
if (outstanding_) {
AutoLockHelperThreadState lock;
// Remove any pending compilation tasks from the worklist.
size_t removed =
RemovePendingWasmCompileTasks(taskState_, compileState_, lock);
MOZ_ASSERT(outstanding_ >= removed);
outstanding_ -= removed;
// Wait until all active compilation tasks have finished.
while (true) {
MOZ_ASSERT(outstanding_ >= taskState_.finished().length());
outstanding_ -= taskState_.finished().length();
taskState_.finished().clear();
MOZ_ASSERT(outstanding_ >= taskState_.numFailed());
outstanding_ -= taskState_.numFailed();
taskState_.numFailed() = 0;
if (!outstanding_) {
break;
}
taskState_.condVar().wait(lock); /* failed or finished */
}
}
} else {
MOZ_ASSERT(!outstanding_);
}
// Propagate error state.
if (error_ && !*error_) {
AutoLockHelperThreadState lock;
*error_ = std::move(taskState_.errorMessage());
}
}
bool ModuleGenerator::initializeCompleteTier(
CodeMetadataForAsmJS* codeMetaForAsmJS) {
MOZ_ASSERT(compileState_ != CompileState::LazyTier2);
// Initialize our task system
if (!initTasks()) {
return false;
}
// If codeMetaForAsmJS is null, we're compiling wasm; else we're compiling
// asm.js, in which case it contains wasm::Code-lifetime asm.js-specific
// information.
MOZ_ASSERT(isAsmJS() == !!codeMetaForAsmJS);
codeMetaForAsmJS_ = codeMetaForAsmJS;
// Generate the shared stubs block, if we're compiling tier-1
if (compilingTier1() && !prepareTier1()) {
return false;
}
return startCompleteTier();
}
bool ModuleGenerator::initializePartialTier(const Code& code,
uint32_t funcIndex) {
MOZ_ASSERT(compileState_ == CompileState::LazyTier2);
MOZ_ASSERT(!isAsmJS());
// Initialize our task system
if (!initTasks()) {
return false;
}
// The implied codeMeta must be consistent with the one we already have.
MOZ_ASSERT(&code.codeMeta() == codeMeta_);
MOZ_ASSERT(!partialTieringCode_);
partialTieringCode_ = &code;
return startPartialTier(funcIndex);
}
bool ModuleGenerator::funcIsCompiledInBlock(uint32_t funcIndex) const {
return codeBlock_->funcToCodeRange[funcIndex] != BAD_CODE_RANGE;
}
const CodeRange& ModuleGenerator::funcCodeRangeInBlock(
uint32_t funcIndex) const {
MOZ_ASSERT(funcIsCompiledInBlock(funcIndex));
const CodeRange& cr =
codeBlock_->codeRanges[codeBlock_->funcToCodeRange[funcIndex]];
MOZ_ASSERT(cr.isFunction());
return cr;
}
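// Returns whether a relative call/jump from 'caller' to 'callee' (both given
// as offsets into the module's code) is guaranteed to be encodable as a
// direct branch, i.e. whether no far-jump island is needed. (On AArch64, for
// example, a direct branch reaches about +/-128 MiB.)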
static bool InRange(uint32_t caller, uint32_t callee) {
// We assume JumpImmediateRange is defined conservatively enough that the
// slight difference between 'caller' (which is really the return address
// offset) and the actual base of the relative displacement computation
// isn't significant.
uint32_t range = std::min(JitOptions.jumpThreshold, JumpImmediateRange);
if (caller < callee) {
return callee - caller < range;
}
return caller - callee < range;
}
using OffsetMap =
HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>;
using TrapMaybeOffsetArray =
EnumeratedArray<Trap, mozilla::Maybe<uint32_t>, size_t(Trap::Limit)>;
bool ModuleGenerator::linkCallSites() {
AutoCreatedBy acb(*masm_, "linkCallSites");
masm_->haltingAlign(CodeAlignment);
// Create far jumps for calls that have relative offsets that may otherwise
// go out of range. This method is called both between function bodies (at a
// frequency determined by the ISA's jump range) and once at the very end of
// a module's codegen after all possible calls/traps have been emitted.
OffsetMap existingCallFarJumps;
for (; lastPatchedCallSite_ < codeBlock_->callSites.length();
lastPatchedCallSite_++) {
const CallSite& callSite = codeBlock_->callSites[lastPatchedCallSite_];
const CallSiteTarget& target = callSiteTargets_[lastPatchedCallSite_];
uint32_t callerOffset = callSite.returnAddressOffset();
switch (callSite.kind()) {
case CallSiteDesc::Import:
case CallSiteDesc::Indirect:
case CallSiteDesc::IndirectFast:
case CallSiteDesc::Symbolic:
case CallSiteDesc::Breakpoint:
case CallSiteDesc::EnterFrame:
case CallSiteDesc::LeaveFrame:
case CallSiteDesc::CollapseFrame:
case CallSiteDesc::FuncRef:
case CallSiteDesc::FuncRefFast:
case CallSiteDesc::ReturnStub:
case CallSiteDesc::StackSwitch:
case CallSiteDesc::RequestTierUp:
break;
case CallSiteDesc::ReturnFunc:
case CallSiteDesc::Func: {
auto patch = [this, callSite](uint32_t callerOffset,
uint32_t calleeOffset) {
if (callSite.kind() == CallSiteDesc::ReturnFunc) {
masm_->patchFarJump(CodeOffset(callerOffset), calleeOffset);
} else {
MOZ_ASSERT(callSite.kind() == CallSiteDesc::Func);
masm_->patchCall(callerOffset, calleeOffset);
}
};
if (funcIsCompiledInBlock(target.funcIndex())) {
uint32_t calleeOffset =
funcCodeRangeInBlock(target.funcIndex()).funcUncheckedCallEntry();
if (InRange(callerOffset, calleeOffset)) {
patch(callerOffset, calleeOffset);
break;
}
}
OffsetMap::AddPtr p =
existingCallFarJumps.lookupForAdd(target.funcIndex());
if (!p) {
Offsets offsets;
offsets.begin = masm_->currentOffset();
if (!callFarJumps_.emplaceBack(target.funcIndex(),
masm_->farJumpWithPatch().offset())) {
return false;
}
offsets.end = masm_->currentOffset();
if (masm_->oom()) {
return false;
}
if (!codeBlock_->codeRanges.emplaceBack(CodeRange::FarJumpIsland,
offsets)) {
return false;
}
if (!existingCallFarJumps.add(p, target.funcIndex(), offsets.begin)) {
return false;
}
}
patch(callerOffset, p->value());
break;
}
}
}
masm_->flushBuffer();
return !masm_->oom();
}
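// Record per-kind bookkeeping for a CodeRange that has just been merged into
// the current code block: functions are mapped to their code range, entry and
// exit stubs have their offsets recorded, and singleton stubs (debug stub,
// tier-up request stub, trap exit) have their unique offsets noted.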
void ModuleGenerator::noteCodeRange(uint32_t codeRangeIndex,
const CodeRange& codeRange) {
switch (codeRange.kind()) {
case CodeRange::Function:
MOZ_ASSERT(codeBlock_->funcToCodeRange[codeRange.funcIndex()] ==
BAD_CODE_RANGE);
codeBlock_->funcToCodeRange.insertInfallible(codeRange.funcIndex(),
codeRangeIndex);
break;
case CodeRange::InterpEntry:
codeBlock_->lookupFuncExport(codeRange.funcIndex())
.initEagerInterpEntryOffset(codeRange.begin());
break;
case CodeRange::JitEntry:
// Nothing to do: jit entries are linked in the jump tables.
break;
case CodeRange::ImportJitExit:
funcImports_[codeRange.funcIndex()].initJitExitOffset(codeRange.begin());
break;
case CodeRange::ImportInterpExit:
funcImports_[codeRange.funcIndex()].initInterpExitOffset(
codeRange.begin());
break;
case CodeRange::DebugStub:
MOZ_ASSERT(!debugStubCodeOffset_);
debugStubCodeOffset_ = codeRange.begin();
break;
case CodeRange::RequestTierUpStub:
MOZ_ASSERT(!requestTierUpStubCodeOffset_);
requestTierUpStubCodeOffset_ = codeRange.begin();
break;
case CodeRange::TrapExit:
MOZ_ASSERT(!linkData_->trapOffset);
linkData_->trapOffset = codeRange.begin();
break;
case CodeRange::Throw:
// Jumped to by other stubs, so nothing to do.
break;
case CodeRange::FarJumpIsland:
case CodeRange::BuiltinThunk:
MOZ_CRASH("Unexpected CodeRange kind");
}
}
// Append every element from `srcVec` for which `filterOp(srcElem) == true`,
// applying `mutateOp(dstIndex, dstElem)` to every element that is appended.
template <class Vec, class FilterOp, class MutateOp>
static bool AppendForEach(Vec* dstVec, const Vec& srcVec, FilterOp filterOp,
MutateOp mutateOp) {
// Eagerly grow the vector to the whole src vector. Any filtered elements
// will be trimmed later.
if (!dstVec->growByUninitialized(srcVec.length())) {
return false;
}
using T = typename Vec::ElementType;
T* dstBegin = dstVec->begin();
T* dstEnd = dstVec->end();
// We eagerly grew the vector by srcVec.length() elements above, so start
// writing at the first of those uninitialized elements.
T* dst = dstEnd - srcVec.length();
for (const T* src = srcVec.begin(); src != srcVec.end(); src++) {
if (!filterOp(src)) {
continue;
}
new (dst) T(*src);
mutateOp(dst - dstBegin, dst);
dst++;
}
// Trim off the space reserved for elements that ended up being filtered out.
size_t newSize = dst - dstBegin;
if (newSize != dstVec->length()) {
dstVec->shrinkTo(newSize);
}
return true;
}
template <typename T>
bool FilterNothing(const T* element) {
return true;
}
// The same as the above `AppendForEach`, without performing any filtering.
template <class Vec, class MutateOp>
static bool AppendForEach(Vec* dstVec, const Vec& srcVec, MutateOp mutateOp) {
using T = typename Vec::ElementType;
return AppendForEach(dstVec, srcVec, &FilterNothing<T>, mutateOp);
}
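// Merge one batch of compiled code into the module-wide MacroAssembler and
// metadata. All offsets recorded by the batch are relative to the start of
// its own buffer, so they must be rebased by the batch's final position in
// the module ('offsetInModule' below).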
bool ModuleGenerator::linkCompiledCode(CompiledCode& code) {
AutoCreatedBy acb(*masm_, "ModuleGenerator::linkCompiledCode");
JitContext jcx;
// Combine observed features from the compiled code into the metadata
featureUsage_ |= code.featureUsage;
if (compilingTier1() && mode() == CompileMode::LazyTiering) {
// All the CallRefMetrics from this batch of functions will start indexing
// at our current length of metrics.
uint32_t startOfCallRefMetrics = numCallRefMetrics_;
for (const FuncCompileOutput& func : code.funcs) {
// We only compile defined functions, not imported functions
MOZ_ASSERT(func.index >= codeMeta_->numFuncImports);
uint32_t funcDefIndex = func.index - codeMeta_->numFuncImports;
// This function should only be compiled once
MOZ_ASSERT(funcDefFeatureUsages_[funcDefIndex] == FeatureUsage::None);
// Track the feature usage for this function
funcDefFeatureUsages_[funcDefIndex] = func.featureUsage;
// Record the range of CallRefMetrics this function owns. The metrics
// will be processed below when we patch the offsets into code.
MOZ_ASSERT(func.callRefMetricsRange.begin +
func.callRefMetricsRange.length <=
code.callRefMetricsPatches.length());
funcDefCallRefMetrics_[funcDefIndex] = func.callRefMetricsRange;
funcDefCallRefMetrics_[funcDefIndex].offsetBy(startOfCallRefMetrics);
}
} else {
MOZ_ASSERT(funcDefFeatureUsages_.empty());
MOZ_ASSERT(funcDefCallRefMetrics_.empty());
MOZ_ASSERT(code.callRefMetricsPatches.empty());
#ifdef DEBUG
for (const FuncCompileOutput& func : code.funcs) {
MOZ_ASSERT(func.callRefMetricsRange.length == 0);
}
#endif
}
// Before merging in new code, if calls in a prior code range might go out of
// range, insert far jumps to extend the range.
if (!InRange(startOfUnpatchedCallsites_,
masm_->size() + code.bytes.length())) {
startOfUnpatchedCallsites_ = masm_->size();
if (!linkCallSites()) {
return false;
}
}
// All code offsets in 'code' must be incremented by their position in the
// overall module when the code was appended.
masm_->haltingAlign(CodeAlignment);
const size_t offsetInModule = masm_->size();
if (code.bytes.length() != 0 &&
!masm_->appendRawCode(code.bytes.begin(), code.bytes.length())) {
return false;
}
auto codeRangeOp = [offsetInModule, this](uint32_t codeRangeIndex,
CodeRange* codeRange) {
codeRange->offsetBy(offsetInModule);
noteCodeRange(codeRangeIndex, *codeRange);
};
if (!AppendForEach(&codeBlock_->codeRanges, code.codeRanges, codeRangeOp)) {
return false;
}
auto callSiteOp = [=](uint32_t, CallSite* cs) {
cs->offsetBy(offsetInModule);
};
if (!AppendForEach(&codeBlock_->callSites, code.callSites, callSiteOp)) {
return false;
}
if (!callSiteTargets_.appendAll(code.callSiteTargets)) {
return false;
}
for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
auto trapSiteOp = [=](uint32_t, TrapSite* ts) {
ts->offsetBy(offsetInModule);
};
if (!AppendForEach(&codeBlock_->trapSites[trap], code.trapSites[trap],
trapSiteOp)) {
return false;
}
}
for (const SymbolicAccess& access : code.symbolicAccesses) {
uint32_t patchAt = offsetInModule + access.patchAt.offset();
if (!linkData_->symbolicLinks[access.target].append(patchAt)) {
return false;
}
}
for (const CallRefMetricsPatch& patch : code.callRefMetricsPatches) {
if (!patch.hasOffsetOfOffsetPatch()) {
numCallRefMetrics_ += 1;
continue;
}
CodeOffset offset = CodeOffset(patch.offsetOfOffsetPatch());
offset.offsetBy(offsetInModule);
size_t callRefIndex = numCallRefMetrics_;
numCallRefMetrics_ += 1;
size_t callRefMetricOffset = callRefIndex * sizeof(CallRefMetrics);
// Compute the offset of the metrics, and patch it. This may overflow,
// in which case we report an OOM. We might need to do something smarter
// here.
if (callRefMetricOffset > (INT32_MAX / sizeof(CallRefMetrics))) {
return false;
}
masm_->patchMove32(offset, int32_t(callRefMetricOffset));
}
for (const CodeLabel& codeLabel : code.codeLabels) {
LinkData::InternalLink link;
link.patchAtOffset = offsetInModule + codeLabel.patchAt().offset();
link.targetOffset = offsetInModule + codeLabel.target().offset();
#ifdef JS_CODELABEL_LINKMODE
link.mode = codeLabel.linkMode();
#endif
if (!linkData_->internalLinks.append(link)) {
return false;
}
}
for (size_t i = 0; i < code.stackMaps.length(); i++) {
StackMaps::Maplet maplet = code.stackMaps.move(i);
maplet.offsetBy(offsetInModule);
if (!codeBlock_->stackMaps.add(maplet)) {
// This function is now the only owner of maplet.map, so we'd better
// free it right now.
maplet.map->destroy();
return false;
}
}
auto unwindInfoOp = [=](uint32_t, CodeRangeUnwindInfo* i) {
i->offsetBy(offsetInModule);
};
if (!AppendForEach(&codeBlock_->codeRangeUnwindInfos,
code.codeRangeUnwindInfos, unwindInfoOp)) {
return false;
}
auto tryNoteFilter = [](const TryNote* tn) {
// Filter out all try notes that were never given a try body. This may
// happen due to dead code elimination.
return tn->hasTryBody();
};
auto tryNoteOp = [=](uint32_t, TryNote* tn) { tn->offsetBy(offsetInModule); };
return AppendForEach(&codeBlock_->tryNotes, code.tryNotes, tryNoteFilter,
tryNoteOp);
}
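// Compile every function in the task's input batch with the compiler selected
// by the task's tier (Ion for Tier::Optimized, the baseline compiler for
// Tier::Baseline), leaving the results in task->output.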
static bool ExecuteCompileTask(CompileTask* task, UniqueChars* error) {
MOZ_ASSERT(task->lifo.isEmpty());
MOZ_ASSERT(task->output.empty());
switch (task->compilerEnv.tier()) {
case Tier::Optimized:
if (!IonCompileFunctions(task->codeMeta, task->compilerEnv, task->lifo,
task->inputs, &task->output, error)) {
return false;
}
break;
case Tier::Baseline:
if (!BaselineCompileFunctions(task->codeMeta, task->compilerEnv,
task->lifo, task->inputs, &task->output,
error)) {
return false;
}
break;
}
MOZ_ASSERT(task->lifo.isEmpty());
MOZ_ASSERT(task->inputs.length() == task->output.codeRanges.length());
task->inputs.clear();
return true;
}
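// Helper-thread entry point: run the compilation outside the helper-thread
// lock, then publish the finished task (or the failure) and wake any thread
// waiting in the ModuleGenerator.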
void CompileTask::runHelperThreadTask(AutoLockHelperThreadState& lock) {
UniqueChars error;
bool ok;
{
AutoUnlockHelperThreadState unlock(lock);
ok = ExecuteCompileTask(this, &error);
}
// Don't release the lock between updating our state and returning from this
// method.
if (!ok || !state.finished().append(this)) {
state.numFailed()++;
if (!state.errorMessage()) {
state.errorMessage() = std::move(error);
}
}
state.condVar().notify_one(); /* failed or finished */
}
ThreadType CompileTask::threadType() {
switch (compileState) {
case CompileState::Once:
case CompileState::EagerTier1:
case CompileState::LazyTier1:
return ThreadType::THREAD_TYPE_WASM_COMPILE_TIER1;
case CompileState::EagerTier2:
case CompileState::LazyTier2:
return ThreadType::THREAD_TYPE_WASM_COMPILE_TIER2;
default:
MOZ_CRASH();
}
}
bool ModuleGenerator::initTasks() {
// Determine whether parallel or sequential compilation is to be used and
// initialize the CompileTasks that will be used in either mode.
MOZ_ASSERT(GetHelperThreadCount() > 1);
MOZ_ASSERT(!parallel_);
uint32_t numTasks = 1;
if ( // "obvious" prerequisites for doing off-thread compilation
CanUseExtraThreads() && GetHelperThreadCPUCount() > 1 &&
// For lazy tier 2 compilations, the current thread -- running a
// WasmPartialTier2CompileTask -- is already dedicated to compiling the
// to-be-tiered-up function. So don't create a new task for it.
compileState_ != CompileState::LazyTier2) {
parallel_ = true;
numTasks = 2 * GetMaxWasmCompilationThreads();
}
if (!tasks_.initCapacity(numTasks)) {
return false;
}
for (size_t i = 0; i < numTasks; i++) {
tasks_.infallibleEmplaceBack(*codeMeta_, *compilerEnv_, compileState_,
taskState_,
COMPILATION_LIFO_DEFAULT_CHUNK_SIZE);
}
if (!freeTasks_.reserve(numTasks)) {
return false;
}
for (size_t i = 0; i < numTasks; i++) {
freeTasks_.infallibleAppend(&tasks_[i]);
}
return true;
}
bool ModuleGenerator::locallyCompileCurrentTask() {
if (!ExecuteCompileTask(currentTask_, error_)) {
return false;
}
if (!finishTask(currentTask_)) {
return false;
}
currentTask_ = nullptr;
batchedBytecode_ = 0;
return true;
}
bool ModuleGenerator::finishTask(CompileTask* task) {
AutoCreatedBy acb(*masm_, "ModuleGenerator::finishTask");
masm_->haltingAlign(CodeAlignment);
if (!linkCompiledCode(task->output)) {
return false;
}
task->output.clear();
MOZ_ASSERT(task->inputs.empty());
MOZ_ASSERT(task->output.empty());
MOZ_ASSERT(task->lifo.isEmpty());
freeTasks_.infallibleAppend(task);
return true;
}
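// Hand the currently batched functions off for compilation: to a helper
// thread when compiling in parallel, otherwise synchronously on this thread.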
bool ModuleGenerator::launchBatchCompile() {
MOZ_ASSERT(currentTask_);
if (cancelled_ && *cancelled_) {
return false;
}
if (!parallel_) {
return locallyCompileCurrentTask();
}
if (!StartOffThreadWasmCompile(currentTask_, compileState_)) {
return false;
}
outstanding_++;
currentTask_ = nullptr;
batchedBytecode_ = 0;
return true;
}
bool ModuleGenerator::finishOutstandingTask() {
MOZ_ASSERT(parallel_);
CompileTask* task = nullptr;
{
AutoLockHelperThreadState lock;
while (true) {
MOZ_ASSERT(outstanding_ > 0);
if (taskState_.numFailed() > 0) {
return false;
}
if (!taskState_.finished().empty()) {
outstanding_--;
task = taskState_.finished().popCopy();
break;
}
taskState_.condVar().wait(lock); /* failed or finished */
}
}
// Call outside of the compilation lock.
return finishTask(task);
}
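// Queue a single function body for compilation. Bodies are accumulated into
// the current task until a tier-dependent bytecode threshold is reached, at
// which point the whole batch is launched as one compilation task.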
bool ModuleGenerator::compileFuncDef(uint32_t funcIndex,
uint32_t lineOrBytecode,
const uint8_t* begin, const uint8_t* end,
Uint32Vector&& lineNums) {
MOZ_ASSERT(!finishedFuncDefs_);
MOZ_ASSERT(funcIndex < codeMeta_->numFuncs());
if (compilingTier1()) {
static_assert(MaxFunctionBytes < UINT32_MAX);
uint32_t bodyLength = (uint32_t)(end - begin);
funcDefRanges_.infallibleAppend(FuncDefRange(lineOrBytecode, bodyLength));
}
uint32_t threshold;
switch (tier()) {
case Tier::Baseline:
threshold = JitOptions.wasmBatchBaselineThreshold;
break;
case Tier::Optimized:
threshold = JitOptions.wasmBatchIonThreshold;
break;
default:
MOZ_CRASH("Invalid tier value");
break;
}
uint32_t funcBytecodeLength = end - begin;
// Do not go over the threshold if we can avoid it: spin off the compilation
// before appending the function if we would go over. (Very large single
// functions may still exceed the threshold but this is fine; it'll be very
// uncommon and is in any case safely handled by the MacroAssembler's buffer
// limit logic.)
if (currentTask_ && currentTask_->inputs.length() &&
batchedBytecode_ + funcBytecodeLength > threshold) {
if (!launchBatchCompile()) {
return false;
}
}
if (!currentTask_) {
if (freeTasks_.empty() && !finishOutstandingTask()) {
return false;
}
currentTask_ = freeTasks_.popCopy();
}
if (!currentTask_->inputs.emplaceBack(funcIndex, lineOrBytecode, begin, end,
std::move(lineNums))) {
return false;
}
batchedBytecode_ += funcBytecodeLength;
MOZ_ASSERT(batchedBytecode_ <= MaxCodeSectionBytes);
return true;
}
bool ModuleGenerator::finishFuncDefs() {
MOZ_ASSERT(!finishedFuncDefs_);
if (currentTask_ && !locallyCompileCurrentTask()) {
return false;
}
finishedFuncDefs_ = true;
return true;
}
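// Debug-only sanity checks on a finished code block: verify that all metadata
// that is required to be sorted really is sorted, and (on supported
// architectures) that stackmaps and trap sites point at plausible
// instructions.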
static void CheckCodeBlock(const CodeBlock& codeBlock) {
#if defined(DEBUG)
// Assert all sorted metadata is sorted.
uint32_t last = 0;
for (const CodeRange& codeRange : codeBlock.codeRanges) {
MOZ_ASSERT(codeRange.begin() >= last);
last = codeRange.end();
}
last = 0;
for (const CallSite& callSite : codeBlock.callSites) {
MOZ_ASSERT(callSite.returnAddressOffset() >= last);
last = callSite.returnAddressOffset();
}
for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
last = 0;
for (const TrapSite& trapSite : codeBlock.trapSites[trap]) {
MOZ_ASSERT(trapSite.pcOffset >= last);
last = trapSite.pcOffset;
}
}
last = 0;
for (const CodeRangeUnwindInfo& info : codeBlock.codeRangeUnwindInfos) {
MOZ_ASSERT(info.offset() >= last);
last = info.offset();
}
// Try notes should be sorted so that the ends of their ranges are in rising
// order; that way the innermost catch handler is chosen first.
last = 0;
for (const wasm::TryNote& tryNote : codeBlock.tryNotes) {
MOZ_ASSERT(tryNote.tryBodyEnd() >= last);
MOZ_ASSERT(tryNote.tryBodyEnd() > tryNote.tryBodyBegin());
last = tryNote.tryBodyBegin();
}
// Check that the stackmap vector is sorted with no duplicates, and each
// entry points to a plausible instruction.
const uint8_t* previousNextInsnAddr = nullptr;
for (size_t i = 0; i < codeBlock.stackMaps.length(); i++) {
const StackMaps::Maplet& maplet = codeBlock.stackMaps.get(i);
MOZ_ASSERT_IF(i > 0, uintptr_t(maplet.nextInsnAddr) >
uintptr_t(previousNextInsnAddr));
previousNextInsnAddr = maplet.nextInsnAddr;
MOZ_ASSERT(IsPlausibleStackMapKey(maplet.nextInsnAddr),
"wasm stackmap does not reference a valid insn");
}
# if (defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \
defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_ARM) || \
defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_MIPS64))
// Check that each trapsite is associated with a plausible instruction. The
// required instruction kind depends on the trapsite kind.
//
// NOTE: currently enabled on x86_{32,64}, arm{32,64}, loongson64 and mips64.
// Ideally it should be extended to riscv64 too.
//
for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
const TrapSiteVector& trapSites = codeBlock.trapSites[trap];
for (const TrapSite& trapSite : trapSites) {
const uint8_t* insnAddr = ((const uint8_t*)(codeBlock.segment->base())) +
uintptr_t(trapSite.pcOffset);
// `expected` describes the kind of instruction we expect to see at
// `insnAddr`. Find out what is actually there and check it matches.
const TrapMachineInsn expected = trapSite.insn;
mozilla::Maybe<TrapMachineInsn> actual =
SummarizeTrapInstruction(insnAddr);
bool valid = actual.isSome() && actual.value() == expected;
// This is useful for diagnosing validation failures.
// if (!valid) {
// fprintf(stderr,
// "FAIL: reason=%-22s expected=%-12s "
// "pcOffset=%-5u addr= %p\n",
// NameOfTrap(trap), NameOfTrapMachineInsn(expected),
// trapSite.pcOffset, insnAddr);
// if (actual.isSome()) {
// fprintf(stderr, "FAIL: identified as %s\n",
// actual.isSome() ? NameOfTrapMachineInsn(actual.value())
// : "(insn not identified)");
// }
// }
MOZ_ASSERT(valid, "wasm trapsite does not reference a valid insn");
}
}
# endif
#endif
}
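// Set up a fresh MacroAssembler, LinkData and CodeBlock of the given kind,
// ready for code to be generated and linked into it.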
bool ModuleGenerator::startCodeBlock(CodeBlockKind kind) {
MOZ_ASSERT(!masmScope_ && !linkData_ && !codeBlock_);
masmScope_.emplace(lifo_);
masm_ = &masmScope_->masm;
linkData_ = js::MakeUnique<LinkData>();
codeBlock_ = js::MakeUnique<CodeBlock>(kind);
return !!linkData_ && !!codeBlock_;
}
UniqueCodeBlock ModuleGenerator::finishCodeBlock(UniqueLinkData* linkData) {
// Now that all functions and stubs are generated and their CodeRanges
// known, patch all calls (which can emit far jumps) and far jumps. Linking
// can emit tiny far-jump stubs, so there is an ordering dependency here.
if (!linkCallSites()) {
return nullptr;
}
for (CallFarJump far : callFarJumps_) {
if (funcIsCompiledInBlock(far.targetFuncIndex)) {
masm_->patchFarJump(
jit::CodeOffset(far.jumpOffset),
funcCodeRangeInBlock(far.targetFuncIndex).funcUncheckedCallEntry());
} else if (!linkData_->callFarJumps.append(far)) {
return nullptr;
}
}
lastPatchedCallSite_ = 0;
startOfUnpatchedCallsites_ = 0;
callSiteTargets_.clear();
callFarJumps_.clear();
// None of the linking or far-jump operations should emit masm metadata.
MOZ_ASSERT(masm_->callSites().empty());
MOZ_ASSERT(masm_->callSiteTargets().empty());
MOZ_ASSERT(masm_->trapSites().empty());
MOZ_ASSERT(masm_->symbolicAccesses().empty());
MOZ_ASSERT(masm_->tryNotes().empty());