/* (Removed non-source page chrome accidentally captured above the license
   header: "Source code", "Revision control", "Copy as Markdown",
   "Other Tools" — these lines are not C++ and broke compilation.) */
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/BaselineCodeGen.h"
#include "mozilla/Casting.h"
#include "gc/GC.h"
#include "jit/BaselineIC.h"
#include "jit/BaselineJIT.h"
#include "jit/CacheIRCompiler.h"
#include "jit/CacheIRGenerator.h"
#include "jit/CalleeToken.h"
#include "jit/FixedList.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/JitcodeMap.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitSpewer.h"
#include "jit/Linker.h"
#include "jit/PerfSpewer.h"
#include "jit/SharedICHelpers.h"
#include "jit/TrialInlining.h"
#include "jit/VMFunctions.h"
#include "js/friend/ErrorMessages.h" // JSMSG_*
#include "js/UniquePtr.h"
#include "vm/AsyncFunction.h"
#include "vm/AsyncIteration.h"
#include "vm/BuiltinObjectKind.h"
#include "vm/EnvironmentObject.h"
#include "vm/FunctionFlags.h" // js::FunctionFlags
#include "vm/Interpreter.h"
#include "vm/JSFunction.h"
#include "vm/Time.h"
#ifdef MOZ_VTUNE
# include "vtune/VTuneWrapper.h"
#endif
#include "debugger/DebugAPI-inl.h"
#include "jit/BaselineFrameInfo-inl.h"
#include "jit/JitScript-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/SharedICHelpers-inl.h"
#include "jit/VMFunctionList-inl.h"
#include "vm/Interpreter-inl.h"
#include "vm/JSScript-inl.h"
using namespace js;
using namespace js::jit;
using JS::TraceKind;
using mozilla::AssertedCast;
using mozilla::Maybe;
namespace js {
class PlainObject;
namespace jit {
// Per-compilation handler state for the Baseline *compiler*: remembers the
// script, its bytecode analysis, the current pc (starting at the first op),
// and the index of the next IC entry to consume.
BaselineCompilerHandler::BaselineCompilerHandler(JSContext* cx,
                                                 MacroAssembler& masm,
                                                 TempAllocator& alloc,
                                                 JSScript* script)
    : frame_(script, masm),
      alloc_(alloc),
      analysis_(alloc, script),
#ifdef DEBUG
      masm_(masm),
#endif
      script_(script),
      pc_(script->code()),
      icEntryIndex_(0),
      // Debuggee scripts are compiled with debug instrumentation by default.
      compileDebugInstrumentation_(script->isDebuggee()),
      ionCompileable_(IsIonEnabled(cx) && CanIonCompileScript(cx, script)) {
}

// Handler state for the script-agnostic Baseline interpreter generator; it
// only needs the frame helper.
BaselineInterpreterHandler::BaselineInterpreterHandler(JSContext* cx,
                                                       MacroAssembler& masm)
    : frame_(masm) {}

// Shared codegen base for both the compiler and the interpreter generator.
// Extra arguments are forwarded to the Handler's constructor.
template <typename Handler>
template <typename... HandlerArgs>
BaselineCodeGen<Handler>::BaselineCodeGen(JSContext* cx, TempAllocator& alloc,
                                          HandlerArgs&&... args)
    : handler(cx, masm, std::forward<HandlerArgs>(args)...),
      cx(cx),
      masm(cx, alloc),
      frame(handler.frame()) {}

BaselineCompiler::BaselineCompiler(JSContext* cx, TempAllocator& alloc,
                                   JSScript* script)
    : BaselineCodeGen(cx, alloc, /* HandlerArgs = */ alloc, script),
      profilerPushToggleOffset_() {
#ifdef JS_CODEGEN_NONE
  // No code generator on this configuration; Baseline cannot be used.
  MOZ_CRASH();
#endif
}

BaselineInterpreterGenerator::BaselineInterpreterGenerator(JSContext* cx,
                                                           TempAllocator& alloc)
    : BaselineCodeGen(cx, alloc /* no handlerArgs */) {}
// Set up per-compilation state: bytecode analysis, one Label per bytecode
// byte, and the frame helper. Returns false on allocation failure.
bool BaselineCompilerHandler::init(JSContext* cx) {
  // The bytecode analysis feeds everything below.
  if (!analysis_.init(alloc_)) {
    return false;
  }

  // Allocate the label array and placement-new each entry.
  const uint32_t numBytes = script_->length();
  if (!labels_.init(alloc_, numBytes)) {
    return false;
  }
  for (uint32_t labelIndex = 0; labelIndex < numBytes; labelIndex++) {
    new (&labels_[labelIndex]) Label();
  }

  return frame_.init(alloc_);
}
// Forward initialization to the handler; all per-compilation state lives
// there.
bool BaselineCompiler::init() { return handler.init(cx); }
// Append a RetAddrEntry for a call just emitted at the current pc. Entries
// are appended in emission order, so both pc offsets and return offsets stay
// sorted; the asserts below enforce that invariant. Returns false on OOM.
bool BaselineCompilerHandler::recordCallRetAddr(JSContext* cx,
                                                RetAddrEntry::Kind kind,
                                                uint32_t retOffset) {
  uint32_t pcOffset = script_->pcToOffset(pc_);

  // Entries must be sorted by pcOffset for binary search to work.
  // See BaselineScript::retAddrEntryFromPCOffset.
  MOZ_ASSERT_IF(!retAddrEntries_.empty(),
                retAddrEntries_.back().pcOffset() <= pcOffset);

  // Similarly, entries must be sorted by return offset and this offset must be
  // unique. See BaselineScript::retAddrEntryFromReturnOffset.
  // (The check is skipped on masm OOM, where offsets may be bogus.)
  MOZ_ASSERT_IF(!retAddrEntries_.empty() && !masm_.oom(),
                retAddrEntries_.back().returnOffset().offset() < retOffset);

  if (!retAddrEntries_.emplaceBack(pcOffset, kind, CodeOffset(retOffset))) {
    ReportOutOfMemory(cx);
    return false;
  }

  return true;
}
// The interpreter only records the return offsets of the three (unique)
// debugger VM calls; all other call kinds are ignored. Never fails.
bool BaselineInterpreterHandler::recordCallRetAddr(JSContext* cx,
                                                   RetAddrEntry::Kind kind,
                                                   uint32_t retOffset) {
  if (kind == RetAddrEntry::Kind::DebugPrologue) {
    MOZ_ASSERT(callVMOffsets_.debugPrologueOffset == 0,
               "expected single DebugPrologue call");
    callVMOffsets_.debugPrologueOffset = retOffset;
  } else if (kind == RetAddrEntry::Kind::DebugEpilogue) {
    MOZ_ASSERT(callVMOffsets_.debugEpilogueOffset == 0,
               "expected single DebugEpilogue call");
    callVMOffsets_.debugEpilogueOffset = retOffset;
  } else if (kind == RetAddrEntry::Kind::DebugAfterYield) {
    MOZ_ASSERT(callVMOffsets_.debugAfterYieldOffset == 0,
               "expected single DebugAfterYield call");
    callVMOffsets_.debugAfterYieldOffset = retOffset;
  }
  return true;
}
// Remember the offset of a toggled jump so debug instrumentation can be
// flipped on/off later. Reports and returns false on OOM.
bool BaselineInterpreterHandler::addDebugInstrumentationOffset(
    JSContext* cx, CodeOffset offset) {
  if (debugInstrumentationOffsets_.append(offset.offset())) {
    return true;
  }
  ReportOutOfMemory(cx);
  return false;
}
// Compile handler.script() to Baseline jitcode: emit prologue, body and
// epilogue, link the code, then create, fill in, and publish the
// BaselineScript. Returns Method_Compiled on success, Method_Error on
// OOM/failure, or whatever emitBody() returned otherwise.
MethodStatus BaselineCompiler::compile() {
  AutoCreatedBy acb(masm, "BaselineCompiler::compile");

  JSScript* script = handler.script();
  JitSpew(JitSpew_BaselineScripts, "Baseline compiling script %s:%u:%u (%p)",
          script->filename(), script->lineno(), script->column(), script);

  JitSpew(JitSpew_Codegen, "# Emitting baseline code for script %s:%u:%u",
          script->filename(), script->lineno(), script->column());

  // Track time spent in Baseline compilation for this realm.
  AutoIncrementalTimer timer(cx->realm()->timers.baselineCompileTime);

  // Keep the JitScript alive for the duration of the compilation.
  AutoKeepJitScripts keepJitScript(cx);
  if (!script->ensureHasJitScript(cx, keepJitScript)) {
    return Method_Error;
  }

  // When code coverage is enabled, we have to create the ScriptCounts if they
  // do not exist.
  if (!script->hasScriptCounts() && cx->realm()->collectCoverageForDebug()) {
    if (!script->initScriptCounts(cx)) {
      return Method_Error;
    }
  }

  // Suppress GC during compilation.
  gc::AutoSuppressGC suppressGC(cx);

  MOZ_ASSERT(!script->hasBaselineScript());

  if (!emitPrologue()) {
    return Method_Error;
  }

  MethodStatus status = emitBody();
  if (status != Method_Compiled) {
    return status;
  }

  if (!emitEpilogue()) {
    return Method_Error;
  }

  if (!emitOutOfLinePostBarrierSlot()) {
    return Method_Error;
  }

  AutoCreatedBy acb2(masm, "exception_tail");

  Linker linker(masm);
  if (masm.oom()) {
    ReportOutOfMemory(cx);
    return Method_Error;
  }

  JitCode* code = linker.newCode(cx, CodeKind::Baseline);
  if (!code) {
    return Method_Error;
  }

  // Allocate the BaselineScript sized for the entry tables collected during
  // codegen; a runtime-aware delete policy owns it until publication.
  UniquePtr<BaselineScript> baselineScript(
      BaselineScript::New(
          cx, warmUpCheckPrologueOffset_.offset(),
          profilerEnterFrameToggleOffset_.offset(),
          profilerExitFrameToggleOffset_.offset(),
          handler.retAddrEntries().length(), handler.osrEntries().length(),
          debugTrapEntries_.length(), script->resumeOffsets().size()),
      JS::DeletePolicy<BaselineScript>(cx->runtime()));
  if (!baselineScript) {
    return Method_Error;
  }

  baselineScript->setMethod(code);

  JitSpew(JitSpew_BaselineScripts,
          "Created BaselineScript %p (raw %p) for %s:%u:%u",
          (void*)baselineScript.get(), (void*)code->raw(), script->filename(),
          script->lineno(), script->column());

  // Copy the tables collected while emitting code into the BaselineScript.
  baselineScript->copyRetAddrEntries(handler.retAddrEntries().begin());
  baselineScript->copyOSREntries(handler.osrEntries().begin());
  baselineScript->copyDebugTrapEntries(debugTrapEntries_.begin());

  // If profiler instrumentation is enabled, toggle instrumentation on.
  if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(
          cx->runtime())) {
    baselineScript->toggleProfilerInstrumentation(true);
  }

  // Compute native resume addresses for the script's resume offsets.
  baselineScript->computeResumeNativeOffsets(script, resumeOffsetEntries_);

  if (compileDebugInstrumentation()) {
    baselineScript->setHasDebugInstrumentation();
  }

  // Always register a native => bytecode mapping entry, since profiler can be
  // turned on with baseline jitcode on stack, and baseline jitcode cannot be
  // invalidated.
  {
    JitSpew(JitSpew_Profiling,
            "Added JitcodeGlobalEntry for baseline script %s:%u:%u (%p)",
            script->filename(), script->lineno(), script->column(),
            baselineScript.get());

    // Generate profiling string.
    UniqueChars str = GeckoProfilerRuntime::allocProfileString(cx, script);
    if (!str) {
      return Method_Error;
    }

    auto entry = MakeJitcodeGlobalEntry<BaselineEntry>(
        cx, code, code->raw(), code->rawEnd(), script, std::move(str));
    if (!entry) {
      return Method_Error;
    }

    JitcodeGlobalTable* globalTable =
        cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
    if (!globalTable->addEntry(std::move(entry))) {
      ReportOutOfMemory(cx);
      return Method_Error;
    }

    // Mark the jitcode as having a bytecode map.
    code->setHasBytecodeMap();
  }

  // Publish: the JitScript now owns the BaselineScript.
  script->jitScript()->setBaselineScript(script, baselineScript.release());

  perfSpewer_.saveProfile(script, code);

#ifdef MOZ_VTUNE
  vtune::MarkScript(code, script, "baseline");
#endif

  return Method_Compiled;
}
// On most platforms we use a dedicated bytecode PC register to avoid many
// dependent loads and stores for sequences of simple bytecode ops. This
// register must be saved/restored around VM and IC calls.
//
// On 32-bit x86 we don't have enough registers for this (because R0-R2 require
// 6 registers) so there we always store the pc on the frame.
static constexpr bool HasInterpreterPCReg() {
  return InterpreterPCReg != InvalidReg;
}

// Return a register holding the current bytecode pc: the dedicated PC
// register when one exists, otherwise |scratch| loaded from the frame's
// interpreter-PC slot.
static Register LoadBytecodePC(MacroAssembler& masm, Register scratch) {
  if (HasInterpreterPCReg()) {
    return InterpreterPCReg;
  }

  Address pcAddr(FramePointer, BaselineFrame::reverseOffsetOfInterpreterPC());
  masm.loadPtr(pcAddr, scratch);
  return scratch;
}
// ---- Bytecode operand loaders ----
// Each helper loads the operand stored immediately after the opcode byte
// (hence the sizeof(jsbytecode) offset) at the current bytecode pc.

// Load the int8 operand into |dest|, sign-extended.
static void LoadInt8Operand(MacroAssembler& masm, Register dest) {
  Register pc = LoadBytecodePC(masm, dest);
  masm.load8SignExtend(Address(pc, sizeof(jsbytecode)), dest);
}

// Load the uint8 operand into |dest|, zero-extended.
static void LoadUint8Operand(MacroAssembler& masm, Register dest) {
  Register pc = LoadBytecodePC(masm, dest);
  masm.load8ZeroExtend(Address(pc, sizeof(jsbytecode)), dest);
}

// Load the uint16 operand into |dest|, zero-extended.
static void LoadUint16Operand(MacroAssembler& masm, Register dest) {
  Register pc = LoadBytecodePC(masm, dest);
  masm.load16ZeroExtend(Address(pc, sizeof(jsbytecode)), dest);
}

// Load the int32 operand into |dest|.
static void LoadInt32Operand(MacroAssembler& masm, Register dest) {
  Register pc = LoadBytecodePC(masm, dest);
  masm.load32(Address(pc, sizeof(jsbytecode)), dest);
}

// As LoadInt32Operand, but sign-extends to pointer width and the caller
// supplies the pc register explicitly.
static void LoadInt32OperandSignExtendToPtr(MacroAssembler& masm, Register pc,
                                            Register dest) {
  masm.load32SignExtendToPtr(Address(pc, sizeof(jsbytecode)), dest);
}

// Load the uint24 operand found at pc + |offset| into |dest|.
static void LoadUint24Operand(MacroAssembler& masm, size_t offset,
                              Register dest) {
  // Load the opcode and operand, then right shift to discard the opcode
  // byte. (The original comment said "left shift", but the code below uses
  // rshift32.)
  Register pc = LoadBytecodePC(masm, dest);
  masm.load32(Address(pc, offset), dest);
  masm.rshift32(Imm32(8), dest);
}

// Load an inline Value operand into |dest|.
static void LoadInlineValueOperand(MacroAssembler& masm, ValueOperand dest) {
  // Note: the Value might be unaligned but as above we rely on all our
  // platforms having appropriate support for unaligned accesses (except for
  // floating point instructions on ARM).
  Register pc = LoadBytecodePC(masm, dest.scratchReg());
  masm.loadUnalignedValue(Address(pc, sizeof(jsbytecode)), dest);
}
// Load the executing JSScript* into |dest|. The compiler bakes the pointer
// into the code; the interpreter reads it from the frame.
template <>
void BaselineCompilerCodeGen::loadScript(Register dest) {
  masm.movePtr(ImmGCPtr(handler.script()), dest);
}

template <>
void BaselineInterpreterCodeGen::loadScript(Register dest) {
  masm.loadPtr(frame.addressOfInterpreterScript(), dest);
}

// Spill/reload the dedicated interpreter PC register around calls. No-ops for
// the compiler (it has no interpreter pc) and on platforms without a
// dedicated PC register.
template <>
void BaselineCompilerCodeGen::saveInterpreterPCReg() {}

template <>
void BaselineInterpreterCodeGen::saveInterpreterPCReg() {
  if (HasInterpreterPCReg()) {
    masm.storePtr(InterpreterPCReg, frame.addressOfInterpreterPC());
  }
}

template <>
void BaselineCompilerCodeGen::restoreInterpreterPCReg() {}

template <>
void BaselineInterpreterCodeGen::restoreInterpreterPCReg() {
  if (HasInterpreterPCReg()) {
    masm.loadPtr(frame.addressOfInterpreterPC(), InterpreterPCReg);
  }
}
template <>
void BaselineCompilerCodeGen::emitInitializeLocals() {
  // Initialize all locals to |undefined|. Lexical bindings are temporal
  // dead zoned in bytecode.
  size_t n = frame.nlocals();
  if (n == 0) {
    return;
  }

  // Use R0 to minimize code size. If the number of locals to push is <
  // LOOP_UNROLL_FACTOR, then the initialization pushes are emitted directly
  // and inline. Otherwise, they're emitted in a partially unrolled loop.
  static const size_t LOOP_UNROLL_FACTOR = 4;
  size_t toPushExtra = n % LOOP_UNROLL_FACTOR;

  masm.moveValue(UndefinedValue(), R0);

  // Handle any extra pushes left over by the optional unrolled loop below.
  for (size_t i = 0; i < toPushExtra; i++) {
    masm.pushValue(R0);
  }

  // Partially unrolled loop of pushes.
  if (n >= LOOP_UNROLL_FACTOR) {
    size_t toPush = n - toPushExtra;
    MOZ_ASSERT(toPush % LOOP_UNROLL_FACTOR == 0);
    MOZ_ASSERT(toPush >= LOOP_UNROLL_FACTOR);
    // R1.scratchReg() counts the remaining pushes; the loop decrements it by
    // LOOP_UNROLL_FACTOR per iteration until it hits zero.
    masm.move32(Imm32(toPush), R1.scratchReg());

    // Emit unrolled loop with 4 pushes per iteration.
    Label pushLoop;
    masm.bind(&pushLoop);
    for (size_t i = 0; i < LOOP_UNROLL_FACTOR; i++) {
      masm.pushValue(R0);
    }
    masm.branchSub32(Assembler::NonZero, Imm32(LOOP_UNROLL_FACTOR),
                     R1.scratchReg(), &pushLoop);
  }
}
template <>
void BaselineInterpreterCodeGen::emitInitializeLocals() {
  // Push |undefined| for all locals.

  // Load script->nfixed() into |scratch| by chasing
  // script -> sharedData -> immutableScriptData.
  Register scratch = R0.scratchReg();
  loadScript(scratch);
  masm.loadPtr(Address(scratch, JSScript::offsetOfSharedData()), scratch);
  masm.loadPtr(Address(scratch, SharedImmutableScriptData::offsetOfISD()),
               scratch);
  masm.load32(Address(scratch, ImmutableScriptData::offsetOfNfixed()), scratch);

  // Push one |undefined| per local, counting |scratch| down to zero.
  Label top, done;
  masm.branchTest32(Assembler::Zero, scratch, scratch, &done);
  masm.bind(&top);
  {
    masm.pushValue(UndefinedValue());
    masm.branchSub32(Assembler::NonZero, Imm32(1), scratch, &top);
  }
  masm.bind(&done);
}
// On input:
//  R2.scratchReg() contains object being written to.
// Called with the baseline stack synced, except for R0 which is preserved.
// All other registers are usable as scratch.
// This calls:
//    void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
template <typename Handler>
bool BaselineCodeGen<Handler>::emitOutOfLinePostBarrierSlot() {
  AutoCreatedBy acb(masm,
                    "BaselineCodeGen<Handler>::emitOutOfLinePostBarrierSlot");

  // Nothing jumped to the slot, so don't emit it.
  if (!postBarrierSlot_.used()) {
    return true;
  }

  masm.bind(&postBarrierSlot_);

  saveInterpreterPCReg();

  Register objReg = R2.scratchReg();
  // Pick a scratch register that is not R0 (preserved) or the object.
  AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
  MOZ_ASSERT(!regs.has(FramePointer));
  regs.take(R0);
  regs.take(objReg);
  Register scratch = regs.takeAny();
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
  // On ARM, save the link register before calling. It contains the return
  // address. The |masm.ret()| later will pop this into |pc| to return.
  masm.push(lr);
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
  masm.push(ra);
#elif defined(JS_CODEGEN_LOONG64)
  masm.push(ra);
#endif
  // Preserve R0 across the ABI call.
  masm.pushValue(R0);

  using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmPtr(cx->runtime()), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(objReg);
  masm.callWithABI<Fn, PostWriteBarrier>();

  restoreInterpreterPCReg();

  masm.popValue(R0);
  masm.ret();
  return true;
}
// Scan a cache IR stub's fields and create an allocation site for any that
// refer to the catch-all unknown allocation site. This will be the case for
// stubs created when running in the interpreter. This happens on transition
// to baseline. Returns false if a new site could not be created (OOM or
// per-zone limit).
static bool CreateAllocSitesForCacheIRStub(JSScript* script,
                                           ICCacheIRStub* stub) {
  const CacheIRStubInfo* stubInfo = stub->stubInfo();
  uint8_t* stubData = stub->stubDataStart();

  size_t offset = 0;
  for (uint32_t fieldIndex = 0;; fieldIndex++) {
    StubField::Type fieldType = stubInfo->fieldType(fieldIndex);
    if (fieldType == StubField::Type::Limit) {
      // End of the field list.
      return true;
    }
    if (fieldType == StubField::Type::AllocSite) {
      gc::AllocSite* site =
          stubInfo->getPtrStubField<ICCacheIRStub, gc::AllocSite>(stub, offset);
      if (site->kind() == gc::AllocSite::Kind::Unknown) {
        // Replace the catch-all site with a fresh script-owned one.
        gc::AllocSite* newSite = script->createAllocSite();
        if (!newSite) {
          return false;
        }
        stubInfo->replaceStubRawWord(stubData, offset, uintptr_t(site),
                                     uintptr_t(newSite));
      }
    }
    offset += StubField::sizeInBytes(fieldType);
  }
}
// Walk the IC chain for |entryIndex| and create alloc sites for every
// CacheIR stub before the fallback stub.
static void CreateAllocSitesForICChain(JSScript* script, uint32_t entryIndex) {
  JitScript* jitScript = script->jitScript();
  for (ICStub* stub = jitScript->icEntry(entryIndex).firstStub();
       !stub->isFallback(); stub = stub->toCacheIRStub()->next()) {
    if (!CreateAllocSitesForCacheIRStub(script, stub->toCacheIRStub())) {
      // This is an optimization and safe to skip if we hit OOM or per-zone
      // limit.
      return;
    }
  }
}
template <>
bool BaselineCompilerCodeGen::emitNextIC() {
  AutoCreatedBy acb(masm, "emitNextIC");

  // Emit a call to an IC stored in JitScript. Calls to this must match the
  // ICEntry order in JitScript: first the non-op IC entries for |this| and
  // formal arguments, then the for-op IC entries for JOF_IC ops.

  JSScript* script = handler.script();
  uint32_t pcOffset = script->pcToOffset(handler.pc());

  // We don't use every ICEntry and we can skip unreachable ops, so we have
  // to loop until we find an ICEntry for the current pc.
  const ICFallbackStub* stub;
  uint32_t entryIndex;
  do {
    stub = script->jitScript()->fallbackStub(handler.icEntryIndex());
    entryIndex = handler.icEntryIndex();
    handler.moveToNextICEntry();
  } while (stub->pcOffset() < pcOffset);
  MOZ_ASSERT(stub->pcOffset() == pcOffset);
  MOZ_ASSERT(BytecodeOpHasIC(JSOp(*handler.pc())));

  // Ops that can allocate get real alloc sites in place of the catch-all one.
  if (BytecodeOpCanHaveAllocSite(JSOp(*handler.pc()))) {
    CreateAllocSitesForICChain(script, entryIndex);
  }

  // Load stub pointer into ICStubReg.
  masm.loadPtr(frame.addressOfICScript(), ICStubReg);
  size_t firstStubOffset = ICScript::offsetOfFirstStub(entryIndex);
  masm.loadPtr(Address(ICStubReg, firstStubOffset), ICStubReg);

  CodeOffset returnOffset;
  EmitCallIC(masm, &returnOffset);

  // Record the IC call's return address in the RetAddrEntry table.
  RetAddrEntry::Kind kind = RetAddrEntry::Kind::IC;
  if (!handler.retAddrEntries().emplaceBack(pcOffset, kind, returnOffset)) {
    ReportOutOfMemory(cx);
    return false;
  }

  return true;
}
template <>
bool BaselineInterpreterCodeGen::emitNextIC() {
  // The interpreter does not know the IC statically: load the current ICEntry
  // from the frame, fetch its first stub, and call the stub code. The PC
  // register (if any) is not preserved across the call.
  saveInterpreterPCReg();
  masm.loadPtr(frame.addressOfInterpreterICEntry(), ICStubReg);
  masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);
  masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
  uint32_t returnOffset = masm.currentOffset();
  restoreInterpreterPCReg();

  // If this is an IC for a bytecode op where Ion may inline scripts, we need to
  // record the return offset for Ion bailouts.
  if (handler.currentOp()) {
    JSOp op = *handler.currentOp();
    MOZ_ASSERT(BytecodeOpHasIC(op));
    if (IsIonInlinableOp(op)) {
      if (!handler.icReturnOffsets().emplaceBack(returnOffset, op)) {
        return false;
      }
    }
  }

  return true;
}
// Compute the current frame size in bytes into |dest|. The compiler knows it
// statically; the interpreter computes FramePointer - StackPointer.
template <>
void BaselineCompilerCodeGen::computeFrameSize(Register dest) {
  MOZ_ASSERT(!inCall_, "must not be called in the middle of a VM call");
  masm.move32(Imm32(frame.frameSize()), dest);
}

template <>
void BaselineInterpreterCodeGen::computeFrameSize(Register dest) {
  // dest := FramePointer - StackPointer.
  MOZ_ASSERT(!inCall_, "must not be called in the middle of a VM call");
  masm.mov(FramePointer, dest);
  masm.subStackPtrFrom(dest);
}
// Begin a VM-call sequence: record the current stack depth (so
// callVMInternal can assert all arguments were pushed) and sync the
// expression stack.
template <typename Handler>
void BaselineCodeGen<Handler>::prepareVMCall() {
  pushedBeforeCall_ = masm.framePushed();
#ifdef DEBUG
  inCall_ = true;
#endif

  // Ensure everything is synced.
  frame.syncStack(0);
}
// Store the frame size to the debug slot (debug builds only) and push the
// BaselineJS frame descriptor ahead of a VM call.
template <>
void BaselineCompilerCodeGen::storeFrameSizeAndPushDescriptor(
    uint32_t argSize, Register scratch) {
#ifdef DEBUG
  masm.store32(Imm32(frame.frameSize()), frame.addressOfDebugFrameSize());
#endif

  masm.pushFrameDescriptor(FrameType::BaselineJS);
}

template <>
void BaselineInterpreterCodeGen::storeFrameSizeAndPushDescriptor(
    uint32_t argSize, Register scratch) {
#ifdef DEBUG
  // Store the frame size without VMFunction arguments in debug builds.
  // scratch := FramePointer - StackPointer - argSize.
  masm.mov(FramePointer, scratch);
  masm.subStackPtrFrom(scratch);
  masm.sub32(Imm32(argSize), scratch);
  masm.store32(scratch, frame.addressOfDebugFrameSize());
#endif

  masm.pushFrameDescriptor(FrameType::BaselineJS);
}
// Byte size of |fun|'s explicit arguments on the stack (one pointer-sized
// slot per explicit argument).
static uint32_t GetVMFunctionArgSize(const VMFunctionData& fun) {
  const size_t slotCount = fun.explicitStackSlots();
  return slotCount * sizeof(void*);
}
// Shared tail of every VM call: push the frame descriptor, call the VM
// wrapper for |id|, pop the arguments, and record the return address under
// |kind|. |phase| distinguishes calls made before vs. after locals are
// pushed (it affects the debug frame-size bookkeeping).
template <typename Handler>
bool BaselineCodeGen<Handler>::callVMInternal(VMFunctionId id,
                                              RetAddrEntry::Kind kind,
                                              CallVMPhase phase) {
#ifdef DEBUG
  // Assert prepareVMCall() has been called.
  MOZ_ASSERT(inCall_);
  inCall_ = false;
#endif

  TrampolinePtr code = cx->runtime()->jitRuntime()->getVMWrapper(id);
  const VMFunctionData& fun = GetVMFunction(id);
  uint32_t argSize = GetVMFunctionArgSize(fun);

  // Assert all arguments were pushed.
  MOZ_ASSERT(masm.framePushed() - pushedBeforeCall_ == argSize);

  // The dedicated PC register (if any) is not preserved across the call.
  saveInterpreterPCReg();

  if (phase == CallVMPhase::AfterPushingLocals) {
    storeFrameSizeAndPushDescriptor(argSize, R0.scratchReg());
  } else {
    MOZ_ASSERT(phase == CallVMPhase::BeforePushingLocals);
#ifdef DEBUG
    uint32_t frameBaseSize = BaselineFrame::frameSizeForNumValueSlots(0);
    masm.store32(Imm32(frameBaseSize), frame.addressOfDebugFrameSize());
#endif
    masm.pushFrameDescriptor(FrameType::BaselineJS);
  }
  MOZ_ASSERT(fun.expectTailCall == NonTailCall);
  // Perform the call.
  masm.call(code);
  uint32_t callOffset = masm.currentOffset();

  // Pop arguments from framePushed.
  masm.implicitPop(argSize);

  restoreInterpreterPCReg();

  return handler.recordCallRetAddr(cx, kind, callOffset);
}
// Thin typed wrapper: map the VM function pointer to its VMFunctionId and
// dispatch to callVMInternal.
template <typename Handler>
template <typename Fn, Fn fn>
bool BaselineCodeGen<Handler>::callVM(RetAddrEntry::Kind kind,
                                      CallVMPhase phase) {
  return callVMInternal(VMFunctionToId<Fn, fn>::id, kind, phase);
}
// Emit the over-recursion check: if the stack pointer (optionally minus the
// script's slot area) is at or below the jit stack limit, call
// CheckOverRecursedBaseline; otherwise skip the VM call.
template <typename Handler>
bool BaselineCodeGen<Handler>::emitStackCheck() {
  Label skipCall;
  if (handler.mustIncludeSlotsInStackCheck()) {
    // Subtract the size of script->nslots() first.
    Register scratch = R1.scratchReg();
    masm.moveStackPtrTo(scratch);
    subtractScriptSlotsSize(scratch, R2.scratchReg());
    masm.branchPtr(Assembler::BelowOrEqual,
                   AbsoluteAddress(cx->addressOfJitStackLimit()), scratch,
                   &skipCall);
  } else {
    masm.branchStackPtrRhs(Assembler::BelowOrEqual,
                           AbsoluteAddress(cx->addressOfJitStackLimit()),
                           &skipCall);
  }

  // Slow path: report over-recursion via the VM.
  prepareVMCall();
  masm.loadBaselineFramePtr(FramePointer, R1.scratchReg());
  pushArg(R1.scratchReg());

  const CallVMPhase phase = CallVMPhase::BeforePushingLocals;
  const RetAddrEntry::Kind kind = RetAddrEntry::Kind::StackCheck;

  using Fn = bool (*)(JSContext*, BaselineFrame*);
  if (!callVM<Fn, CheckOverRecursedBaseline>(kind, phase)) {
    return false;
  }

  masm.bind(&skipCall);
  return true;
}
// Emit an ABI call to FrameIsDebuggeeCheck for the current baseline frame.
static void EmitCallFrameIsDebuggeeCheck(MacroAssembler& masm) {
  using Fn = void (*)(BaselineFrame* frame);
  masm.setupUnalignedABICall(R0.scratchReg());
  masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
  masm.passABIArg(R0.scratchReg());
  masm.callWithABI<Fn, FrameIsDebuggeeCheck>();
}

template <>
bool BaselineCompilerCodeGen::emitIsDebuggeeCheck() {
  // The compiler only emits the check when compiling with debug
  // instrumentation.
  if (handler.compileDebugInstrumentation()) {
    EmitCallFrameIsDebuggeeCheck(masm);
  }
  return true;
}

template <>
bool BaselineInterpreterCodeGen::emitIsDebuggeeCheck() {
  // Use a toggled jump to call FrameIsDebuggeeCheck only if the debugger is
  // enabled.
  //
  // TODO(bug 1522394): consider having a cx->realm->isDebuggee guard before the
  // call. Consider moving the callWithABI out-of-line.

  Label skipCheck;
  CodeOffset toggleOffset = masm.toggledJump(&skipCheck);
  {
    saveInterpreterPCReg();
    EmitCallFrameIsDebuggeeCheck(masm);
    restoreInterpreterPCReg();
  }
  masm.bind(&skipCheck);
  // Record the toggle offset so the check can be switched on/off later.
  return handler.addDebugInstrumentationOffset(cx, toggleOffset);
}
// Emit code to bump the execution counter for |pc| when the script collects
// PC counts (code coverage). No-op when the script has no counts, or when no
// counter entry exists for this pc.
static void MaybeIncrementCodeCoverageCounter(MacroAssembler& masm,
                                              JSScript* script,
                                              jsbytecode* pc) {
  if (!script->hasScriptCounts()) {
    return;
  }
  PCCounts* counts = script->maybeGetPCCounts(pc);
  // maybeGetPCCounts ("maybe") can return nullptr for pcs without a counter
  // entry; guard instead of dereferencing unconditionally.
  if (!counts) {
    return;
  }
  uint64_t* counterAddr = &counts->numExec();
  masm.inc64(AbsoluteAddress(counterAddr));
}
template <>
bool BaselineCompilerCodeGen::emitHandleCodeCoverageAtPrologue() {
  // If the main instruction is not a jump target, then we emit the
  // corresponding code coverage counter.
  JSScript* script = handler.script();
  jsbytecode* main = script->main();
  if (!BytecodeIsJumpTarget(JSOp(*main))) {
    MaybeIncrementCodeCoverageCounter(masm, script, main);
  }
  return true;
}

template <>
bool BaselineInterpreterCodeGen::emitHandleCodeCoverageAtPrologue() {
  // The interpreter uses a toggled jump so coverage counting can be switched
  // on/off at runtime; remember the toggle offset for patching.
  Label skipCoverage;
  CodeOffset toggleOffset = masm.toggledJump(&skipCoverage);
  masm.call(handler.codeCoverageAtPrologueLabel());
  masm.bind(&skipCoverage);
  return handler.codeCoverageOffsets().append(toggleOffset.offset());
}
// reg -= script->nslots() * sizeof(Value). The compiler knows nslots
// statically; the interpreter loads it from the script's immutable data.
template <>
void BaselineCompilerCodeGen::subtractScriptSlotsSize(Register reg,
                                                      Register scratch) {
  uint32_t slotsSize = handler.script()->nslots() * sizeof(Value);
  masm.subPtr(Imm32(slotsSize), reg);
}

template <>
void BaselineInterpreterCodeGen::subtractScriptSlotsSize(Register reg,
                                                         Register scratch) {
  // reg = reg - script->nslots() * sizeof(Value)
  MOZ_ASSERT(reg != scratch);
  loadScript(scratch);
  masm.loadPtr(Address(scratch, JSScript::offsetOfSharedData()), scratch);
  masm.loadPtr(Address(scratch, SharedImmutableScriptData::offsetOfISD()),
               scratch);
  masm.load32(Address(scratch, ImmutableScriptData::offsetOfNslots()), scratch);
  static_assert(sizeof(Value) == 8,
                "shift by 3 below assumes Value is 8 bytes");
  masm.lshiftPtr(Imm32(3), scratch);
  masm.subPtr(scratch, reg);
}
// Load the global lexical environment into |dest|. The compiler bakes in the
// pointer (no non-syntactic scope allowed); the interpreter walks
// realm -> active global -> global data -> lexical environment.
template <>
void BaselineCompilerCodeGen::loadGlobalLexicalEnvironment(Register dest) {
  MOZ_ASSERT(!handler.script()->hasNonSyntacticScope());
  masm.movePtr(ImmGCPtr(&cx->global()->lexicalEnvironment()), dest);
}

template <>
void BaselineInterpreterCodeGen::loadGlobalLexicalEnvironment(Register dest) {
  masm.loadPtr(AbsoluteAddress(cx->addressOfRealm()), dest);
  masm.loadPtr(Address(dest, Realm::offsetOfActiveGlobal()), dest);
  masm.loadPrivate(Address(dest, GlobalObject::offsetOfGlobalDataSlot()), dest);
  masm.loadPtr(Address(dest, GlobalObjectData::offsetOfLexicalEnvironment()),
               dest);
}

// Push the global lexical environment as an ObjectValue on the expression
// stack.
template <>
void BaselineCompilerCodeGen::pushGlobalLexicalEnvironmentValue(
    ValueOperand scratch) {
  frame.push(ObjectValue(cx->global()->lexicalEnvironment()));
}

template <>
void BaselineInterpreterCodeGen::pushGlobalLexicalEnvironmentValue(
    ValueOperand scratch) {
  loadGlobalLexicalEnvironment(scratch.scratchReg());
  masm.tagValue(JSVAL_TYPE_OBJECT, scratch.scratchReg(), scratch);
  frame.push(scratch);
}
// Load the global |this| value into |dest|. The compiler embeds the object;
// the interpreter reads the lexical environment's this-value slot.
template <>
void BaselineCompilerCodeGen::loadGlobalThisValue(ValueOperand dest) {
  JSObject* thisObj = cx->global()->lexicalEnvironment().thisObject();
  masm.moveValue(ObjectValue(*thisObj), dest);
}

template <>
void BaselineInterpreterCodeGen::loadGlobalThisValue(ValueOperand dest) {
  Register scratch = dest.scratchReg();
  loadGlobalLexicalEnvironment(scratch);
  static constexpr size_t SlotOffset =
      GlobalLexicalEnvironmentObject::offsetOfThisValueSlot();
  masm.loadValue(Address(scratch, SlotOffset), dest);
}
// Push the current script as a VM-call argument (immediate for the compiler,
// frame slot for the interpreter).
template <>
void BaselineCompilerCodeGen::pushScriptArg() {
  pushArg(ImmGCPtr(handler.script()));
}

template <>
void BaselineInterpreterCodeGen::pushScriptArg() {
  pushArg(frame.addressOfInterpreterScript());
}

// Push the current bytecode pc as a VM-call argument.
template <>
void BaselineCompilerCodeGen::pushBytecodePCArg() {
  pushArg(ImmPtr(handler.pc()));
}

template <>
void BaselineInterpreterCodeGen::pushBytecodePCArg() {
  if (HasInterpreterPCReg()) {
    pushArg(InterpreterPCReg);
  } else {
    pushArg(frame.addressOfInterpreterPC());
  }
}
// Return the GC thing of the given |type| referenced by the operand at |pc|.
static gc::Cell* GetScriptGCThing(JSScript* script, jsbytecode* pc,
                                  ScriptGCThingType type) {
  switch (type) {
    case ScriptGCThingType::Atom:
      return script->getAtom(pc);
    case ScriptGCThingType::String:
      return script->getString(pc);
    case ScriptGCThingType::RegExp:
      return script->getRegExp(pc);
    case ScriptGCThingType::Object:
      return script->getObject(pc);
    case ScriptGCThingType::Function:
      return script->getFunction(pc);
    case ScriptGCThingType::Scope:
      return script->getScope(pc);
    case ScriptGCThingType::BigInt:
      return script->getBigInt(pc);
  }
  MOZ_CRASH("Unexpected GCThing type");
}
// Load the GC thing of |type| referenced by the current op into |dest|. The
// compiler embeds the pointer; the interpreter indexes the script's private
// gcthings vector and strips the GCCellPtr tag bits.
template <>
void BaselineCompilerCodeGen::loadScriptGCThing(ScriptGCThingType type,
                                                Register dest,
                                                Register scratch) {
  gc::Cell* thing = GetScriptGCThing(handler.script(), handler.pc(), type);
  masm.movePtr(ImmGCPtr(thing), dest);
}

template <>
void BaselineInterpreterCodeGen::loadScriptGCThing(ScriptGCThingType type,
                                                   Register dest,
                                                   Register scratch) {
  MOZ_ASSERT(dest != scratch);

  // Load the index in |scratch|.
  LoadInt32Operand(masm, scratch);

  // Load the GCCellPtr.
  loadScript(dest);
  masm.loadPtr(Address(dest, JSScript::offsetOfPrivateData()), dest);
  masm.loadPtr(BaseIndex(dest, scratch, ScalePointer,
                         PrivateScriptData::offsetOfGCThings()),
               dest);

  // Clear the tag bits. The xor values must match the GCCellPtr tag for each
  // trace kind; the static_asserts check that.
  switch (type) {
    case ScriptGCThingType::Atom:
    case ScriptGCThingType::String:
      // Use xorPtr with a 32-bit immediate because it's more efficient than
      // andPtr on 64-bit.
      static_assert(uintptr_t(TraceKind::String) == 2,
                    "Unexpected tag bits for string GCCellPtr");
      masm.xorPtr(Imm32(2), dest);
      break;
    case ScriptGCThingType::RegExp:
    case ScriptGCThingType::Object:
    case ScriptGCThingType::Function:
      // No-op because GCCellPtr tag bits are zero for objects.
      static_assert(uintptr_t(TraceKind::Object) == 0,
                    "Unexpected tag bits for object GCCellPtr");
      break;
    case ScriptGCThingType::BigInt:
      // Use xorPtr with a 32-bit immediate because it's more efficient than
      // andPtr on 64-bit.
      static_assert(uintptr_t(TraceKind::BigInt) == 1,
                    "Unexpected tag bits for BigInt GCCellPtr");
      masm.xorPtr(Imm32(1), dest);
      break;
    case ScriptGCThingType::Scope:
      // Use xorPtr with a 32-bit immediate because it's more efficient than
      // andPtr on 64-bit.
      static_assert(uintptr_t(TraceKind::Scope) >= JS::OutOfLineTraceKindMask,
                    "Expected Scopes to have OutOfLineTraceKindMask tag");
      masm.xorPtr(Imm32(JS::OutOfLineTraceKindMask), dest);
      break;
  }

#ifdef DEBUG
  // Assert low bits are not set.
  Label ok;
  masm.branchTestPtr(Assembler::Zero, dest, Imm32(0b111), &ok);
  masm.assumeUnreachable("GC pointer with tag bits set");
  masm.bind(&ok);
#endif
}

// Push the GC thing of |type| for the current op as a VM-call argument.
template <>
void BaselineCompilerCodeGen::pushScriptGCThingArg(ScriptGCThingType type,
                                                   Register scratch1,
                                                   Register scratch2) {
  gc::Cell* thing = GetScriptGCThing(handler.script(), handler.pc(), type);
  pushArg(ImmGCPtr(thing));
}

template <>
void BaselineInterpreterCodeGen::pushScriptGCThingArg(ScriptGCThingType type,
                                                      Register scratch1,
                                                      Register scratch2) {
  loadScriptGCThing(type, scratch1, scratch2);
  pushArg(scratch1);
}
// Push the atom referenced by the current op's operand as a VM-call argument.
template <typename Handler>
void BaselineCodeGen<Handler>::pushScriptNameArg(Register scratch1,
                                                 Register scratch2) {
  pushScriptGCThingArg(ScriptGCThingType::Atom, scratch1, scratch2);
}

// Push the current op's uint8 operand as a VM-call argument.
template <>
void BaselineCompilerCodeGen::pushUint8BytecodeOperandArg(Register) {
  MOZ_ASSERT(JOF_OPTYPE(JSOp(*handler.pc())) == JOF_UINT8);
  pushArg(Imm32(GET_UINT8(handler.pc())));
}

template <>
void BaselineInterpreterCodeGen::pushUint8BytecodeOperandArg(Register scratch) {
  LoadUint8Operand(masm, scratch);
  pushArg(scratch);
}

// Push the current op's uint16 operand as a VM-call argument.
template <>
void BaselineCompilerCodeGen::pushUint16BytecodeOperandArg(Register) {
  MOZ_ASSERT(JOF_OPTYPE(JSOp(*handler.pc())) == JOF_UINT16);
  pushArg(Imm32(GET_UINT16(handler.pc())));
}

template <>
void BaselineInterpreterCodeGen::pushUint16BytecodeOperandArg(
    Register scratch) {
  LoadUint16Operand(masm, scratch);
  pushArg(scratch);
}

// Load the current op's uint32 length operand into |dest| as an int32.
template <>
void BaselineCompilerCodeGen::loadInt32LengthBytecodeOperand(Register dest) {
  uint32_t length = GET_UINT32(handler.pc());
  MOZ_ASSERT(length <= INT32_MAX,
             "the bytecode emitter must fail to compile code that would "
             "produce a length exceeding int32_t range");
  masm.move32(Imm32(AssertedCast<int32_t>(length)), dest);
}

template <>
void BaselineInterpreterCodeGen::loadInt32LengthBytecodeOperand(Register dest) {
  LoadInt32Operand(masm, dest);
}
// Emit the debug prologue: when the frame is a debuggee, call
// jit::DebugPrologue via the VM so the debugger can observe frame entry.
template <typename Handler>
bool BaselineCodeGen<Handler>::emitDebugPrologue() {
  auto ifDebuggee = [this]() {
    // Load pointer to BaselineFrame in R0.
    masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());

    prepareVMCall();
    pushArg(R0.scratchReg());

    const RetAddrEntry::Kind kind = RetAddrEntry::Kind::DebugPrologue;

    using Fn = bool (*)(JSContext*, BaselineFrame*);
    if (!callVM<Fn, jit::DebugPrologue>(kind)) {
      return false;
    }

    return true;
  };
  return emitDebugInstrumentation(ifDebuggee);
}
// Initialize the baseline frame's flags, environment chain, and ICScript
// slot. |nonFunctionEnv| is the environment to use for non-function scripts.
template <>
void BaselineCompilerCodeGen::emitInitFrameFields(Register nonFunctionEnv) {
  Register scratch = R0.scratchReg();
  Register scratch2 = R2.scratchReg();
  MOZ_ASSERT(nonFunctionEnv != scratch && nonFunctionEnv != scratch2);

  masm.store32(Imm32(0), frame.addressOfFlags());

  // Function frames use the callee's environment; other frames use the
  // caller-provided environment.
  if (handler.function()) {
    masm.loadFunctionFromCalleeToken(frame.addressOfCalleeToken(), scratch);
    masm.unboxObject(Address(scratch, JSFunction::offsetOfEnvironment()),
                     scratch);
    masm.storePtr(scratch, frame.addressOfEnvironmentChain());
  } else {
    masm.storePtr(nonFunctionEnv, frame.addressOfEnvironmentChain());
  }

  // If cx->inlinedICScript contains an inlined ICScript (passed from
  // the caller), take that ICScript and store it in the frame, then
  // overwrite cx->inlinedICScript with nullptr.
  Label notInlined, done;
  masm.movePtr(ImmPtr(cx->addressOfInlinedICScript()), scratch);
  Address inlinedAddr(scratch, 0);
  masm.branchPtr(Assembler::Equal, inlinedAddr, ImmWord(0), &notInlined);
  masm.loadPtr(inlinedAddr, scratch2);
  masm.storePtr(scratch2, frame.addressOfICScript());
  masm.storePtr(ImmPtr(nullptr), inlinedAddr);
  masm.jump(&done);

  // Otherwise, store this script's default ICSCript in the frame.
  masm.bind(&notInlined);
  masm.storePtr(ImmPtr(handler.script()->jitScript()->icScript()),
                frame.addressOfICScript());
  masm.bind(&done);
}
template <>
void BaselineInterpreterCodeGen::emitInitFrameFields(Register nonFunctionEnv) {
  // Interpreter variant: the script is not known statically, so everything
  // (script, environment chain, ICScript, interpreter pc) is derived from the
  // callee token at runtime.
  MOZ_ASSERT(nonFunctionEnv == R1.scratchReg(),
             "Don't clobber nonFunctionEnv below");

  // If we have a dedicated PC register we use it as scratch1 to avoid a
  // register move below.
  Register scratch1 =
      HasInterpreterPCReg() ? InterpreterPCReg : R0.scratchReg();
  Register scratch2 = R2.scratchReg();

  // Mark the frame as running in the Baseline Interpreter.
  masm.store32(Imm32(BaselineFrame::RUNNING_IN_INTERPRETER),
               frame.addressOfFlags());

  // Initialize interpreterScript.
  Label notFunction, done;
  masm.loadPtr(frame.addressOfCalleeToken(), scratch1);
  masm.branchTestPtr(Assembler::NonZero, scratch1, Imm32(CalleeTokenScriptBit),
                     &notFunction);
  {
    // CalleeToken_Function or CalleeToken_FunctionConstructing.
    // Strip the token tag bits to get the JSFunction*, take its environment
    // for the frame's environment chain, then load its JSScript* into
    // scratch1 for the store after the join point.
    masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), scratch1);
    masm.unboxObject(Address(scratch1, JSFunction::offsetOfEnvironment()),
                     scratch2);
    masm.storePtr(scratch2, frame.addressOfEnvironmentChain());
    masm.loadPrivate(Address(scratch1, JSFunction::offsetOfJitInfoOrScript()),
                     scratch1);
    masm.jump(&done);
  }
  masm.bind(&notFunction);
  {
    // CalleeToken_Script. Stripping the tag bits leaves the JSScript* in
    // scratch1; the environment chain was passed in by the caller.
    masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), scratch1);
    masm.storePtr(nonFunctionEnv, frame.addressOfEnvironmentChain());
  }
  masm.bind(&done);
  // Both paths leave the JSScript* in scratch1.
  masm.storePtr(scratch1, frame.addressOfInterpreterScript());

  // Initialize icScript and interpreterICEntry
  masm.loadJitScript(scratch1, scratch2);
  masm.computeEffectiveAddress(Address(scratch2, JitScript::offsetOfICScript()),
                               scratch2);
  masm.storePtr(scratch2, frame.addressOfICScript());
  masm.computeEffectiveAddress(Address(scratch2, ICScript::offsetOfICEntries()),
                               scratch2);
  masm.storePtr(scratch2, frame.addressOfInterpreterICEntry());

  // Initialize interpreter pc: script -> shared data -> immutable script
  // data -> start of bytecode.
  masm.loadPtr(Address(scratch1, JSScript::offsetOfSharedData()), scratch1);
  masm.loadPtr(Address(scratch1, SharedImmutableScriptData::offsetOfISD()),
               scratch1);
  masm.addPtr(Imm32(ImmutableScriptData::offsetOfCode()), scratch1);

  // With a dedicated pc register the pc already lives in that register;
  // otherwise spill it to the frame slot.
  if (HasInterpreterPCReg()) {
    MOZ_ASSERT(scratch1 == InterpreterPCReg,
               "pc must be stored in the pc register");
  } else {
    masm.storePtr(scratch1, frame.addressOfInterpreterPC());
  }
}
template <>
template <typename F>
bool BaselineCompilerCodeGen::initEnvironmentChainHelper(
    const F& initFunctionEnv) {
  // The compiler knows statically whether this is a function script: emit
  // initFunctionEnv's code for function scripts, otherwise emit nothing.
  return handler.function() ? initFunctionEnv() : true;
}
template <>
template <typename F>
bool BaselineInterpreterCodeGen::initEnvironmentChainHelper(
    const F& initFunctionEnv) {
  // For function scripts use the code emitted by initFunctionEnv. For other
  // scripts this is a no-op.
  // The interpreter doesn't know the script kind statically, so emit a
  // runtime test on the callee token: the script bit set means a non-function
  // script, which jumps over the initFunctionEnv code.
  Label done;
  masm.branchTestPtr(Assembler::NonZero, frame.addressOfCalleeToken(),
                     Imm32(CalleeTokenScriptBit), &done);
  {
    if (!initFunctionEnv()) {
      return false;
    }
  }
  masm.bind(&done);
  return true;
}
template <typename Handler>
bool BaselineCodeGen<Handler>::initEnvironmentChain() {
  // For function scripts that need environment objects, emit a VM call that
  // allocates them; handler-specific initEnvironmentChainHelper decides
  // (statically or at runtime) whether the function path applies.
  auto emitFunctionEnvInit = [this]() {
    auto emitEnvAllocCall = [this]() {
      // Call into the VM to create the proper environment objects.
      prepareVMCall();
      masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
      pushArg(R0.scratchReg());

      // This runs before locals are pushed onto the frame.
      const CallVMPhase phase = CallVMPhase::BeforePushingLocals;

      using Fn = bool (*)(JSContext*, BaselineFrame*);
      return callVMNonOp<Fn, jit::InitFunctionEnvironmentObjects>(phase);
    };
    return emitTestScriptFlag(
        JSScript::ImmutableFlags::NeedsFunctionEnvironmentObjects, true,
        emitEnvAllocCall, R2.scratchReg());
  };
  return initEnvironmentChainHelper(emitFunctionEnvInit);
}
template <typename Handler>
bool BaselineCodeGen<Handler>::emitInterruptCheck() {
  // Emit a fast inline test of cx's interrupt bits; only when some bit is
  // set do we take the slow VM-call path to handle the interrupt.
  frame.syncStack(0);

  Label noInterrupt;
  masm.branch32(Assembler::Equal, AbsoluteAddress(cx->addressOfInterruptBits()),
                Imm32(0), &noInterrupt);

  prepareVMCall();

  // Use a custom RetAddrEntry::Kind so DebugModeOSR can distinguish this call
  // from other callVMs that might happen at this pc.
  const RetAddrEntry::Kind kind = RetAddrEntry::Kind::InterruptCheck;

  using Fn = bool (*)(JSContext*);
  const bool ok = callVM<Fn, InterruptCheck>(kind);
  if (!ok) {
    return false;
  }

  masm.bind(&noInterrupt);
  return true;
}
template <>
bool BaselineCompilerCodeGen::emitWarmUpCounterIncrement() {
frame.assertSyncedStack();
// Record native code offset for OSR from Baseline Interpreter into Baseline
// JIT code. This is right before the warm-up check in the Baseline JIT code,
// to make sure we can immediately enter Ion if the script is warm enough or
// if --ion-eager is used.
JSScript* script = handler.script();
jsbytecode* pc = handler.pc();
if (JSOp(*pc) == JSOp::LoopHead) {
uint32_t pcOffset = script->pcToOffset(pc);
uint32_t nativeOffset = masm.currentOffset();
if (!handler.osrEntries().emplaceBack(pcOffset, nativeOffset)) {
ReportOutOfMemory(cx);
return false;
}
}
// Emit no warm-up counter increments if Ion is not enabled or if the script
// will never be Ion-compileable.
if (!handler.maybeIonCompileable()) {
return true;
}
Register scriptReg = R2.scratchReg();
Register countReg = R0.scratchReg();
// Load the ICScript* in scriptReg.
masm.loadPtr(frame.addressOfICScript(), scriptReg);
// Bump warm-up counter.
Address warmUpCounterAddr(scriptReg, ICScript::offsetOfWarmUpCount());
masm.load32(warmUpCounterAddr, countReg);
masm.add32(Imm32(1), countReg);
masm.store32(countReg, warmUpCounterAddr);
if (!JitOptions.disableInlining) {
// Consider trial inlining.
// Note: unlike other warmup thresholds, where we try to enter a
// higher tier whenever we are higher than a given warmup count,
// trial inlining triggers once when reaching the threshold.
Label noTrialInlining;
masm.branch32(Assembler::NotEqual, countReg,
Imm32(JitOptions.trialInliningWarmUpThreshold),
&noTrialInlining);
prepareVMCall();
masm.PushBaselineFramePtr(FramePointer, R0.scratchReg());
using Fn = bool (*)(JSContext*, BaselineFrame*);
if (!callVMNonOp<Fn, DoTrialInlining>()) {
return false;
}
// Reload registers potentially clobbered by the call.
masm.loadPtr(frame.addressOfICScript(), scriptReg);
masm.load32(warmUpCounterAddr, countReg);
masm.bind(&noTrialInlining);
}
if (JSOp(*pc) == JSOp::LoopHead) {
// If this is a loop where we can't OSR (for example because it's inside a
// catch or finally block), increment the warmup counter but don't attempt
// OSR (Ion/Warp only compiles the try block).
if (!handler.analysis().info(pc).loopHeadCanOsr) {
return true;
}
}
Label done;
const OptimizationInfo* info =
IonOptimizations.get(OptimizationLevel::Normal);
uint32_t warmUpThreshold = info->compilerWarmUpThreshold(script, pc);
masm.branch32(Assembler::LessThan, countReg, Imm32(warmUpThreshold), &done);
// Don't trigger Warp compilations from trial-inlined scripts.
Address depthAddr(scriptReg, ICScript::offsetOfDepth());
masm.branch32(Assembler::NotEqual, depthAddr, Imm32(0), &done);
// Load the IonScript* in scriptReg. We can load this from the ICScript*
// because it must be an outer ICScript embedded in the JitScript.
constexpr int32_t offset = -int32_t(JitScript::offsetOfICScript()) +
int32_t(JitScript::offsetOfIonScript());
masm.loadPtr(Address(scriptReg, offset), scriptReg);
// Do nothing if Ion is already compiling this script off-thread or if Ion has
// been disabled for this script.
masm.branchPtr(Assembler::Equal, scriptReg, ImmPtr(IonCompilingScriptPtr),
&done);
masm.branchPtr(Assembler::Equal, scriptReg, ImmPtr(IonDisabledScriptPtr),
&done);
// Try to compile and/or finish a compilation.
if (JSOp(*pc) == JSOp::LoopHead) {
// Try to OSR into Ion.
computeFrameSize(R0.scratchReg());
prepareVMCall();
pushBytecodePCArg();
pushArg(R0.scratchReg());
masm.PushBaselineFramePtr(FramePointer, R0.scratchReg());
using Fn = bool (*)(JSContext*, BaselineFrame*, uint32_t, jsbytecode*,