/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/BaselineCodeGen.h"
#include "mozilla/Casting.h"
#include "gc/GC.h"
#include "jit/BaselineIC.h"
#include "jit/BaselineJIT.h"
#include "jit/CacheIRCompiler.h"
#include "jit/CacheIRGenerator.h"
#include "jit/CalleeToken.h"
#include "jit/FixedList.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/JitcodeMap.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitSpewer.h"
#include "jit/Linker.h"
#include "jit/PerfSpewer.h"
#include "jit/SharedICHelpers.h"
#include "jit/TemplateObject.h"
#include "jit/TrialInlining.h"
#include "jit/VMFunctions.h"
#include "js/friend/ErrorMessages.h" // JSMSG_*
#include "js/UniquePtr.h"
#include "vm/AsyncFunction.h"
#include "vm/AsyncIteration.h"
#include "vm/BuiltinObjectKind.h"
#include "vm/EnvironmentObject.h"
#include "vm/FunctionFlags.h" // js::FunctionFlags
#include "vm/Interpreter.h"
#include "vm/JSFunction.h"
#include "vm/Logging.h"
#include "vm/Time.h"
#ifdef MOZ_VTUNE
# include "vtune/VTuneWrapper.h"
#endif
#include "debugger/DebugAPI-inl.h"
#include "jit/BaselineFrameInfo-inl.h"
#include "jit/JitHints-inl.h"
#include "jit/JitScript-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/SharedICHelpers-inl.h"
#include "jit/TemplateObject-inl.h"
#include "jit/VMFunctionList-inl.h"
#include "vm/Interpreter-inl.h"
#include "vm/JSScript-inl.h"
using namespace js;
using namespace js::jit;
using JS::TraceKind;
using mozilla::AssertedCast;
using mozilla::Maybe;
namespace js {
class PlainObject;
namespace jit {
BaselineCompilerHandler::BaselineCompilerHandler(JSContext* cx,
MacroAssembler& masm,
TempAllocator& alloc,
JSScript* script)
: frame_(script, masm),
alloc_(alloc),
analysis_(alloc, script),
#ifdef DEBUG
masm_(masm),
#endif
script_(script),
pc_(script->code()),
icEntryIndex_(0),
compileDebugInstrumentation_(script->isDebuggee()),
ionCompileable_(IsIonEnabled(cx) && CanIonCompileScript(cx, script)) {
}
BaselineInterpreterHandler::BaselineInterpreterHandler(JSContext* cx,
MacroAssembler& masm)
: frame_(masm) {}
template <typename Handler>
template <typename... HandlerArgs>
BaselineCodeGen<Handler>::BaselineCodeGen(JSContext* cx, TempAllocator& alloc,
HandlerArgs&&... args)
: handler(cx, masm, std::forward<HandlerArgs>(args)...),
cx(cx),
masm(cx, alloc),
frame(handler.frame()) {}
BaselineCompiler::BaselineCompiler(JSContext* cx, TempAllocator& alloc,
JSScript* script)
: BaselineCodeGen(cx, alloc, /* HandlerArgs = */ alloc, script) {
#ifdef JS_CODEGEN_NONE
MOZ_CRASH();
#endif
}
BaselineInterpreterGenerator::BaselineInterpreterGenerator(JSContext* cx,
TempAllocator& alloc)
: BaselineCodeGen(cx, alloc /* no handlerArgs */) {}
bool BaselineCompilerHandler::init(JSContext* cx) {
JS_LOG(baselineCompileHandler, mozilla::LogLevel::Debug,
"Baseline Compile Init");
if (!analysis_.init(alloc_)) {
return false;
}
uint32_t len = script_->length();
if (!labels_.init(alloc_, len)) {
return false;
}
for (size_t i = 0; i < len; i++) {
new (&labels_[i]) Label();
}
if (!frame_.init(alloc_)) {
return false;
}
return true;
}
bool BaselineCompiler::init() {
if (!handler.init(cx)) {
return false;
}
return true;
}
bool BaselineCompilerHandler::recordCallRetAddr(JSContext* cx,
RetAddrEntry::Kind kind,
uint32_t retOffset) {
uint32_t pcOffset = script_->pcToOffset(pc_);
// Entries must be sorted by pcOffset for binary search to work.
// See BaselineScript::retAddrEntryFromPCOffset.
MOZ_ASSERT_IF(!retAddrEntries_.empty(),
retAddrEntries_.back().pcOffset() <= pcOffset);
// Similarly, entries must be sorted by return offset and this offset must be
// unique. See BaselineScript::retAddrEntryFromReturnOffset.
MOZ_ASSERT_IF(!retAddrEntries_.empty() && !masm_.oom(),
retAddrEntries_.back().returnOffset().offset() < retOffset);
if (!retAddrEntries_.emplaceBack(pcOffset, kind, CodeOffset(retOffset))) {
ReportOutOfMemory(cx);
return false;
}
return true;
}
bool BaselineInterpreterHandler::recordCallRetAddr(JSContext* cx,
RetAddrEntry::Kind kind,
uint32_t retOffset) {
switch (kind) {
case RetAddrEntry::Kind::DebugPrologue:
MOZ_ASSERT(callVMOffsets_.debugPrologueOffset == 0,
"expected single DebugPrologue call");
callVMOffsets_.debugPrologueOffset = retOffset;
break;
case RetAddrEntry::Kind::DebugEpilogue:
MOZ_ASSERT(callVMOffsets_.debugEpilogueOffset == 0,
"expected single DebugEpilogue call");
callVMOffsets_.debugEpilogueOffset = retOffset;
break;
case RetAddrEntry::Kind::DebugAfterYield:
MOZ_ASSERT(callVMOffsets_.debugAfterYieldOffset == 0,
"expected single DebugAfterYield call");
callVMOffsets_.debugAfterYieldOffset = retOffset;
break;
default:
break;
}
return true;
}
bool BaselineInterpreterHandler::addDebugInstrumentationOffset(
JSContext* cx, CodeOffset offset) {
if (!debugInstrumentationOffsets_.append(offset.offset())) {
ReportOutOfMemory(cx);
return false;
}
return true;
}
MethodStatus BaselineCompiler::compile() {
AutoCreatedBy acb(masm, "BaselineCompiler::compile");
Rooted<JSScript*> script(cx, handler.script());
JitSpew(JitSpew_BaselineScripts, "Baseline compiling script %s:%u:%u (%p)",
script->filename(), script->lineno(),
script->column().oneOriginValue(), script.get());
JitSpew(JitSpew_Codegen, "# Emitting baseline code for script %s:%u:%u",
script->filename(), script->lineno(),
script->column().oneOriginValue());
AutoIncrementalTimer timer(cx->realm()->timers.baselineCompileTime);
AutoKeepJitScripts keepJitScript(cx);
if (!script->ensureHasJitScript(cx, keepJitScript)) {
return Method_Error;
}
// When code coverage is enabled, we have to create the ScriptCounts if they
// do not exist.
if (!script->hasScriptCounts() && cx->realm()->collectCoverageForDebug()) {
if (!script->initScriptCounts(cx)) {
return Method_Error;
}
}
if (!JitOptions.disableJitHints &&
cx->runtime()->jitRuntime()->hasJitHintsMap()) {
JitHintsMap* jitHints = cx->runtime()->jitRuntime()->getJitHintsMap();
jitHints->setEagerBaselineHint(script);
}
// Suppress GC during compilation.
gc::AutoSuppressGC suppressGC(cx);
if (!script->jitScript()->ensureHasCachedBaselineJitData(cx, script)) {
return Method_Error;
}
MOZ_ASSERT(!script->hasBaselineScript());
perfSpewer_.recordOffset(masm, "Prologue");
if (!emitPrologue()) {
return Method_Error;
}
MethodStatus status = emitBody();
if (status != Method_Compiled) {
return status;
}
perfSpewer_.recordOffset(masm, "Epilogue");
if (!emitEpilogue()) {
return Method_Error;
}
perfSpewer_.recordOffset(masm, "OOLPostBarrierSlot");
if (!emitOutOfLinePostBarrierSlot()) {
return Method_Error;
}
AutoCreatedBy acb2(masm, "exception_tail");
Linker linker(masm);
if (masm.oom()) {
ReportOutOfMemory(cx);
return Method_Error;
}
JitCode* code = linker.newCode(cx, CodeKind::Baseline);
if (!code) {
return Method_Error;
}
UniquePtr<BaselineScript> baselineScript(
BaselineScript::New(
cx, warmUpCheckPrologueOffset_.offset(),
profilerEnterFrameToggleOffset_.offset(),
profilerExitFrameToggleOffset_.offset(),
handler.retAddrEntries().length(), handler.osrEntries().length(),
debugTrapEntries_.length(), script->resumeOffsets().size()),
JS::DeletePolicy<BaselineScript>(cx->runtime()));
if (!baselineScript) {
return Method_Error;
}
baselineScript->setMethod(code);
JitSpew(JitSpew_BaselineScripts,
"Created BaselineScript %p (raw %p) for %s:%u:%u",
(void*)baselineScript.get(), (void*)code->raw(), script->filename(),
script->lineno(), script->column().oneOriginValue());
baselineScript->copyRetAddrEntries(handler.retAddrEntries().begin());
baselineScript->copyOSREntries(handler.osrEntries().begin());
baselineScript->copyDebugTrapEntries(debugTrapEntries_.begin());
// If profiler instrumentation is enabled, toggle instrumentation on.
if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(
cx->runtime())) {
baselineScript->toggleProfilerInstrumentation(true);
}
// Compute native resume addresses for the script's resume offsets.
baselineScript->computeResumeNativeOffsets(script, resumeOffsetEntries_);
if (compileDebugInstrumentation()) {
baselineScript->setHasDebugInstrumentation();
}
// Always register a native => bytecode mapping entry, since the profiler can
// be turned on with baseline jitcode on the stack, and baseline jitcode
// cannot be invalidated.
{
JitSpew(JitSpew_Profiling,
"Added JitcodeGlobalEntry for baseline script %s:%u:%u (%p)",
script->filename(), script->lineno(),
script->column().oneOriginValue(), baselineScript.get());
// Generate profiling string.
UniqueChars str = GeckoProfilerRuntime::allocProfileString(cx, script);
if (!str) {
return Method_Error;
}
auto entry = MakeJitcodeGlobalEntry<BaselineEntry>(
cx, code, code->raw(), code->rawEnd(), script, std::move(str));
if (!entry) {
return Method_Error;
}
JitcodeGlobalTable* globalTable =
cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
if (!globalTable->addEntry(std::move(entry))) {
ReportOutOfMemory(cx);
return Method_Error;
}
// Mark the jitcode as having a bytecode map.
code->setHasBytecodeMap();
}
script->jitScript()->setBaselineScript(script, baselineScript.release());
perfSpewer_.saveProfile(cx, script, code);
#ifdef MOZ_VTUNE
vtune::MarkScript(code, script, "baseline");
#endif
return Method_Compiled;
}
// On most platforms we use a dedicated bytecode PC register to avoid many
// dependent loads and stores for sequences of simple bytecode ops. This
// register must be saved/restored around VM and IC calls.
//
// On 32-bit x86 we don't have enough registers for this (because R0-R2 require
// 6 registers) so there we always store the pc on the frame.
static constexpr bool HasInterpreterPCReg() {
return InterpreterPCReg != InvalidReg;
}
static Register LoadBytecodePC(MacroAssembler& masm, Register scratch) {
if (HasInterpreterPCReg()) {
return InterpreterPCReg;
}
Address pcAddr(FramePointer, BaselineFrame::reverseOffsetOfInterpreterPC());
masm.loadPtr(pcAddr, scratch);
return scratch;
}
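// The operand loaders below assume the usual bytecode layout: jsbytecode is a
// single byte, the opcode occupies the byte at pc, and any operand bytes
// follow immediately, so operands are read from pc + sizeof(jsbytecode).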
static void LoadInt8Operand(MacroAssembler& masm, Register dest) {
Register pc = LoadBytecodePC(masm, dest);
masm.load8SignExtend(Address(pc, sizeof(jsbytecode)), dest);
}
static void LoadUint8Operand(MacroAssembler& masm, Register dest) {
Register pc = LoadBytecodePC(masm, dest);
masm.load8ZeroExtend(Address(pc, sizeof(jsbytecode)), dest);
}
static void LoadUint16Operand(MacroAssembler& masm, Register dest) {
Register pc = LoadBytecodePC(masm, dest);
masm.load16ZeroExtend(Address(pc, sizeof(jsbytecode)), dest);
}
static void LoadInt32Operand(MacroAssembler& masm, Register dest) {
Register pc = LoadBytecodePC(masm, dest);
masm.load32(Address(pc, sizeof(jsbytecode)), dest);
}
static void LoadInt32OperandSignExtendToPtr(MacroAssembler& masm, Register pc,
Register dest) {
masm.load32SignExtendToPtr(Address(pc, sizeof(jsbytecode)), dest);
}
static void LoadUint24Operand(MacroAssembler& masm, size_t offset,
Register dest) {
// Load the opcode byte together with the operand, then right shift to discard
// the opcode. (The 32-bit load puts the opcode in the low byte because our
// JIT targets are little-endian.)
Register pc = LoadBytecodePC(masm, dest);
masm.load32(Address(pc, offset), dest);
masm.rshift32(Imm32(8), dest);
}
static void LoadInlineValueOperand(MacroAssembler& masm, ValueOperand dest) {
// Note: the Value might be unaligned, but as elsewhere we rely on all our
// platforms having appropriate support for unaligned accesses (except for
// floating point instructions on ARM).
Register pc = LoadBytecodePC(masm, dest.scratchReg());
masm.loadUnalignedValue(Address(pc, sizeof(jsbytecode)), dest);
}
template <>
void BaselineCompilerCodeGen::loadScript(Register dest) {
masm.movePtr(ImmGCPtr(handler.script()), dest);
}
template <>
void BaselineInterpreterCodeGen::loadScript(Register dest) {
masm.loadPtr(frame.addressOfInterpreterScript(), dest);
}
template <>
void BaselineCompilerCodeGen::saveInterpreterPCReg() {}
template <>
void BaselineInterpreterCodeGen::saveInterpreterPCReg() {
if (HasInterpreterPCReg()) {
masm.storePtr(InterpreterPCReg, frame.addressOfInterpreterPC());
}
}
template <>
void BaselineCompilerCodeGen::restoreInterpreterPCReg() {}
template <>
void BaselineInterpreterCodeGen::restoreInterpreterPCReg() {
if (HasInterpreterPCReg()) {
masm.loadPtr(frame.addressOfInterpreterPC(), InterpreterPCReg);
}
}
template <>
void BaselineCompilerCodeGen::emitInitializeLocals() {
// Initialize all locals to |undefined|. Lexical bindings are temporal
// dead zoned in bytecode.
size_t n = frame.nlocals();
if (n == 0) {
return;
}
// Use R0 to minimize code size. If the number of locals to push is <
// LOOP_UNROLL_FACTOR, then the initialization pushes are emitted directly
// and inline. Otherwise, they're emitted in a partially unrolled loop.
static const size_t LOOP_UNROLL_FACTOR = 4;
size_t toPushExtra = n % LOOP_UNROLL_FACTOR;
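// Worked example: with n == 10 locals, toPushExtra == 2, so two pushes are
// emitted inline below and the unrolled loop pushes the remaining 8 values
// in two iterations of four pushes each.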
masm.moveValue(UndefinedValue(), R0);
// Handle any extra pushes left over by the optional unrolled loop below.
for (size_t i = 0; i < toPushExtra; i++) {
masm.pushValue(R0);
}
// Partially unrolled loop of pushes.
if (n >= LOOP_UNROLL_FACTOR) {
size_t toPush = n - toPushExtra;
MOZ_ASSERT(toPush % LOOP_UNROLL_FACTOR == 0);
MOZ_ASSERT(toPush >= LOOP_UNROLL_FACTOR);
masm.move32(Imm32(toPush), R1.scratchReg());
// Emit unrolled loop with 4 pushes per iteration.
Label pushLoop;
masm.bind(&pushLoop);
for (size_t i = 0; i < LOOP_UNROLL_FACTOR; i++) {
masm.pushValue(R0);
}
masm.branchSub32(Assembler::NonZero, Imm32(LOOP_UNROLL_FACTOR),
R1.scratchReg(), &pushLoop);
}
}
template <>
void BaselineInterpreterCodeGen::emitInitializeLocals() {
// Push |undefined| for all locals.
Register scratch = R0.scratchReg();
loadScript(scratch);
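// Chase script->sharedData->ISD (per the offsets used below) to fetch
// nfixed, the number of fixed slots (locals) to initialize.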
masm.loadPtr(Address(scratch, JSScript::offsetOfSharedData()), scratch);
masm.loadPtr(Address(scratch, SharedImmutableScriptData::offsetOfISD()),
scratch);
masm.load32(Address(scratch, ImmutableScriptData::offsetOfNfixed()), scratch);
Label top, done;
masm.branchTest32(Assembler::Zero, scratch, scratch, &done);
masm.bind(&top);
{
masm.pushValue(UndefinedValue());
masm.branchSub32(Assembler::NonZero, Imm32(1), scratch, &top);
}
masm.bind(&done);
}
// On input:
// R2.scratchReg() contains object being written to.
// Called with the baseline stack synced, except for R0 which is preserved.
// All other registers are usable as scratch.
// This calls:
// void PostWriteBarrier(JSRuntime* rt, JSObject* obj);
template <typename Handler>
bool BaselineCodeGen<Handler>::emitOutOfLinePostBarrierSlot() {
AutoCreatedBy acb(masm,
"BaselineCodeGen<Handler>::emitOutOfLinePostBarrierSlot");
if (!postBarrierSlot_.used()) {
return true;
}
masm.bind(&postBarrierSlot_);
#ifdef JS_USE_LINK_REGISTER
masm.pushReturnAddress();
#endif
Register objReg = R2.scratchReg();
// Check the one-element cache (the last cell added to the whole-cell store
// buffer) to avoid the VM call.
Label skipBarrier;
auto* lastCellAddr = cx->runtime()->gc.addressOfLastBufferedWholeCell();
masm.branchPtr(Assembler::Equal, AbsoluteAddress(lastCellAddr), objReg,
&skipBarrier);
saveInterpreterPCReg();
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
MOZ_ASSERT(!regs.has(FramePointer));
regs.take(R0);
regs.take(objReg);
Register scratch = regs.takeAny();
masm.pushValue(R0);
using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
masm.setupUnalignedABICall(scratch);
masm.movePtr(ImmPtr(cx->runtime()), scratch);
masm.passABIArg(scratch);
masm.passABIArg(objReg);
masm.callWithABI<Fn, PostWriteBarrier>();
restoreInterpreterPCReg();
masm.popValue(R0);
masm.bind(&skipBarrier);
masm.ret();
return true;
}
// Scan a CacheIR stub's fields and create an allocation site for any that
// refer to the catch-all unknown allocation site. This is the case for stubs
// created while running in the interpreter, and happens on the transition to
// baseline.
static bool CreateAllocSitesForCacheIRStub(JSScript* script, uint32_t pcOffset,
ICCacheIRStub* stub) {
const CacheIRStubInfo* stubInfo = stub->stubInfo();
uint8_t* stubData = stub->stubDataStart();
ICScript* icScript = script->jitScript()->icScript();
uint32_t field = 0;
size_t offset = 0;
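// Walk the stub's field list; StubField::Type::Limit is the sentinel marking
// the end of the list.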
while (true) {
StubField::Type fieldType = stubInfo->fieldType(field);
if (fieldType == StubField::Type::Limit) {
break;
}
if (fieldType == StubField::Type::AllocSite) {
gc::AllocSite* site =
stubInfo->getPtrStubField<ICCacheIRStub, gc::AllocSite>(stub, offset);
if (site->kind() == gc::AllocSite::Kind::Unknown) {
gc::AllocSite* newSite =
icScript->getOrCreateAllocSite(script, pcOffset);
if (!newSite) {
return false;
}
stubInfo->replaceStubRawWord(stubData, offset, uintptr_t(site),
uintptr_t(newSite));
}
}
field++;
offset += StubField::sizeInBytes(fieldType);
}
return true;
}
static void CreateAllocSitesForICChain(JSScript* script, uint32_t pcOffset,
uint32_t entryIndex) {
JitScript* jitScript = script->jitScript();
ICStub* stub = jitScript->icEntry(entryIndex).firstStub();
while (!stub->isFallback()) {
if (!CreateAllocSitesForCacheIRStub(script, pcOffset,
stub->toCacheIRStub())) {
// This is an optimization, so it is safe to skip if we hit OOM or the
// per-zone limit.
return;
}
stub = stub->toCacheIRStub()->next();
}
}
template <>
bool BaselineCompilerCodeGen::emitNextIC() {
AutoCreatedBy acb(masm, "emitNextIC");
// Emit a call to an IC stored in JitScript. Calls to this must match the
// ICEntry order in JitScript: first the non-op IC entries for |this| and
// formal arguments, then the per-op IC entries for JOF_IC ops.
JSScript* script = handler.script();
uint32_t pcOffset = script->pcToOffset(handler.pc());
// We don't use every ICEntry and we can skip unreachable ops, so we have
// to loop until we find an ICEntry for the current pc.
const ICFallbackStub* stub;
uint32_t entryIndex;
do {
stub = script->jitScript()->fallbackStub(handler.icEntryIndex());
entryIndex = handler.icEntryIndex();
handler.moveToNextICEntry();
} while (stub->pcOffset() < pcOffset);
MOZ_ASSERT(stub->pcOffset() == pcOffset);
MOZ_ASSERT(BytecodeOpHasIC(JSOp(*handler.pc())));
if (BytecodeOpCanHaveAllocSite(JSOp(*handler.pc()))) {
CreateAllocSitesForICChain(script, pcOffset, entryIndex);
}
// Load stub pointer into ICStubReg.
masm.loadPtr(frame.addressOfICScript(), ICStubReg);
size_t firstStubOffset = ICScript::offsetOfFirstStub(entryIndex);
masm.loadPtr(Address(ICStubReg, firstStubOffset), ICStubReg);
CodeOffset returnOffset;
EmitCallIC(masm, &returnOffset);
RetAddrEntry::Kind kind = RetAddrEntry::Kind::IC;
if (!handler.retAddrEntries().emplaceBack(pcOffset, kind, returnOffset)) {
ReportOutOfMemory(cx);
return false;
}
return true;
}
template <>
bool BaselineInterpreterCodeGen::emitNextIC() {
saveInterpreterPCReg();
masm.loadPtr(frame.addressOfInterpreterICEntry(), ICStubReg);
masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);
masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
uint32_t returnOffset = masm.currentOffset();
restoreInterpreterPCReg();
// If this is an IC for a bytecode op where Ion may inline scripts, we need to
// record the return offset for Ion bailouts.
if (handler.currentOp()) {
JSOp op = *handler.currentOp();
MOZ_ASSERT(BytecodeOpHasIC(op));
if (IsIonInlinableOp(op)) {
if (!handler.icReturnOffsets().emplaceBack(returnOffset, op)) {
return false;
}
}
}
return true;
}
template <>
void BaselineCompilerCodeGen::computeFrameSize(Register dest) {
MOZ_ASSERT(!inCall_, "must not be called in the middle of a VM call");
masm.move32(Imm32(frame.frameSize()), dest);
}
template <>
void BaselineInterpreterCodeGen::computeFrameSize(Register dest) {
// dest := FramePointer - StackPointer.
MOZ_ASSERT(!inCall_, "must not be called in the middle of a VM call");
masm.mov(FramePointer, dest);
masm.subStackPtrFrom(dest);
}
template <typename Handler>
void BaselineCodeGen<Handler>::prepareVMCall() {
pushedBeforeCall_ = masm.framePushed();
#ifdef DEBUG
inCall_ = true;
#endif
// Ensure everything is synced.
frame.syncStack(0);
}
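// VM calls in this file follow a common pattern: prepareVMCall(), then one
// pushArg() per explicit argument (pushed in reverse order, so the last
// argument is pushed first), then callVM<Fn, fn>(). See emitStackCheck below
// for a one-argument example.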
template <>
void BaselineCompilerCodeGen::storeFrameSizeAndPushDescriptor(
uint32_t argSize, Register scratch) {
#ifdef DEBUG
masm.store32(Imm32(frame.frameSize()), frame.addressOfDebugFrameSize());
#endif
masm.pushFrameDescriptor(FrameType::BaselineJS);
}
template <>
void BaselineInterpreterCodeGen::storeFrameSizeAndPushDescriptor(
uint32_t argSize, Register scratch) {
#ifdef DEBUG
// Store the frame size without VMFunction arguments in debug builds.
// scratch := FramePointer - StackPointer - argSize.
masm.mov(FramePointer, scratch);
masm.subStackPtrFrom(scratch);
masm.sub32(Imm32(argSize), scratch);
masm.store32(scratch, frame.addressOfDebugFrameSize());
#endif
masm.pushFrameDescriptor(FrameType::BaselineJS);
}
static uint32_t GetVMFunctionArgSize(const VMFunctionData& fun) {
return fun.explicitStackSlots() * sizeof(void*);
}
template <typename Handler>
bool BaselineCodeGen<Handler>::callVMInternal(VMFunctionId id,
RetAddrEntry::Kind kind,
CallVMPhase phase) {
#ifdef DEBUG
// Assert prepareVMCall() has been called.
MOZ_ASSERT(inCall_);
inCall_ = false;
#endif
TrampolinePtr code = cx->runtime()->jitRuntime()->getVMWrapper(id);
const VMFunctionData& fun = GetVMFunction(id);
uint32_t argSize = GetVMFunctionArgSize(fun);
// Assert all arguments were pushed.
MOZ_ASSERT(masm.framePushed() - pushedBeforeCall_ == argSize);
saveInterpreterPCReg();
if (phase == CallVMPhase::AfterPushingLocals) {
storeFrameSizeAndPushDescriptor(argSize, R0.scratchReg());
} else {
MOZ_ASSERT(phase == CallVMPhase::BeforePushingLocals);
#ifdef DEBUG
uint32_t frameBaseSize = BaselineFrame::frameSizeForNumValueSlots(0);
masm.store32(Imm32(frameBaseSize), frame.addressOfDebugFrameSize());
#endif
masm.pushFrameDescriptor(FrameType::BaselineJS);
}
// Perform the call.
masm.call(code);
uint32_t callOffset = masm.currentOffset();
// Pop arguments from framePushed.
masm.implicitPop(argSize);
restoreInterpreterPCReg();
return handler.recordCallRetAddr(cx, kind, callOffset);
}
template <typename Handler>
template <typename Fn, Fn fn>
bool BaselineCodeGen<Handler>::callVM(RetAddrEntry::Kind kind,
CallVMPhase phase) {
VMFunctionId fnId = VMFunctionToId<Fn, fn>::id;
return callVMInternal(fnId, kind, phase);
}
template <typename Handler>
bool BaselineCodeGen<Handler>::emitStackCheck() {
Label skipCall;
if (handler.mustIncludeSlotsInStackCheck()) {
// Subtract the size of script->nslots() first.
Register scratch = R1.scratchReg();
masm.moveStackPtrTo(scratch);
subtractScriptSlotsSize(scratch, R2.scratchReg());
masm.branchPtr(Assembler::BelowOrEqual,
AbsoluteAddress(cx->addressOfJitStackLimit()), scratch,
&skipCall);
} else {
masm.branchStackPtrRhs(Assembler::BelowOrEqual,
AbsoluteAddress(cx->addressOfJitStackLimit()),
&skipCall);
}
prepareVMCall();
masm.loadBaselineFramePtr(FramePointer, R1.scratchReg());
pushArg(R1.scratchReg());
const CallVMPhase phase = CallVMPhase::BeforePushingLocals;
const RetAddrEntry::Kind kind = RetAddrEntry::Kind::StackCheck;
using Fn = bool (*)(JSContext*, BaselineFrame*);
if (!callVM<Fn, CheckOverRecursedBaseline>(kind, phase)) {
return false;
}
masm.bind(&skipCall);
return true;
}
static void EmitCallFrameIsDebuggeeCheck(MacroAssembler& masm) {
using Fn = void (*)(BaselineFrame* frame);
masm.setupUnalignedABICall(R0.scratchReg());
masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
masm.passABIArg(R0.scratchReg());
masm.callWithABI<Fn, FrameIsDebuggeeCheck>();
}
template <>
bool BaselineCompilerCodeGen::emitIsDebuggeeCheck() {
if (handler.compileDebugInstrumentation()) {
EmitCallFrameIsDebuggeeCheck(masm);
}
return true;
}
template <>
bool BaselineInterpreterCodeGen::emitIsDebuggeeCheck() {
// Use a toggled jump to call FrameIsDebuggeeCheck only if the debugger is
// enabled.
//
// TODO(bug 1522394): consider having a cx->realm->isDebuggee guard before the
// call. Consider moving the callWithABI out-of-line.
Label skipCheck;
CodeOffset toggleOffset = masm.toggledJump(&skipCheck);
{
saveInterpreterPCReg();
EmitCallFrameIsDebuggeeCheck(masm);
restoreInterpreterPCReg();
}
masm.bind(&skipCheck);
return handler.addDebugInstrumentationOffset(cx, toggleOffset);
}
static void MaybeIncrementCodeCoverageCounter(MacroAssembler& masm,
JSScript* script,
jsbytecode* pc) {
if (!script->hasScriptCounts()) {
return;
}
PCCounts* counts = script->maybeGetPCCounts(pc);
uint64_t* counterAddr = &counts->numExec();
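// Bake the counter's address into the jitcode; executing this op then bumps
// the 64-bit counter in place.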
masm.inc64(AbsoluteAddress(counterAddr));
}
template <>
bool BaselineCompilerCodeGen::emitHandleCodeCoverageAtPrologue() {
// If the first instruction of the main section is not a jump target, emit
// the increment of its code coverage counter here.
JSScript* script = handler.script();
jsbytecode* main = script->main();
if (!BytecodeIsJumpTarget(JSOp(*main))) {
MaybeIncrementCodeCoverageCounter(masm, script, main);
}
return true;
}
template <>
bool BaselineInterpreterCodeGen::emitHandleCodeCoverageAtPrologue() {
Label skipCoverage;
CodeOffset toggleOffset = masm.toggledJump(&skipCoverage);
masm.call(handler.codeCoverageAtPrologueLabel());
masm.bind(&skipCoverage);
return handler.codeCoverageOffsets().append(toggleOffset.offset());
}
template <>
void BaselineCompilerCodeGen::subtractScriptSlotsSize(Register reg,
Register scratch) {
uint32_t slotsSize = handler.script()->nslots() * sizeof(Value);
masm.subPtr(Imm32(slotsSize), reg);
}
template <>
void BaselineInterpreterCodeGen::subtractScriptSlotsSize(Register reg,
Register scratch) {
// reg = reg - script->nslots() * sizeof(Value)
MOZ_ASSERT(reg != scratch);
loadScript(scratch);
masm.loadPtr(Address(scratch, JSScript::offsetOfSharedData()), scratch);
masm.loadPtr(Address(scratch, SharedImmutableScriptData::offsetOfISD()),
scratch);
masm.load32(Address(scratch, ImmutableScriptData::offsetOfNslots()), scratch);
static_assert(sizeof(Value) == 8,
"shift by 3 below assumes Value is 8 bytes");
masm.lshiftPtr(Imm32(3), scratch);
masm.subPtr(scratch, reg);
}
template <>
void BaselineCompilerCodeGen::loadGlobalLexicalEnvironment(Register dest) {
MOZ_ASSERT(!handler.script()->hasNonSyntacticScope());
masm.movePtr(ImmGCPtr(&cx->global()->lexicalEnvironment()), dest);
}
template <>
void BaselineInterpreterCodeGen::loadGlobalLexicalEnvironment(Register dest) {
masm.loadGlobalObjectData(dest);
masm.loadPtr(Address(dest, GlobalObjectData::offsetOfLexicalEnvironment()),
dest);
}
template <>
void BaselineCompilerCodeGen::pushGlobalLexicalEnvironmentValue(
ValueOperand scratch) {
frame.push(ObjectValue(cx->global()->lexicalEnvironment()));
}
template <>
void BaselineInterpreterCodeGen::pushGlobalLexicalEnvironmentValue(
ValueOperand scratch) {
loadGlobalLexicalEnvironment(scratch.scratchReg());
masm.tagValue(JSVAL_TYPE_OBJECT, scratch.scratchReg(), scratch);
frame.push(scratch);
}
template <>
void BaselineCompilerCodeGen::loadGlobalThisValue(ValueOperand dest) {
JSObject* thisObj = cx->global()->lexicalEnvironment().thisObject();
masm.moveValue(ObjectValue(*thisObj), dest);
}
template <>
void BaselineInterpreterCodeGen::loadGlobalThisValue(ValueOperand dest) {
Register scratch = dest.scratchReg();
loadGlobalLexicalEnvironment(scratch);
static constexpr size_t SlotOffset =
GlobalLexicalEnvironmentObject::offsetOfThisValueSlot();
masm.loadValue(Address(scratch, SlotOffset), dest);
}
template <>
void BaselineCompilerCodeGen::pushScriptArg() {
pushArg(ImmGCPtr(handler.script()));
}
template <>
void BaselineInterpreterCodeGen::pushScriptArg() {
pushArg(frame.addressOfInterpreterScript());
}
template <>
void BaselineCompilerCodeGen::pushBytecodePCArg() {
pushArg(ImmPtr(handler.pc()));
}
template <>
void BaselineInterpreterCodeGen::pushBytecodePCArg() {
if (HasInterpreterPCReg()) {
pushArg(InterpreterPCReg);
} else {
pushArg(frame.addressOfInterpreterPC());
}
}
static gc::Cell* GetScriptGCThing(JSScript* script, jsbytecode* pc,
ScriptGCThingType type) {
switch (type) {
case ScriptGCThingType::Atom:
return script->getAtom(pc);
case ScriptGCThingType::String:
return script->getString(pc);
case ScriptGCThingType::RegExp:
return script->getRegExp(pc);
case ScriptGCThingType::Object:
return script->getObject(pc);
case ScriptGCThingType::Function:
return script->getFunction(pc);
case ScriptGCThingType::Scope:
return script->getScope(pc);
case ScriptGCThingType::BigInt:
return script->getBigInt(pc);
}
MOZ_CRASH("Unexpected GCThing type");
}
template <>
void BaselineCompilerCodeGen::loadScriptGCThing(ScriptGCThingType type,
Register dest,
Register scratch) {
gc::Cell* thing = GetScriptGCThing(handler.script(), handler.pc(), type);
masm.movePtr(ImmGCPtr(thing), dest);
}
template <>
void BaselineInterpreterCodeGen::loadScriptGCThing(ScriptGCThingType type,
Register dest,
Register scratch) {
MOZ_ASSERT(dest != scratch);
// Load the index in |scratch|.
LoadInt32Operand(masm, scratch);
// Load the GCCellPtr.
loadScript(dest);
masm.loadPtr(Address(dest, JSScript::offsetOfPrivateData()), dest);
masm.loadPtr(BaseIndex(dest, scratch, ScalePointer,
PrivateScriptData::offsetOfGCThings()),
dest);
// Clear the tag bits.
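// A GCCellPtr encodes the cell's trace kind in its low bits; the kind is
// statically known for each case here (see the static_asserts), so xoring
// with the known tag value is enough to clear it.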
switch (type) {
case ScriptGCThingType::Atom:
case ScriptGCThingType::String:
// Use xorPtr with a 32-bit immediate because it's more efficient than
// andPtr on 64-bit.
static_assert(uintptr_t(TraceKind::String) == 2,
"Unexpected tag bits for string GCCellPtr");
masm.xorPtr(Imm32(2), dest);
break;
case ScriptGCThingType::RegExp:
case ScriptGCThingType::Object:
case ScriptGCThingType::Function:
// No-op because GCCellPtr tag bits are zero for objects.
static_assert(uintptr_t(TraceKind::Object) == 0,
"Unexpected tag bits for object GCCellPtr");
break;
case ScriptGCThingType::BigInt:
// Use xorPtr with a 32-bit immediate because it's more efficient than
// andPtr on 64-bit.
static_assert(uintptr_t(TraceKind::BigInt) == 1,
"Unexpected tag bits for BigInt GCCellPtr");
masm.xorPtr(Imm32(1), dest);
break;
case ScriptGCThingType::Scope:
// Use xorPtr with a 32-bit immediate because it's more efficient than
// andPtr on 64-bit.
static_assert(uintptr_t(TraceKind::Scope) >= JS::OutOfLineTraceKindMask,
"Expected Scopes to have OutOfLineTraceKindMask tag");
masm.xorPtr(Imm32(JS::OutOfLineTraceKindMask), dest);
break;
}
#ifdef DEBUG
// Assert low bits are not set.
Label ok;
masm.branchTestPtr(Assembler::Zero, dest, Imm32(0b111), &ok);
masm.assumeUnreachable("GC pointer with tag bits set");
masm.bind(&ok);
#endif
}
template <>
void BaselineCompilerCodeGen::pushScriptGCThingArg(ScriptGCThingType type,
Register scratch1,
Register scratch2) {
gc::Cell* thing = GetScriptGCThing(handler.script(), handler.pc(), type);
pushArg(ImmGCPtr(thing));
}
template <>
void BaselineInterpreterCodeGen::pushScriptGCThingArg(ScriptGCThingType type,
Register scratch1,
Register scratch2) {
loadScriptGCThing(type, scratch1, scratch2);
pushArg(scratch1);
}
template <typename Handler>
void BaselineCodeGen<Handler>::pushScriptNameArg(Register scratch1,
Register scratch2) {
pushScriptGCThingArg(ScriptGCThingType::Atom, scratch1, scratch2);
}
template <>
void BaselineCompilerCodeGen::pushUint8BytecodeOperandArg(Register) {
MOZ_ASSERT(JOF_OPTYPE(JSOp(*handler.pc())) == JOF_UINT8);
pushArg(Imm32(GET_UINT8(handler.pc())));
}
template <>
void BaselineInterpreterCodeGen::pushUint8BytecodeOperandArg(Register scratch) {
LoadUint8Operand(masm, scratch);
pushArg(scratch);
}
template <>
void BaselineCompilerCodeGen::pushUint16BytecodeOperandArg(Register) {
MOZ_ASSERT(JOF_OPTYPE(JSOp(*handler.pc())) == JOF_UINT16);
pushArg(Imm32(GET_UINT16(handler.pc())));
}
template <>
void BaselineInterpreterCodeGen::pushUint16BytecodeOperandArg(
Register scratch) {
LoadUint16Operand(masm, scratch);
pushArg(scratch);
}
template <>
void BaselineCompilerCodeGen::loadInt32LengthBytecodeOperand(Register dest) {
uint32_t length = GET_UINT32(handler.pc());
MOZ_ASSERT(length <= INT32_MAX,
"the bytecode emitter must fail to compile code that would "
"produce a length exceeding int32_t range");
masm.move32(Imm32(AssertedCast<int32_t>(length)), dest);
}
template <>
void BaselineInterpreterCodeGen::loadInt32LengthBytecodeOperand(Register dest) {
LoadInt32Operand(masm, dest);
}
template <typename Handler>
bool BaselineCodeGen<Handler>::emitDebugPrologue() {
auto ifDebuggee = [this]() {
// Load pointer to BaselineFrame in R0.
masm.loadBaselineFramePtr(FramePointer, R0.scratchReg());
prepareVMCall();
pushArg(R0.scratchReg());
const RetAddrEntry::Kind kind = RetAddrEntry::Kind::DebugPrologue;
using Fn = bool (*)(JSContext*, BaselineFrame*);
if (!callVM<Fn, jit::DebugPrologue>(kind)) {
return false;
}
return true;
};
return emitDebugInstrumentation(ifDebuggee);
}
template <>
void BaselineCompilerCodeGen::emitInitFrameFields(Register nonFunctionEnv) {
Register scratch = R0.scratchReg();
Register scratch2 = R2.scratchReg();
MOZ_ASSERT(nonFunctionEnv != scratch && nonFunctionEnv != scratch2);
masm.store32(Imm32(0), frame.addressOfFlags());
if (handler.function()) {
masm.loadFunctionFromCalleeToken(frame.addressOfCalleeToken(), scratch);
masm.unboxObject(Address(scratch, JSFunction::offsetOfEnvironment()),
scratch);
masm.storePtr(scratch, frame.addressOfEnvironmentChain());
} else {
masm.storePtr(nonFunctionEnv, frame.addressOfEnvironmentChain());
}
// If cx->inlinedICScript contains an inlined ICScript (passed from
// the caller), take that ICScript and store it in the frame, then
// overwrite cx->inlinedICScript with nullptr.
Label notInlined, done;
masm.movePtr(ImmPtr(cx->addressOfInlinedICScript()), scratch);
Address inlinedAddr(scratch, 0);
masm.branch