/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/CodeGenerator.h"
#include "mozilla/Assertions.h"
#include "mozilla/Casting.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/EndianUtils.h"
#include "mozilla/EnumeratedArray.h"
#include "mozilla/EnumeratedRange.h"
#include "mozilla/EnumSet.h"
#include "mozilla/IntegerTypeTraits.h"
#include "mozilla/Latin1.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/SIMD.h"
#include <limits>
#include <type_traits>
#include <utility>
#include "jslibmath.h"
#include "jsmath.h"
#include "jsnum.h"
#include "builtin/MapObject.h"
#include "builtin/RegExp.h"
#include "builtin/String.h"
#include "irregexp/RegExpTypes.h"
#include "jit/ABIArgGenerator.h"
#include "jit/CompileInfo.h"
#include "jit/InlineScriptTree.h"
#include "jit/Invalidation.h"
#include "jit/IonGenericCallStub.h"
#include "jit/IonIC.h"
#include "jit/IonScript.h"
#include "jit/JitcodeMap.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitSpewer.h"
#include "jit/JitZone.h"
#include "jit/Linker.h"
#include "jit/MIRGenerator.h"
#include "jit/MoveEmitter.h"
#include "jit/RangeAnalysis.h"
#include "jit/RegExpStubConstants.h"
#include "jit/SafepointIndex.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/VMFunctions.h"
#include "jit/WarpSnapshot.h"
#include "js/ColumnNumber.h" // JS::LimitedColumnNumberOneOrigin
#include "js/experimental/JitInfo.h" // JSJit{Getter,Setter}CallArgs, JSJitMethodCallArgsTraits, JSJitInfo
#include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
#include "js/RegExpFlags.h" // JS::RegExpFlag
#include "js/ScalarType.h" // js::Scalar::Type
#include "proxy/DOMProxy.h"
#include "proxy/ScriptedProxyHandler.h"
#include "util/CheckedArithmetic.h"
#include "util/DifferentialTesting.h"
#include "util/Unicode.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/AsyncFunction.h"
#include "vm/AsyncIteration.h"
#include "vm/BuiltinObjectKind.h"
#include "vm/ConstantCompareOperand.h"
#include "vm/FunctionFlags.h" // js::FunctionFlags
#include "vm/Interpreter.h"
#include "vm/JSAtomUtils.h" // AtomizeString
#include "vm/MatchPairs.h"
#include "vm/RegExpObject.h"
#include "vm/RegExpStatics.h"
#include "vm/StaticStrings.h"
#include "vm/StringObject.h"
#include "vm/StringType.h"
#include "vm/TypedArrayObject.h"
#include "wasm/WasmCodegenConstants.h"
#include "wasm/WasmPI.h"
#include "wasm/WasmValType.h"
#ifdef MOZ_VTUNE
# include "vtune/VTuneWrapper.h"
#endif
#include "wasm/WasmBinary.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmGcObject.h"
#include "wasm/WasmStubs.h"
#include "builtin/Boolean-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
#include "jit/TemplateObject-inl.h"
#include "jit/VMFunctionList-inl.h"
#include "vm/BytecodeUtil-inl.h"
#include "vm/JSScript-inl.h"
#include "wasm/WasmInstance-inl.h"
using namespace js;
using namespace js::jit;
using mozilla::CheckedUint32;
using mozilla::DebugOnly;
using mozilla::FloatingPoint;
using mozilla::NegativeInfinity;
using mozilla::PositiveInfinity;
using JS::ExpandoAndGeneration;
namespace js {
namespace jit {
#ifdef CHECK_OSIPOINT_REGISTERS
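// Apply |op| to every live register in |liveRegs| together with that
// register's slot in the JitActivation's RegisterDump. |activation| holds
// the JitActivation; if that register is itself live, its original value
// (saved on the stack by the caller) is accessed through |scratch|.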
template <class Op>
static void HandleRegisterDump(Op op, MacroAssembler& masm,
LiveRegisterSet liveRegs, Register activation,
Register scratch) {
const size_t baseOffset = JitActivation::offsetOfRegs();
// Handle live GPRs.
for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
Register reg = *iter;
Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
if (reg == activation) {
// To use the original value of the activation register (that's
// now on top of the stack), we need the scratch register.
masm.push(scratch);
masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
op(scratch, dump);
masm.pop(scratch);
} else {
op(reg, dump);
}
}
// Handle live FPRs.
for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
FloatRegister reg = *iter;
Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
op(reg, dump);
}
}
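// The two operations used with HandleRegisterDump: StoreOp copies each live
// register into the activation's register dump, and VerifyOp compares the
// dumped values against the current register contents, branching to
// |failure_| on any mismatch.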
class StoreOp {
MacroAssembler& masm;
public:
explicit StoreOp(MacroAssembler& masm) : masm(masm) {}
void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
void operator()(FloatRegister reg, Address dump) {
if (reg.isDouble()) {
masm.storeDouble(reg, dump);
} else if (reg.isSingle()) {
masm.storeFloat32(reg, dump);
} else if (reg.isSimd128()) {
MOZ_CRASH("Unexpected case for SIMD");
} else {
MOZ_CRASH("Unexpected register type.");
}
}
};
class VerifyOp {
MacroAssembler& masm;
Label* failure_;
public:
VerifyOp(MacroAssembler& masm, Label* failure)
: masm(masm), failure_(failure) {}
void operator()(Register reg, Address dump) {
masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
}
void operator()(FloatRegister reg, Address dump) {
if (reg.isDouble()) {
ScratchDoubleScope scratch(masm);
masm.loadDouble(dump, scratch);
masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
} else if (reg.isSingle()) {
ScratchFloat32Scope scratch(masm);
masm.loadFloat32(dump, scratch);
masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
} else if (reg.isSimd128()) {
MOZ_CRASH("Unexpected case for SIMD");
} else {
MOZ_CRASH("Unexpected register type.");
}
}
};
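// The OSI-point register check uses the JitActivation's checkRegs value as a
// counter: resetOsiPointRegs stores 0, each checked VM call increments it
// after dumping the live registers (StoreAllLiveRegs), and the verification
// below requires it to be exactly 1. A value of 0 means there is nothing to
// check; any larger value means more than one VM call happened for a single
// LIR instruction.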
void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
// Ensure the live registers stored by callVM did not change between
// the call and this OsiPoint. Try-catch relies on this invariant.
// Load pointer to the JitActivation in a scratch register.
AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
Register scratch = allRegs.takeAny();
masm.push(scratch);
masm.loadJitActivation(scratch);
// If we should not check registers (because the instruction did not call
// into the VM, or a GC happened), we're done.
Label failure, done;
Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
  // Having more than one VM function call made in one visit function at
  // runtime is a sec-critical error, because if we conservatively assume
  // that one of the function calls can re-enter Ion, then the invalidation
  // process will potentially add a call at a random location, by patching
  // the code before the return address.
masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
// Set checkRegs to 0, so that we don't try to verify registers after we
// return from this script to the caller.
masm.store32(Imm32(0), checkRegs);
  // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
  // temps after calling into the VM. This is fine because no other
  // instructions (including this OsiPoint) will depend on them. Backtracking
  // can also use the same register for an input and an output; such registers
  // are marked as clobbered and shouldn't get checked.
LiveRegisterSet liveRegs;
liveRegs.set() = RegisterSet::Intersect(
safepoint->liveRegs().set(),
RegisterSet::Not(safepoint->clobberedRegs().set()));
VerifyOp op(masm, &failure);
HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
masm.jump(&done);
// Do not profile the callWithABI that occurs below. This is to avoid a
// rare corner case that occurs when profiling interacts with itself:
//
// When slow profiling assertions are turned on, FunctionBoundary ops
// (which update the profiler pseudo-stack) may emit a callVM, which
// forces them to have an osi point associated with them. The
// FunctionBoundary for inline function entry is added to the caller's
// graph with a PC from the caller's code, but during codegen it modifies
// Gecko Profiler instrumentation to add the callee as the current top-most
// script. When codegen gets to the OSIPoint, and the callWithABI below is
// emitted, the codegen thinks that the current frame is the callee, but
// the PC it's using from the OSIPoint refers to the caller. This causes
// the profiler instrumentation of the callWithABI below to ASSERT, since
// the script and pc are mismatched. To avoid this, we simply omit
// instrumentation for these callWithABIs.
// Any live register captured by a safepoint (other than temp registers)
// must remain unchanged between the call and the OsiPoint instruction.
masm.bind(&failure);
masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
masm.bind(&done);
masm.pop(scratch);
}
bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
if (!checkOsiPointRegisters) {
return false;
}
if (safepoint->liveRegs().emptyGeneral() &&
safepoint->liveRegs().emptyFloat()) {
return false; // No registers to check.
}
return true;
}
void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
if (!shouldVerifyOsiPointRegs(safepoint)) {
return;
}
// Set checkRegs to 0. If we perform a VM call, the instruction
// will set it to 1.
AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
Register scratch = allRegs.takeAny();
masm.push(scratch);
masm.loadJitActivation(scratch);
Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
masm.store32(Imm32(0), checkRegs);
masm.pop(scratch);
}
static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
// Store a copy of all live registers before performing the call.
  // When we reach the OsiPoint, we can use this to check that nothing
  // modified them in the meantime.
// Load pointer to the JitActivation in a scratch register.
AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
Register scratch = allRegs.takeAny();
masm.push(scratch);
masm.loadJitActivation(scratch);
Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
masm.add32(Imm32(1), checkRegs);
StoreOp op(masm);
HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
masm.pop(scratch);
}
#endif // CHECK_OSIPOINT_REGISTERS
// Before doing any call to C++ code, you should ensure that volatile
// registers are evicted by the register allocator.
void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins) {
TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
const VMFunctionData& fun = GetVMFunction(id);
// Stack is:
// ... frame ...
// [args]
#ifdef DEBUG
MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
pushedArgs_ = 0;
#endif
#ifdef CHECK_OSIPOINT_REGISTERS
if (shouldVerifyOsiPointRegs(ins->safepoint())) {
StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
}
#endif
#ifdef DEBUG
if (ins->mirRaw()) {
MOZ_ASSERT(ins->mirRaw()->isInstruction());
MInstruction* mir = ins->mirRaw()->toInstruction();
MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
// If this MIR instruction has an overridden AliasSet, set the JitRuntime's
// disallowArbitraryCode_ flag so we can assert this VMFunction doesn't call
// RunScript. Whitelist MInterruptCheck and MCheckOverRecursed because
// interrupt callbacks can call JS (chrome JS or shell testing functions).
bool isWhitelisted = mir->isInterruptCheck() || mir->isCheckOverRecursed();
if (!mir->hasDefaultAliasSet() && !isWhitelisted) {
const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
masm.move32(Imm32(1), ReturnReg);
masm.store32(ReturnReg, AbsoluteAddress(addr));
}
}
#endif
// Push an exit frame descriptor.
masm.PushFrameDescriptor(FrameType::IonJS);
  // Call the wrapper function. The wrapper is in charge of unwinding the
  // stack when returning from the call. Failures are handled with exceptions
  // based on the return value of the C functions. To guard the outcome of
  // the returned value, use another LIR instruction.
ensureOsiSpace();
uint32_t callOffset = masm.callJit(code);
markSafepointAt(callOffset, ins);
#ifdef DEBUG
// Reset the disallowArbitraryCode flag after the call.
{
const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
masm.push(ReturnReg);
masm.move32(Imm32(0), ReturnReg);
masm.store32(ReturnReg, AbsoluteAddress(addr));
masm.pop(ReturnReg);
}
#endif
// Pop rest of the exit frame and the arguments left on the stack.
int framePop =
sizeof(ExitFrameLayout) - ExitFrameLayout::bytesPoppedAfterCall();
masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
// Stack is:
// ... frame ...
}
template <typename Fn, Fn fn>
void CodeGenerator::callVM(LInstruction* ins) {
VMFunctionId id = VMFunctionToId<Fn, fn>::id;
callVMInternal(id, ins);
}
// ArgSeq stores the arguments for an OutOfLineCallVM.
//
// OutOfLineCallVM instances are created with the "oolCallVM" function. The
// third argument of this function is an instance of a class which provides a
// "generate" method in charge of pushing the arguments, with "pushArg", for a
// VMFunction.
//
// Such a list of arguments can be created by using the "ArgList" function,
// which creates one instance of "ArgSeq" whose argument types are inferred
// from the types of the actual arguments.
//
// The list of arguments must be written in the same order as if you were
// calling the function in C++.
//
// Example:
// ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
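//
// A typical use, together with oolCallVM and a store wrapper (this exact
// pattern appears in visitValueToString below):
//
//   using Fn = JSString* (*)(JSContext*, HandleValue);
//   OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
//       lir, ArgList(input), StoreRegisterTo(output));
//   ...
//   masm.bind(ool->rejoin());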
template <typename... ArgTypes>
class ArgSeq {
std::tuple<std::remove_reference_t<ArgTypes>...> args_;
template <std::size_t... ISeq>
inline void generate(CodeGenerator* codegen,
std::index_sequence<ISeq...>) const {
// Arguments are pushed in reverse order, from last argument to first
// argument.
(codegen->pushArg(std::get<sizeof...(ISeq) - 1 - ISeq>(args_)), ...);
}
public:
explicit ArgSeq(ArgTypes&&... args)
: args_(std::forward<ArgTypes>(args)...) {}
inline void generate(CodeGenerator* codegen) const {
generate(codegen, std::index_sequence_for<ArgTypes...>{});
}
#ifdef DEBUG
static constexpr size_t numArgs = sizeof...(ArgTypes);
#endif
};
template <typename... ArgTypes>
inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
}
// Store wrappers generate the right move of data after the VM call.
struct StoreNothing {
inline void generate(CodeGenerator* codegen) const {}
inline LiveRegisterSet clobbered() const {
return LiveRegisterSet(); // No register gets clobbered
}
};
class StoreRegisterTo {
private:
Register out_;
public:
explicit StoreRegisterTo(Register out) : out_(out) {}
inline void generate(CodeGenerator* codegen) const {
// It's okay to use storePointerResultTo here - the VMFunction wrapper
// ensures the upper bytes are zero for bool/int32 return values.
codegen->storePointerResultTo(out_);
}
inline LiveRegisterSet clobbered() const {
LiveRegisterSet set;
set.add(out_);
return set;
}
};
class StoreFloatRegisterTo {
private:
FloatRegister out_;
public:
explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}
inline void generate(CodeGenerator* codegen) const {
codegen->storeFloatResultTo(out_);
}
inline LiveRegisterSet clobbered() const {
LiveRegisterSet set;
set.add(out_);
return set;
}
};
template <typename Output>
class StoreValueTo_ {
private:
Output out_;
public:
explicit StoreValueTo_(const Output& out) : out_(out) {}
inline void generate(CodeGenerator* codegen) const {
codegen->storeResultValueTo(out_);
}
inline LiveRegisterSet clobbered() const {
LiveRegisterSet set;
set.add(out_);
return set;
}
};
template <typename Output>
StoreValueTo_<Output> StoreValueTo(const Output& out) {
return StoreValueTo_<Output>(out);
}
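// OutOfLineCallVM performs a VM call on an out-of-line path. When the path
// is taken, it saves the live registers, pushes the arguments with
// ArgSeq::generate, performs the call, moves the result with its
// StoreOutputTo wrapper, and restores the live registers before jumping back
// to the rejoin point (see visitOutOfLineCallVM below).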
template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
private:
LInstruction* lir_;
ArgSeq args_;
StoreOutputTo out_;
public:
OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
const StoreOutputTo& out)
: lir_(lir), args_(args), out_(out) {}
void accept(CodeGenerator* codegen) override {
codegen->visitOutOfLineCallVM(this);
}
LInstruction* lir() const { return lir_; }
const ArgSeq& args() const { return args_; }
const StoreOutputTo& out() const { return out_; }
};
template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
const StoreOutputTo& out) {
MOZ_ASSERT(lir->mirRaw());
MOZ_ASSERT(lir->mirRaw()->isInstruction());
#ifdef DEBUG
VMFunctionId id = VMFunctionToId<Fn, fn>::id;
const VMFunctionData& fun = GetVMFunction(id);
MOZ_ASSERT(fun.explicitArgs == args.numArgs);
MOZ_ASSERT(fun.returnsData() !=
(std::is_same_v<StoreOutputTo, StoreNothing>));
#endif
OutOfLineCode* ool = new (alloc())
OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
return ool;
}
template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
void CodeGenerator::visitOutOfLineCallVM(
OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
LInstruction* lir = ool->lir();
#ifdef JS_JITSPEW
JitSpewStart(JitSpew_Codegen, " # LIR=%s",
lir->opName());
if (const char* extra = lir->getExtraName()) {
JitSpewCont(JitSpew_Codegen, ":%s", extra);
}
JitSpewFin(JitSpew_Codegen);
#endif
perfSpewer_.recordInstruction(masm, lir);
saveLive(lir);
ool->args().generate(this);
callVM<Fn, fn>(lir);
ool->out().generate(this);
restoreLiveIgnore(lir, ool->out().clobbered());
masm.jump(ool->rejoin());
}
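// OutOfLineICFallback is the out-of-line fallback path of an IonIC: it calls
// the IC kind's update function through the VM, which can attach a new
// CacheIR stub before execution resumes at the rejoin point.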
class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
private:
LInstruction* lir_;
size_t cacheIndex_;
size_t cacheInfoIndex_;
public:
OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
size_t cacheInfoIndex)
: lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}
void bind(MacroAssembler* masm) override {
// The binding of the initial jump is done in
// CodeGenerator::visitOutOfLineICFallback.
}
size_t cacheIndex() const { return cacheIndex_; }
size_t cacheInfoIndex() const { return cacheInfoIndex_; }
LInstruction* lir() const { return lir_; }
void accept(CodeGenerator* codegen) override {
codegen->visitOutOfLineICFallback(this);
}
};
void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
if (cacheIndex == SIZE_MAX) {
masm.setOOM();
return;
}
DataPtr<IonIC> cache(this, cacheIndex);
MInstruction* mir = lir->mirRaw()->toInstruction();
cache->setScriptedLocation(mir->block()->info().script(),
mir->resumePoint()->pc());
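  // Emit the IC's entry: a patchable move (its offset is recorded in
  // icOffsetForJump so it can be patched once code addresses are known)
  // loads the address of the IC's current code pointer into the scratch
  // register, and the indirect jump through it enters the IC's code.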
Register temp = cache->scratchRegisterForEntryJump();
icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
masm.jump(Address(temp, 0));
MOZ_ASSERT(!icInfo_.empty());
OutOfLineICFallback* ool =
new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
addOutOfLineCode(ool, mir);
masm.bind(ool->rejoin());
cache->setRejoinOffset(CodeOffset(ool->rejoin()->offset()));
}
void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
LInstruction* lir = ool->lir();
size_t cacheIndex = ool->cacheIndex();
size_t cacheInfoIndex = ool->cacheInfoIndex();
DataPtr<IonIC> ic(this, cacheIndex);
// Register the location of the OOL path in the IC.
ic->setFallbackOffset(CodeOffset(masm.currentOffset()));
switch (ic->kind()) {
case CacheKind::GetProp:
case CacheKind::GetElem: {
IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();
saveLive(lir);
pushArg(getPropIC->id());
pushArg(getPropIC->value());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
HandleValue, HandleValue, MutableHandleValue);
callVM<Fn, IonGetPropertyIC::update>(lir);
StoreValueTo(getPropIC->output()).generate(this);
restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::GetPropSuper:
case CacheKind::GetElemSuper: {
IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();
saveLive(lir);
pushArg(getPropSuperIC->id());
pushArg(getPropSuperIC->receiver());
pushArg(getPropSuperIC->object());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn =
bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
HandleValue, HandleValue, MutableHandleValue);
callVM<Fn, IonGetPropSuperIC::update>(lir);
StoreValueTo(getPropSuperIC->output()).generate(this);
restoreLiveIgnore(lir,
StoreValueTo(getPropSuperIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::SetProp:
case CacheKind::SetElem: {
IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();
saveLive(lir);
pushArg(setPropIC->rhs());
pushArg(setPropIC->id());
pushArg(setPropIC->object());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
HandleObject, HandleValue, HandleValue);
callVM<Fn, IonSetPropertyIC::update>(lir);
restoreLive(lir);
masm.jump(ool->rejoin());
return;
}
case CacheKind::GetName: {
IonGetNameIC* getNameIC = ic->asGetNameIC();
saveLive(lir);
pushArg(getNameIC->environment());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
MutableHandleValue);
callVM<Fn, IonGetNameIC::update>(lir);
StoreValueTo(getNameIC->output()).generate(this);
restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::BindName: {
IonBindNameIC* bindNameIC = ic->asBindNameIC();
saveLive(lir);
pushArg(bindNameIC->environment());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn =
JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
callVM<Fn, IonBindNameIC::update>(lir);
StoreRegisterTo(bindNameIC->output()).generate(this);
restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::GetIterator: {
IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();
saveLive(lir);
pushArg(getIteratorIC->value());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
HandleValue);
callVM<Fn, IonGetIteratorIC::update>(lir);
StoreRegisterTo(getIteratorIC->output()).generate(this);
restoreLiveIgnore(lir,
StoreRegisterTo(getIteratorIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::OptimizeSpreadCall: {
auto* optimizeSpreadCallIC = ic->asOptimizeSpreadCallIC();
saveLive(lir);
pushArg(optimizeSpreadCallIC->value());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeSpreadCallIC*,
HandleValue, MutableHandleValue);
callVM<Fn, IonOptimizeSpreadCallIC::update>(lir);
StoreValueTo(optimizeSpreadCallIC->output()).generate(this);
restoreLiveIgnore(
lir, StoreValueTo(optimizeSpreadCallIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::In: {
IonInIC* inIC = ic->asInIC();
saveLive(lir);
pushArg(inIC->object());
pushArg(inIC->key());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
HandleObject, bool*);
callVM<Fn, IonInIC::update>(lir);
StoreRegisterTo(inIC->output()).generate(this);
restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::HasOwn: {
IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();
saveLive(lir);
pushArg(hasOwnIC->id());
pushArg(hasOwnIC->value());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
HandleValue, int32_t*);
callVM<Fn, IonHasOwnIC::update>(lir);
StoreRegisterTo(hasOwnIC->output()).generate(this);
restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::CheckPrivateField: {
IonCheckPrivateFieldIC* checkPrivateFieldIC = ic->asCheckPrivateFieldIC();
saveLive(lir);
pushArg(checkPrivateFieldIC->id());
pushArg(checkPrivateFieldIC->value());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonCheckPrivateFieldIC*,
HandleValue, HandleValue, bool*);
callVM<Fn, IonCheckPrivateFieldIC::update>(lir);
StoreRegisterTo(checkPrivateFieldIC->output()).generate(this);
restoreLiveIgnore(
lir, StoreRegisterTo(checkPrivateFieldIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::InstanceOf: {
IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();
saveLive(lir);
pushArg(hasInstanceOfIC->rhs());
pushArg(hasInstanceOfIC->lhs());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
HandleValue lhs, HandleObject rhs, bool* res);
callVM<Fn, IonInstanceOfIC::update>(lir);
StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
restoreLiveIgnore(lir,
StoreRegisterTo(hasInstanceOfIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::UnaryArith: {
IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();
saveLive(lir);
pushArg(unaryArithIC->input());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
IonUnaryArithIC* stub, HandleValue val,
MutableHandleValue res);
callVM<Fn, IonUnaryArithIC::update>(lir);
StoreValueTo(unaryArithIC->output()).generate(this);
restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::ToPropertyKey: {
IonToPropertyKeyIC* toPropertyKeyIC = ic->asToPropertyKeyIC();
saveLive(lir);
pushArg(toPropertyKeyIC->input());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
IonToPropertyKeyIC* ic, HandleValue val,
MutableHandleValue res);
callVM<Fn, IonToPropertyKeyIC::update>(lir);
StoreValueTo(toPropertyKeyIC->output()).generate(this);
restoreLiveIgnore(lir,
StoreValueTo(toPropertyKeyIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::BinaryArith: {
IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();
saveLive(lir);
pushArg(binaryArithIC->rhs());
pushArg(binaryArithIC->lhs());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext* cx, HandleScript outerScript,
IonBinaryArithIC* stub, HandleValue lhs,
HandleValue rhs, MutableHandleValue res);
callVM<Fn, IonBinaryArithIC::update>(lir);
StoreValueTo(binaryArithIC->output()).generate(this);
restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::Compare: {
IonCompareIC* compareIC = ic->asCompareIC();
saveLive(lir);
pushArg(compareIC->rhs());
pushArg(compareIC->lhs());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn =
bool (*)(JSContext* cx, HandleScript outerScript, IonCompareIC* stub,
HandleValue lhs, HandleValue rhs, bool* res);
callVM<Fn, IonCompareIC::update>(lir);
StoreRegisterTo(compareIC->output()).generate(this);
restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::CloseIter: {
IonCloseIterIC* closeIterIC = ic->asCloseIterIC();
saveLive(lir);
pushArg(closeIterIC->iter());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn =
bool (*)(JSContext*, HandleScript, IonCloseIterIC*, HandleObject);
callVM<Fn, IonCloseIterIC::update>(lir);
restoreLive(lir);
masm.jump(ool->rejoin());
return;
}
case CacheKind::OptimizeGetIterator: {
auto* optimizeGetIteratorIC = ic->asOptimizeGetIteratorIC();
saveLive(lir);
pushArg(optimizeGetIteratorIC->value());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonOptimizeGetIteratorIC*,
HandleValue, bool* res);
callVM<Fn, IonOptimizeGetIteratorIC::update>(lir);
StoreRegisterTo(optimizeGetIteratorIC->output()).generate(this);
restoreLiveIgnore(
lir, StoreRegisterTo(optimizeGetIteratorIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::Call:
case CacheKind::TypeOf:
case CacheKind::TypeOfEq:
case CacheKind::ToBool:
case CacheKind::LazyConstant:
case CacheKind::NewArray:
case CacheKind::NewObject:
case CacheKind::Lambda:
case CacheKind::GetImport:
MOZ_CRASH("Unsupported IC");
}
MOZ_CRASH();
}
StringObject* MNewStringObject::templateObj() const {
return &templateObj_->as<StringObject>();
}
CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
MacroAssembler* masm,
const wasm::CodeMetadata* wasmCodeMeta)
: CodeGeneratorSpecific(gen, graph, masm, wasmCodeMeta),
ionScriptLabels_(gen->alloc()),
ionNurseryObjectLabels_(gen->alloc()),
scriptCounts_(nullptr) {}
CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }
void CodeGenerator::visitValueToNumberInt32(LValueToNumberInt32* lir) {
ValueOperand operand = ToValue(lir->input());
Register output = ToRegister(lir->output());
FloatRegister temp = ToFloatRegister(lir->temp0());
Label fails;
masm.convertValueToInt32(operand, temp, output, &fails,
lir->mir()->needsNegativeZeroCheck(),
lir->mir()->conversion());
bailoutFrom(&fails, lir->snapshot());
}
void CodeGenerator::visitValueTruncateToInt32(LValueTruncateToInt32* lir) {
ValueOperand operand = ToValue(lir->input());
Register output = ToRegister(lir->output());
FloatRegister temp = ToFloatRegister(lir->temp0());
Register stringReg = ToRegister(lir->temp1());
auto* oolDouble = oolTruncateDouble(temp, output, lir->mir());
using Fn = bool (*)(JSContext*, JSString*, double*);
auto* oolString = oolCallVM<Fn, StringToNumber>(lir, ArgList(stringReg),
StoreFloatRegisterTo(temp));
Label* stringEntry = oolString->entry();
Label* stringRejoin = oolString->rejoin();
Label fails;
masm.truncateValueToInt32(operand, stringEntry, stringRejoin,
oolDouble->entry(), stringReg, temp, output,
&fails);
masm.bind(oolDouble->rejoin());
bailoutFrom(&fails, lir->snapshot());
}
void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
ValueOperand operand = ToValue(lir->input());
FloatRegister output = ToFloatRegister(lir->output());
Label fail;
masm.convertValueToDouble(operand, output, &fail);
bailoutFrom(&fail, lir->snapshot());
}
void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
ValueOperand operand = ToValue(lir->input());
FloatRegister output = ToFloatRegister(lir->output());
Label fail;
masm.convertValueToFloat32(operand, output, &fail);
bailoutFrom(&fail, lir->snapshot());
}
void CodeGenerator::visitValueToFloat16(LValueToFloat16* lir) {
ValueOperand operand = ToValue(lir->input());
Register temp = ToTempRegisterOrInvalid(lir->temp0());
FloatRegister output = ToFloatRegister(lir->output());
LiveRegisterSet volatileRegs;
if (!MacroAssembler::SupportsFloat64To16()) {
volatileRegs = liveVolatileRegs(lir);
}
Label fail;
masm.convertValueToFloat16(operand, output, temp, volatileRegs, &fail);
bailoutFrom(&fail, lir->snapshot());
}
void CodeGenerator::visitValueToBigInt(LValueToBigInt* lir) {
ValueOperand operand = ToValue(lir->input());
Register output = ToRegister(lir->output());
using Fn = BigInt* (*)(JSContext*, HandleValue);
auto* ool =
oolCallVM<Fn, ToBigInt>(lir, ArgList(operand), StoreRegisterTo(output));
Register tag = masm.extractTag(operand, output);
Label notBigInt, done;
masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
masm.unboxBigInt(operand, output);
masm.jump(&done);
masm.bind(&notBigInt);
masm.branchTestBoolean(Assembler::Equal, tag, ool->entry());
masm.branchTestString(Assembler::Equal, tag, ool->entry());
// ToBigInt(object) can have side-effects; all other types throw a TypeError.
bailout(lir->snapshot());
masm.bind(ool->rejoin());
masm.bind(&done);
}
void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
masm.convertInt32ToDouble(ToRegister(lir->input()),
ToFloatRegister(lir->output()));
}
void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
ToFloatRegister(lir->output()));
}
void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
ToFloatRegister(lir->output()));
}
void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
masm.convertInt32ToFloat32(ToRegister(lir->input()),
ToFloatRegister(lir->output()));
}
void CodeGenerator::visitDoubleToFloat16(LDoubleToFloat16* lir) {
LiveRegisterSet volatileRegs;
if (!MacroAssembler::SupportsFloat64To16()) {
volatileRegs = liveVolatileRegs(lir);
}
masm.convertDoubleToFloat16(
ToFloatRegister(lir->input()), ToFloatRegister(lir->output()),
ToTempRegisterOrInvalid(lir->temp0()), volatileRegs);
}
void CodeGenerator::visitDoubleToFloat32ToFloat16(
LDoubleToFloat32ToFloat16* lir) {
masm.convertDoubleToFloat16(
ToFloatRegister(lir->input()), ToFloatRegister(lir->output()),
ToRegister(lir->temp0()), ToRegister(lir->temp1()));
}
void CodeGenerator::visitFloat32ToFloat16(LFloat32ToFloat16* lir) {
LiveRegisterSet volatileRegs;
if (!MacroAssembler::SupportsFloat32To16()) {
volatileRegs = liveVolatileRegs(lir);
}
masm.convertFloat32ToFloat16(
ToFloatRegister(lir->input()), ToFloatRegister(lir->output()),
ToTempRegisterOrInvalid(lir->temp0()), volatileRegs);
}
void CodeGenerator::visitInt32ToFloat16(LInt32ToFloat16* lir) {
LiveRegisterSet volatileRegs;
if (!MacroAssembler::SupportsFloat32To16()) {
volatileRegs = liveVolatileRegs(lir);
}
masm.convertInt32ToFloat16(
ToRegister(lir->input()), ToFloatRegister(lir->output()),
ToTempRegisterOrInvalid(lir->temp0()), volatileRegs);
}
void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
Label fail;
FloatRegister input = ToFloatRegister(lir->input());
Register output = ToRegister(lir->output());
masm.convertDoubleToInt32(input, output, &fail,
lir->mir()->needsNegativeZeroCheck());
bailoutFrom(&fail, lir->snapshot());
}
void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
Label fail;
FloatRegister input = ToFloatRegister(lir->input());
Register output = ToRegister(lir->output());
masm.convertFloat32ToInt32(input, output, &fail,
lir->mir()->needsNegativeZeroCheck());
bailoutFrom(&fail, lir->snapshot());
}
void CodeGenerator::visitInt32ToIntPtr(LInt32ToIntPtr* lir) {
#ifdef JS_64BIT
// This LIR instruction is only used if the input can be negative.
MOZ_ASSERT(lir->mir()->canBeNegative());
Register output = ToRegister(lir->output());
const LAllocation* input = lir->input();
if (input->isGeneralReg()) {
masm.move32SignExtendToPtr(ToRegister(input), output);
} else {
masm.load32SignExtendToPtr(ToAddress(input), output);
}
#else
MOZ_CRASH("Not used on 32-bit platforms");
#endif
}
void CodeGenerator::visitNonNegativeIntPtrToInt32(
LNonNegativeIntPtrToInt32* lir) {
#ifdef JS_64BIT
Register output = ToRegister(lir->output());
MOZ_ASSERT(ToRegister(lir->input()) == output);
Label bail;
masm.guardNonNegativeIntPtrToInt32(output, &bail);
bailoutFrom(&bail, lir->snapshot());
#else
MOZ_CRASH("Not used on 32-bit platforms");
#endif
}
void CodeGenerator::visitIntPtrToDouble(LIntPtrToDouble* lir) {
Register input = ToRegister(lir->input());
FloatRegister output = ToFloatRegister(lir->output());
masm.convertIntPtrToDouble(input, output);
}
void CodeGenerator::visitAdjustDataViewLength(LAdjustDataViewLength* lir) {
Register output = ToRegister(lir->output());
MOZ_ASSERT(ToRegister(lir->input()) == output);
uint32_t byteSize = lir->mir()->byteSize();
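  // Compute length - (byteSize - 1), the number of valid start offsets for a
  // |byteSize|-byte access, bailing out if the result would be negative.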
#ifdef DEBUG
Label ok;
masm.branchTestPtr(Assembler::NotSigned, output, output, &ok);
masm.assumeUnreachable("Unexpected negative value in LAdjustDataViewLength");
masm.bind(&ok);
#endif
Label bail;
masm.branchSubPtr(Assembler::Signed, Imm32(byteSize - 1), output, &bail);
bailoutFrom(&bail, lir->snapshot());
}
void CodeGenerator::emitOOLTestObject(Register objreg,
Label* ifEmulatesUndefined,
Label* ifDoesntEmulateUndefined,
Register scratch) {
saveVolatile(scratch);
#if defined(DEBUG) || defined(FUZZING)
masm.loadPtr(AbsoluteAddress(
gen->runtime->addressOfHasSeenObjectEmulateUndefinedFuse()),
scratch);
using Fn = bool (*)(JSObject* obj, size_t fuseValue);
masm.setupAlignedABICall();
masm.passABIArg(objreg);
masm.passABIArg(scratch);
masm.callWithABI<Fn, js::EmulatesUndefinedCheckFuse>();
#else
using Fn = bool (*)(JSObject* obj);
masm.setupAlignedABICall();
masm.passABIArg(objreg);
masm.callWithABI<Fn, js::EmulatesUndefined>();
#endif
masm.storeCallPointerResult(scratch);
restoreVolatile(scratch);
masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
masm.jump(ifDoesntEmulateUndefined);
}
// Base out-of-line code generator for all tests of the truthiness of an
// object, where the object might not be truthy. (Recall that per spec all
// objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
// flag to permit objects to look like |undefined| in certain contexts,
// including in object truthiness testing.) We check truthiness inline except
// when we're testing it on a proxy, in which case out-of-line code will call
// EmulatesUndefined for a conclusive answer.
class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
Register objreg_;
Register scratch_;
Label* ifEmulatesUndefined_;
Label* ifDoesntEmulateUndefined_;
#ifdef DEBUG
bool initialized() { return ifEmulatesUndefined_ != nullptr; }
#endif
public:
OutOfLineTestObject()
: ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}
void accept(CodeGenerator* codegen) final {
MOZ_ASSERT(initialized());
codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
ifDoesntEmulateUndefined_, scratch_);
}
// Specify the register where the object to be tested is found, labels to
// jump to if the object is truthy or falsy, and a scratch register for
// use in the out-of-line path.
void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
Label* ifDoesntEmulateUndefined, Register scratch) {
MOZ_ASSERT(!initialized());
MOZ_ASSERT(ifEmulatesUndefined);
objreg_ = objreg;
scratch_ = scratch;
ifEmulatesUndefined_ = ifEmulatesUndefined;
ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
}
};
// A subclass of OutOfLineTestObject containing two extra labels, for use when
// the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
// code. The user should bind these labels in inline code, and specify them as
// targets via setInputAndTargets, as appropriate.
class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
Label label1_;
Label label2_;
public:
OutOfLineTestObjectWithLabels() = default;
Label* label1() { return &label1_; }
Label* label2() { return &label2_; }
};
void CodeGenerator::testObjectEmulatesUndefinedKernel(
Register objreg, Label* ifEmulatesUndefined,
Label* ifDoesntEmulateUndefined, Register scratch,
OutOfLineTestObject* ool) {
ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
scratch);
// Perform a fast-path check of the object's class flags if the object's
// not a proxy. Let out-of-line code handle the slow cases that require
// saving registers, making a function call, and restoring registers.
masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
ifEmulatesUndefined);
}
void CodeGenerator::branchTestObjectEmulatesUndefined(
Register objreg, Label* ifEmulatesUndefined,
Label* ifDoesntEmulateUndefined, Register scratch,
OutOfLineTestObject* ool) {
MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
"ifDoesntEmulateUndefined will be bound to the fallthrough path");
testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
ifDoesntEmulateUndefined, scratch, ool);
masm.bind(ifDoesntEmulateUndefined);
}
void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
Label* ifEmulatesUndefined,
Label* ifDoesntEmulateUndefined,
Register scratch,
OutOfLineTestObject* ool) {
testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
ifDoesntEmulateUndefined, scratch, ool);
masm.jump(ifDoesntEmulateUndefined);
}
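// Emit the truthiness test for one possible JSValueType of |value|.
// Undefined, null, symbol, and object are handled specially; the remaining
// types test the unboxed payload directly. When |skipTypeTest| is true, this
// is the last type that can occur, so the tag check is omitted and the code
// falls through on truthy.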
void CodeGenerator::testValueTruthyForType(
JSValueType type, ScratchTagScope& tag, const ValueOperand& value,
Register tempToUnbox, Register temp, FloatRegister floatTemp,
Label* ifTruthy, Label* ifFalsy, OutOfLineTestObject* ool,
bool skipTypeTest) {
#ifdef DEBUG
if (skipTypeTest) {
Label expected;
masm.branchTestType(Assembler::Equal, tag, type, &expected);
masm.assumeUnreachable("Unexpected Value type in testValueTruthyForType");
masm.bind(&expected);
}
#endif
// Handle irregular types first.
switch (type) {
case JSVAL_TYPE_UNDEFINED:
case JSVAL_TYPE_NULL:
// Undefined and null are falsy.
if (!skipTypeTest) {
masm.branchTestType(Assembler::Equal, tag, type, ifFalsy);
} else {
masm.jump(ifFalsy);
}
return;
case JSVAL_TYPE_SYMBOL:
// Symbols are truthy.
if (!skipTypeTest) {
masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
} else {
masm.jump(ifTruthy);
}
return;
case JSVAL_TYPE_OBJECT: {
Label notObject;
if (!skipTypeTest) {
masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
}
ScratchTagScopeRelease _(&tag);
Register objreg = masm.extractObject(value, tempToUnbox);
testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, temp, ool);
masm.bind(&notObject);
return;
}
default:
break;
}
// Check the type of the value (unless this is the last possible type).
Label differentType;
if (!skipTypeTest) {
masm.branchTestType(Assembler::NotEqual, tag, type, &differentType);
}
// Branch if the value is falsy.
ScratchTagScopeRelease _(&tag);
switch (type) {
case JSVAL_TYPE_BOOLEAN: {
masm.branchTestBooleanTruthy(false, value, ifFalsy);
break;
}
case JSVAL_TYPE_INT32: {
masm.branchTestInt32Truthy(false, value, ifFalsy);
break;
}
case JSVAL_TYPE_STRING: {
masm.branchTestStringTruthy(false, value, ifFalsy);
break;
}
case JSVAL_TYPE_BIGINT: {
masm.branchTestBigIntTruthy(false, value, ifFalsy);
break;
}
case JSVAL_TYPE_DOUBLE: {
masm.unboxDouble(value, floatTemp);
masm.branchTestDoubleTruthy(false, floatTemp, ifFalsy);
break;
}
default:
MOZ_CRASH("Unexpected value type");
}
// If we reach this point, the value is truthy. We fall through for
// truthy on the last test; otherwise, branch.
if (!skipTypeTest) {
masm.jump(ifTruthy);
}
masm.bind(&differentType);
}
void CodeGenerator::testValueTruthy(const ValueOperand& value,
Register tempToUnbox, Register temp,
FloatRegister floatTemp,
const TypeDataList& observedTypes,
Label* ifTruthy, Label* ifFalsy,
OutOfLineTestObject* ool) {
ScratchTagScope tag(masm, value);
masm.splitTagForTest(value, tag);
const std::initializer_list<JSValueType> defaultOrder = {
JSVAL_TYPE_UNDEFINED, JSVAL_TYPE_NULL, JSVAL_TYPE_BOOLEAN,
JSVAL_TYPE_INT32, JSVAL_TYPE_OBJECT, JSVAL_TYPE_STRING,
JSVAL_TYPE_DOUBLE, JSVAL_TYPE_SYMBOL, JSVAL_TYPE_BIGINT};
mozilla::EnumSet<JSValueType, uint32_t> remaining(defaultOrder);
// Generate tests for previously observed types first.
// The TypeDataList is sorted by descending frequency.
for (auto& observed : observedTypes) {
JSValueType type = observed.type();
remaining -= type;
testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
ifTruthy, ifFalsy, ool, /*skipTypeTest*/ false);
}
// Generate tests for remaining types.
for (auto type : defaultOrder) {
if (!remaining.contains(type)) {
continue;
}
remaining -= type;
// We don't need a type test for the last possible type.
bool skipTypeTest = remaining.isEmpty();
testValueTruthyForType(type, tag, value, tempToUnbox, temp, floatTemp,
ifTruthy, ifFalsy, ool, skipTypeTest);
}
MOZ_ASSERT(remaining.isEmpty());
// We fall through if the final test is truthy.
}
void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
Register input = ToRegister(test->input());
MBasicBlock* ifTrue = test->ifTrue();
MBasicBlock* ifFalse = test->ifFalse();
if (isNextBlock(ifFalse->lir())) {
masm.branchTest32(Assembler::NonZero, input, input,
getJumpLabelForBranch(ifTrue));
} else {
masm.branchTest32(Assembler::Zero, input, input,
getJumpLabelForBranch(ifFalse));
jumpToBlock(ifTrue);
}
}
void CodeGenerator::visitTestIPtrAndBranch(LTestIPtrAndBranch* test) {
Register input = ToRegister(test->input());
MBasicBlock* ifTrue = test->ifTrue();
MBasicBlock* ifFalse = test->ifFalse();
if (isNextBlock(ifFalse->lir())) {
masm.branchTestPtr(Assembler::NonZero, input, input,
getJumpLabelForBranch(ifTrue));
} else {
masm.branchTestPtr(Assembler::Zero, input, input,
getJumpLabelForBranch(ifFalse));
jumpToBlock(ifTrue);
}
}
void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* test) {
Register64 input = ToRegister64(test->input());
MBasicBlock* ifTrue = test->ifTrue();
MBasicBlock* ifFalse = test->ifFalse();
if (isNextBlock(ifFalse->lir())) {
masm.branchTest64(Assembler::NonZero, input, input,
getJumpLabelForBranch(ifTrue));
} else if (isNextBlock(ifTrue->lir())) {
masm.branchTest64(Assembler::Zero, input, input,
getJumpLabelForBranch(ifFalse));
} else {
masm.branchTest64(Assembler::NonZero, input, input,
getJumpLabelForBranch(ifTrue),
getJumpLabelForBranch(ifFalse));
}
}
void CodeGenerator::visitTestBIAndBranch(LTestBIAndBranch* lir) {
Register input = ToRegister(lir->input());
MBasicBlock* ifTrue = lir->ifTrue();
MBasicBlock* ifFalse = lir->ifFalse();
if (isNextBlock(ifFalse->lir())) {
masm.branchIfBigIntIsNonZero(input, getJumpLabelForBranch(ifTrue));
} else {
masm.branchIfBigIntIsZero(input, getJumpLabelForBranch(ifFalse));
jumpToBlock(ifTrue);
}
}
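// Swap the operand order of a comparison (note: this is not the same as
// inverting the condition): a < b is equivalent to b > a, while Equal and
// NotEqual are symmetric and therefore unchanged. Used below when the
// right-hand operand lives in memory and must be passed as the left-hand
// argument to the MacroAssembler.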
static Assembler::Condition ReverseCondition(Assembler::Condition condition) {
switch (condition) {
case Assembler::Equal:
case Assembler::NotEqual:
return condition;
case Assembler::Above:
return Assembler::Below;
case Assembler::AboveOrEqual:
return Assembler::BelowOrEqual;
case Assembler::Below:
return Assembler::Above;
case Assembler::BelowOrEqual:
return Assembler::AboveOrEqual;
case Assembler::GreaterThan:
return Assembler::LessThan;
case Assembler::GreaterThanOrEqual:
return Assembler::LessThanOrEqual;
case Assembler::LessThan:
return Assembler::GreaterThan;
case Assembler::LessThanOrEqual:
return Assembler::GreaterThanOrEqual;
default:
break;
}
MOZ_CRASH("unhandled condition");
}
void CodeGenerator::visitCompare(LCompare* comp) {
MCompare::CompareType compareType = comp->mir()->compareType();
Assembler::Condition cond = JSOpToCondition(compareType, comp->jsop());
Register left = ToRegister(comp->left());
const LAllocation* right = comp->right();
Register output = ToRegister(comp->output());
if (compareType == MCompare::Compare_Object ||
compareType == MCompare::Compare_Symbol ||
compareType == MCompare::Compare_IntPtr ||
compareType == MCompare::Compare_UIntPtr ||
compareType == MCompare::Compare_WasmAnyRef) {
if (right->isConstant()) {
MOZ_ASSERT(compareType == MCompare::Compare_IntPtr ||
compareType == MCompare::Compare_UIntPtr);
masm.cmpPtrSet(cond, left, ImmWord(ToInt32(right)), output);
} else if (right->isGeneralReg()) {
masm.cmpPtrSet(cond, left, ToRegister(right), output);
} else {
masm.cmpPtrSet(ReverseCondition(cond), ToAddress(right), left, output);
}
return;
}
MOZ_ASSERT(compareType == MCompare::Compare_Int32 ||
compareType == MCompare::Compare_UInt32);
if (right->isConstant()) {
masm.cmp32Set(cond, left, Imm32(ToInt32(right)), output);
} else if (right->isGeneralReg()) {
masm.cmp32Set(cond, left, ToRegister(right), output);
} else {
masm.cmp32Set(ReverseCondition(cond), ToAddress(right), left, output);
}
}
void CodeGenerator::visitStrictConstantCompareInt32(
LStrictConstantCompareInt32* comp) {
ValueOperand value = ToValue(comp->value());
int32_t constantVal = comp->mir()->constant();
JSOp op = comp->mir()->jsop();
Register output = ToRegister(comp->output());
Label fail, pass, done, maybeDouble;
masm.branchTestInt32(Assembler::NotEqual, value, &maybeDouble);
masm.branch32(JSOpToCondition(op, true), value.payloadOrValueReg(),
Imm32(constantVal), &pass,
MacroAssembler::LhsHighBitsAreClean::No);
masm.jump(&fail);
masm.bind(&maybeDouble);
{
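    // Not an int32. If the value is a double, compare it numerically against
    // the constant; any other type is never strictly equal to an int32
    // constant, so StrictEq fails immediately and StrictNe passes.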
FloatRegister unboxedValue = ToFloatRegister(comp->temp0());
FloatRegister floatPayload = ToFloatRegister(comp->temp1());
masm.branchTestDouble(Assembler::NotEqual, value,
op == JSOp::StrictEq ? &fail : &pass);
masm.unboxDouble(value, unboxedValue);
masm.loadConstantDouble(double(constantVal), floatPayload);
masm.branchDouble(JSOpToDoubleCondition(op), unboxedValue, floatPayload,
&pass);
}
masm.bind(&fail);
masm.move32(Imm32(0), output);
masm.jump(&done);
masm.bind(&pass);
masm.move32(Imm32(1), output);
masm.bind(&done);
}
void CodeGenerator::visitStrictConstantCompareBoolean(
LStrictConstantCompareBoolean* comp) {
ValueOperand value = ToValue(comp->value());
bool constantVal = comp->mir()->constant();
JSOp op = comp->mir()->jsop();
Register output = ToRegister(comp->output());
Label fail, pass, done;
Register boolUnboxed = ToRegister(comp->temp0());
masm.fallibleUnboxBoolean(value, boolUnboxed,
op == JSOp::StrictEq ? &fail : &pass);
masm.branch32(JSOpToCondition(op, true), boolUnboxed, Imm32(constantVal),
&pass);
masm.bind(&fail);
masm.move32(Imm32(0), output);
masm.jump(&done);
masm.bind(&pass);
masm.move32(Imm32(1), output);
masm.bind(&done);
}
void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
MCompare::CompareType compareType = comp->cmpMir()->compareType();
Assembler::Condition cond = JSOpToCondition(compareType, comp->jsop());
Register left = ToRegister(comp->left());
const LAllocation* right = comp->right();
MBasicBlock* ifTrue = comp->ifTrue();
MBasicBlock* ifFalse = comp->ifFalse();
// If the next block is the true case, invert the condition to fall through.
Label* label;
if (isNextBlock(ifTrue->lir())) {
cond = Assembler::InvertCondition(cond);
label = getJumpLabelForBranch(ifFalse);
} else {
label = getJumpLabelForBranch(ifTrue);
}
if (compareType == MCompare::Compare_Object ||
compareType == MCompare::Compare_Symbol ||
compareType == MCompare::Compare_IntPtr ||
compareType == MCompare::Compare_UIntPtr ||
compareType == MCompare::Compare_WasmAnyRef) {
if (right->isConstant()) {
MOZ_ASSERT(compareType == MCompare::Compare_IntPtr ||
compareType == MCompare::Compare_UIntPtr);
masm.branchPtr(cond, left, ImmWord(ToInt32(right)), label);
} else if (right->isGeneralReg()) {
masm.branchPtr(cond, left, ToRegister(right), label);
} else {
masm.branchPtr(ReverseCondition(cond), ToAddress(right), left, label);
}
} else {
MOZ_ASSERT(compareType == MCompare::Compare_Int32 ||
compareType == MCompare::Compare_UInt32);
if (right->isConstant()) {
masm.branch32(cond, left, Imm32(ToInt32(right)), label);
} else if (right->isGeneralReg()) {
masm.branch32(cond, left, ToRegister(right), label);
} else {
masm.branch32(ReverseCondition(cond), ToAddress(right), left, label);
}
}
if (!isNextBlock(ifTrue->lir())) {
jumpToBlock(ifFalse);
}
}
void CodeGenerator::visitCompareI64(LCompareI64* lir) {
MCompare::CompareType compareType = lir->mir()->compareType();
MOZ_ASSERT(compareType == MCompare::Compare_Int64 ||
compareType == MCompare::Compare_UInt64);
bool isSigned = compareType == MCompare::Compare_Int64;
Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
Register64 left = ToRegister64(lir->left());
LInt64Allocation right = lir->right();
Register output = ToRegister(lir->output());
if (IsConstant(right)) {
masm.cmp64Set(cond, left, Imm64(ToInt64(right)), output);
} else if (IsRegister64(right)) {
masm.cmp64Set(cond, left, ToRegister64(right), output);
} else {
masm.cmp64Set(ReverseCondition(cond), ToAddress(right), left, output);
}
}
void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
MCompare::CompareType compareType = lir->cmpMir()->compareType();
MOZ_ASSERT(compareType == MCompare::Compare_Int64 ||
compareType == MCompare::Compare_UInt64);
bool isSigned = compareType == MCompare::Compare_Int64;
Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
Register64 left = ToRegister64(lir->left());
LInt64Allocation right = lir->right();
MBasicBlock* ifTrue = lir->ifTrue();
MBasicBlock* ifFalse = lir->ifFalse();
Label* trueLabel = getJumpLabelForBranch(ifTrue);
Label* falseLabel = getJumpLabelForBranch(ifFalse);
// If the next block is the true case, invert the condition to fall through.
if (isNextBlock(ifTrue->lir())) {
cond = Assembler::InvertCondition(cond);
trueLabel = falseLabel;
falseLabel = nullptr;
} else if (isNextBlock(ifFalse->lir())) {
falseLabel = nullptr;
}
if (IsConstant(right)) {
masm.branch64(cond, left, Imm64(ToInt64(right)), trueLabel, falseLabel);
} else if (IsRegister64(right)) {
masm.branch64(cond, left, ToRegister64(right), trueLabel, falseLabel);
} else {
masm.branch64(ReverseCondition(cond), ToAddress(right), left, trueLabel,
falseLabel);
}
}
void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
Assembler::Condition cond = baab->cond();
MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
Register left = ToRegister(baab->left());
const LAllocation* right = baab->right();
MBasicBlock* ifTrue = baab->ifTrue();
MBasicBlock* ifFalse = baab->ifFalse();
// If the next block is the true case, invert the condition to fall through.
Label* label;
if (isNextBlock(ifTrue->lir())) {
cond = Assembler::InvertCondition(cond);
label = getJumpLabelForBranch(ifFalse);
} else {
label = getJumpLabelForBranch(ifTrue);
}
if (right->isConstant()) {
masm.branchTest32(cond, left, Imm32(ToInt32(right)), label);
} else {
masm.branchTest32(cond, left, ToRegister(right), label);
}
if (!isNextBlock(ifTrue->lir())) {
jumpToBlock(ifFalse);
}
}
void CodeGenerator::visitBitAnd64AndBranch(LBitAnd64AndBranch* baab) {
Assembler::Condition cond = baab->cond();
MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
Register64 left = ToRegister64(baab->left());
LInt64Allocation right = baab->right();
MBasicBlock* ifTrue = baab->ifTrue();
MBasicBlock* ifFalse = baab->ifFalse();
Label* trueLabel = getJumpLabelForBranch(ifTrue);
Label* falseLabel = getJumpLabelForBranch(ifFalse);
// If the next block is the true case, invert the condition to fall through.
if (isNextBlock(ifTrue->lir())) {
cond = Assembler::InvertCondition(cond);
trueLabel = falseLabel;
falseLabel = nullptr;
} else if (isNextBlock(ifFalse->lir())) {
falseLabel = nullptr;
}
if (IsConstant(right)) {
masm.branchTest64(cond, left, Imm64(ToInt64(right)), trueLabel, falseLabel);
} else {
masm.branchTest64(cond, left, ToRegister64(right), trueLabel, falseLabel);
}
}
void CodeGenerator::assertObjectDoesNotEmulateUndefined(
Register input, Register temp, const MInstruction* mir) {
#if defined(DEBUG) || defined(FUZZING)
// Validate that the object indeed doesn't have the emulates undefined flag.
auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
addOutOfLineCode(ool, mir);
Label* doesNotEmulateUndefined = ool->label1();
Label* emulatesUndefined = ool->label2();
testObjectEmulatesUndefined(input, emulatesUndefined, doesNotEmulateUndefined,
temp, ool);
masm.bind(emulatesUndefined);
masm.assumeUnreachable(
"Found an object emulating undefined while the fuse is intact");
masm.bind(doesNotEmulateUndefined);
#endif
}
void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
Register input = ToRegister(lir->input());
Register temp = ToRegister(lir->temp0());
bool intact = hasSeenObjectEmulateUndefinedFuseIntactAndDependencyNoted();
if (intact) {
assertObjectDoesNotEmulateUndefined(input, temp, lir->mir());
// Bug 1874905: It would be fantastic if this could be optimized out
masm.jump(truthy);
} else {
auto* ool = new (alloc()) OutOfLineTestObject();
addOutOfLineCode(ool, lir->mir());
testObjectEmulatesUndefined(input, falsy, truthy, temp, ool);
}
}
void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
auto* ool = new (alloc()) OutOfLineTestObject();
addOutOfLineCode(ool, lir->mir());
Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
ValueOperand input = ToValue(lir->input());
Register tempToUnbox = ToTempUnboxRegister(lir->temp1());
Register temp = ToRegister(lir->temp2());
FloatRegister floatTemp = ToFloatRegister(lir->temp0());
const TypeDataList& observedTypes = lir->mir()->observedTypes();
testValueTruthy(input, tempToUnbox, temp, floatTemp, observedTypes, truthy,
falsy, ool);
masm.jump(truthy);
}
void CodeGenerator::visitBooleanToString(LBooleanToString* lir) {
Register input = ToRegister(lir->input());
Register output = ToRegister(lir->output());
const JSAtomState& names = gen->runtime->names();
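// In effect (illustrative sketch):
//   output = input ? names.true_ : names.false_;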
Label true_, done;
masm.branchTest32(Assembler::NonZero, input, input, &true_);
masm.movePtr(ImmGCPtr(names.false_), output);
masm.jump(&done);
masm.bind(&true_);
masm.movePtr(ImmGCPtr(names.true_), output);
masm.bind(&done);
}
void CodeGenerator::visitIntToString(LIntToString* lir) {
Register input = ToRegister(lir->input());
Register output = ToRegister(lir->output());
using Fn = JSLinearString* (*)(JSContext*, int);
OutOfLineCode* ool = oolCallVM<Fn, Int32ToString<CanGC>>(
lir, ArgList(input), StoreRegisterTo(output));
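// Fast-path sketch (method name approximate): small non-negative ints are
// interned in the static strings table, so no allocation is needed:
//   output = staticStrings.getInt(input);  // jumps to the VM call otherwise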
masm.lookupStaticIntString(input, output, gen->runtime->staticStrings(),
ool->entry());
masm.bind(ool->rejoin());
}
void CodeGenerator::visitDoubleToString(LDoubleToString* lir) {
FloatRegister input = ToFloatRegister(lir->input());
Register temp = ToRegister(lir->temp0());
Register output = ToRegister(lir->output());
using Fn = JSString* (*)(JSContext*, double);
OutOfLineCode* ool = oolCallVM<Fn, NumberToString<CanGC>>(
lir, ArgList(input), StoreRegisterTo(output));
// Try double to integer conversion and run integer to string code.
masm.convertDoubleToInt32(input, temp, ool->entry(), false);
masm.lookupStaticIntString(temp, output, gen->runtime->staticStrings(),
ool->entry());
masm.bind(ool->rejoin());
}
void CodeGenerator::visitValueToString(LValueToString* lir) {
ValueOperand input = ToValue(lir->input());
Register output = ToRegister(lir->output());
using Fn = JSString* (*)(JSContext*, HandleValue);
OutOfLineCode* ool = oolCallVM<Fn, ToStringSlow<CanGC>>(
lir, ArgList(input), StoreRegisterTo(output));
Label done;
Register tag = masm.extractTag(input, output);
const JSAtomState& names = gen->runtime->names();
// String
{
Label notString;
masm.branchTestString(Assembler::NotEqual, tag, &notString);
masm.unboxString(input, output);
masm.jump(&done);
masm.bind(&notString);
}
// Integer
{
Label notInteger;
masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
Register unboxed = ToTempUnboxRegister(lir->temp0());
unboxed = masm.extractInt32(input, unboxed);
masm.lookupStaticIntString(unboxed, output, gen->runtime->staticStrings(),
ool->entry());
masm.jump(&done);
masm.bind(&notInteger);
}
// Double
{
// Note: no fast path. It would need two extra registers and could only
// convert doubles that fit in an integer and are smaller than
// StaticStrings::INT_STATIC_LIMIT.
masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
}
// Undefined
{
Label notUndefined;
masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
masm.movePtr(ImmGCPtr(names.undefined), output);
masm.jump(&done);
masm.bind(&notUndefined);
}
// Null
{
Label notNull;
masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
masm.movePtr(ImmGCPtr(names.null), output);
masm.jump(&done);
masm.bind(&notNull);
}
// Boolean
{
Label notBoolean, true_;
masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
masm.branchTestBooleanTruthy(true, input, &true_);
masm.movePtr(ImmGCPtr(names.false_), output);
masm.jump(&done);
masm.bind(&true_);
masm.movePtr(ImmGCPtr(names.true_), output);
masm.jump(&done);
masm.bind(&notBoolean);
}
// Objects/symbols are only possible when |mir->mightHaveSideEffects()|.
if (lir->mir()->mightHaveSideEffects()) {
// Object
if (lir->mir()->supportSideEffects()) {
masm.branchTestObject(Assembler::Equal, tag, ool->entry());
} else {
// Bail.
MOZ_ASSERT(lir->mir()->needsSnapshot());
Label bail;
masm.branchTestObject(Assembler::Equal, tag, &bail);
bailoutFrom(&bail, lir->snapshot());
}
// Symbol
if (lir->mir()->supportSideEffects()) {
masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());
} else {
// Bail.
MOZ_ASSERT(lir->mir()->needsSnapshot());
Label bail;
masm.branchTestSymbol(Assembler::Equal, tag, &bail);
bailoutFrom(&bail, lir->snapshot());
}
}
// BigInt
{
// No fastpath currently implemented.
masm.branchTestBigInt(Assembler::Equal, tag, ool->entry());
}
masm.assumeUnreachable("Unexpected type for LValueToString.");
masm.bind(&done);
masm.bind(ool->rejoin());
}
using StoreBufferMutationFn = void (*)(js::gc::StoreBuffer*, js::gc::Cell**);
static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder,
size_t offset, Register buffer,
LiveGeneralRegisterSet& liveVolatiles,
StoreBufferMutationFn fun) {
Label callVM;
Label exit;
// Call into the VM to barrier the write. The only registers that need to
// be preserved are those in liveVolatiles, so once they are saved on the
// stack all volatile registers are available for use.
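// In effect (illustrative sketch), the emitted code performs:
//   fun(buffer, reinterpret_cast<js::gc::Cell**>(uintptr_t(holder) + offset));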
masm.bind(&callVM);
masm.PushRegsInMask(liveVolatiles);
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
regs.takeUnchecked(buffer);
regs.takeUnchecked(holder);
Register addrReg = regs.takeAny();
masm.computeEffectiveAddress(Address(holder, offset), addrReg);
bool needExtraReg = !regs.hasAny<GeneralRegisterSet::DefaultType>();
if (needExtraReg) {
masm.push(holder);
masm.setupUnalignedABICall(holder);
} else {
masm.setupUnalignedABICall(regs.takeAny());
}
masm.passABIArg(buffer);
masm.passABIArg(addrReg);
masm.callWithABI(DynamicFunction<StoreBufferMutationFn>(fun),
ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
if (needExtraReg) {
masm.pop(holder);
}
masm.PopRegsInMask(liveVolatiles);
masm.bind(&exit);
}
// Warning: this function modifies prev and next.
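// Illustrative sketch of the logic implemented below (helper names
// approximate):
//   if (StoreBuffer* buffer = next->storeBuffer()) {
//     if (!prev || !prev->storeBuffer()) {
//       buffer->putCell(cellp);  // start tracking the new edge
//     }
//   } else if (prev && prev->storeBuffer()) {
//     prev->storeBuffer()->removeCell(cellp);  // stop tracking the old edge
//   }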
static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder,
size_t offset, Register prev, Register next,
LiveGeneralRegisterSet& liveVolatiles) {
Label exit;
Label checkRemove, putCell;
// if (next && (buffer = next->storeBuffer()))
// but we never pass in nullptr for next.
Register storebuffer = next;
masm.loadStoreBuffer(next, storebuffer);
masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove);
// if (prev && prev->storeBuffer())
masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell);
masm.loadStoreBuffer(prev, prev);
masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit);
// buffer->putCell(cellp)
masm.bind(&putCell);
EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
JSString::addCellAddressToStoreBuffer);
masm.jump(&exit);
// if (prev && (buffer = prev->storeBuffer()))
masm.bind(&checkRemove);
masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit);
masm.loadStoreBuffer(prev, storebuffer);
masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit);
EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles,
JSString::removeCellAddressFromStoreBuffer);
masm.bind(&exit);
}
void CodeGenerator::visitRegExp(LRegExp* lir) {
Register output = ToRegister(lir->output());
Register temp = ToRegister(lir->temp0());
JSObject* source = lir->mir()->source();
using Fn = JSObject* (*)(JSContext*, Handle<RegExpObject*>);
OutOfLineCode* ool = oolCallVM<Fn, CloneRegExpObject>(
lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output));
if (lir->mir()->hasShared()) {
TemplateObject templateObject(source);
masm.createGCObject(output, temp, templateObject, gc::Heap::Default,
ool->entry());
} else {
masm.jump(ool->entry());
}
masm.bind(ool->rejoin());
}
static constexpr int32_t RegExpPairsVectorStartOffset(
int32_t inputOutputDataStartOffset) {
return inputOutputDataStartOffset + int32_t(InputOutputDataSize) +
int32_t(sizeof(MatchPairs));
}
static Address RegExpPairCountAddress(MacroAssembler& masm,
int32_t inputOutputDataStartOffset) {
return Address(FramePointer, inputOutputDataStartOffset +
int32_t(InputOutputDataSize) +
MatchPairs::offsetOfPairCount());
}
static void UpdateRegExpStatics(MacroAssembler& masm, Register regexp,
Register input, Register lastIndex,
Register staticsReg, Register temp1,
Register temp2, gc::Heap initialStringHeap,
LiveGeneralRegisterSet& volatileRegs) {
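// In effect (illustrative sketch), this lazily records the match state:
//   statics->pendingInput = input;   // pre/post-barriered below
//   statics->matchesInput = input;   // pre/post-barriered below
//   statics->lazyIndex = lastIndex;
//   statics->pendingLazyEvaluation = 1;
//   statics->lazySource = shared->source;
//   statics->lazyFlags = shared->flags;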
Address pendingInputAddress(staticsReg,
RegExpStatics::offsetOfPendingInput());
Address matchesInputAddress(staticsReg,
RegExpStatics::offsetOfMatchesInput());
Address lazySourceAddress(staticsReg, RegExpStatics::offsetOfLazySource());
Address lazyIndexAddress(staticsReg, RegExpStatics::offsetOfLazyIndex());
masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);
if (initialStringHeap == gc::Heap::Default) {
// Writing into RegExpStatics tenured memory; must post-barrier.
if (staticsReg.volatile_()) {
volatileRegs.add(staticsReg);
}
masm.loadPtr(pendingInputAddress, temp1);
masm.storePtr(input, pendingInputAddress);
masm.movePtr(input, temp2);
EmitPostWriteBarrierS(masm, staticsReg,
RegExpStatics::offsetOfPendingInput(),
temp1 /* prev */, temp2 /* next */, volatileRegs);
masm.loadPtr(matchesInputAddress, temp1);
masm.storePtr(input, matchesInputAddress);
masm.movePtr(input, temp2);
EmitPostWriteBarrierS(masm, staticsReg,
RegExpStatics::offsetOfMatchesInput(),
temp1 /* prev */, temp2 /* next */, volatileRegs);
} else {
masm.debugAssertGCThingIsTenured(input, temp1);
masm.storePtr(input, pendingInputAddress);
masm.storePtr(input, matchesInputAddress);
}
masm.storePtr(lastIndex,
Address(staticsReg, RegExpStatics::offsetOfLazyIndex()));
masm.store32(
Imm32(1),
Address(staticsReg, RegExpStatics::offsetOfPendingLazyEvaluation()));
masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
RegExpObject::SHARED_SLOT)),
temp1, JSVAL_TYPE_PRIVATE_GCTHING);
masm.loadPtr(Address(temp1, RegExpShared::offsetOfSource()), temp2);
masm.storePtr(temp2, lazySourceAddress);
static_assert(sizeof(JS::RegExpFlags) == 1, "load size must match flag size");
masm.load8ZeroExtend(Address(temp1, RegExpShared::offsetOfFlags()), temp2);
masm.store8(temp2, Address(staticsReg, RegExpStatics::offsetOfLazyFlags()));
}
// Prepare an InputOutputData and optional MatchPairs for which space has
// already been allocated on the stack, and try to execute a RegExp on a
// string input. If the RegExp was executed successfully and matched the
// input, fall through. Otherwise, jump to notFound or failure.
//
// inputOutputDataStartOffset is the offset relative to the frame pointer
// register. This offset is negative for the RegExpExecTest stub.
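// In outline, the emitted code performs (illustrative sketch; helper names
// approximate):
//   if (input->isRope() && !(input = LinearizeForCharAccessPure(input)))
//     goto failure;
//   RegExpShared* shared = regexp->getShared();  // undefined => failure
//   if (shared->patternIsAtom())
//     status = ExecuteRegExpAtomRaw(shared, input, lastIndex, &matchPairs);
//   else
//     status = shared->jitCode(isLatin1)(&inputOutputData);  // null => failure
//   if (status == Success_NotFound) goto notFound;
//   if (status == Error) goto failure;
//   UpdateRegExpStatics(...);  // lazily record the match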
static bool PrepareAndExecuteRegExp(MacroAssembler& masm, Register regexp,
Register input, Register lastIndex,
Register temp1, Register temp2,
Register temp3,
int32_t inputOutputDataStartOffset,
gc::Heap initialStringHeap, Label* notFound,
Label* failure) {
JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");
using irregexp::InputOutputData;
/*
* [SMDOC] Stack layout for PrepareAndExecuteRegExp
*
* Before this function is called, the caller is responsible for
* allocating enough stack space for the following data:
*
* inputOutputDataStartOffset +-----> +---------------+
* |InputOutputData|
* inputStartAddress +----------> inputStart|
* inputEndAddress +----------> inputEnd|
* startIndexAddress +----------> startIndex|
* matchesAddress +----------> matches|-----+
* +---------------+ |
* matchPairs(Address|Offset) +-----> +---------------+ <--+
* | MatchPairs |
* pairCountAddress +----------> count |
* pairsPointerAddress +----------> pairs |-----+
* +---------------+ |
* pairsArray(Address|Offset) +-----> +---------------+ <--+
* | MatchPair |
* firstMatchStartAddress +----------> start | <--+
* | limit | |
* +---------------+ |
* . |
* . Reserved space for
* . RegExpObject::MaxPairCount
* . MatchPair objects
* . |
* +---------------+ |
* | MatchPair | |
* | start | |
* | limit | <--+
* +---------------+
*/
int32_t ioOffset = inputOutputDataStartOffset;
int32_t matchPairsOffset = ioOffset + int32_t(sizeof(InputOutputData));
int32_t pairsArrayOffset = matchPairsOffset + int32_t(sizeof(MatchPairs));
Address inputStartAddress(FramePointer,
ioOffset + InputOutputData::offsetOfInputStart());
Address inputEndAddress(FramePointer,
ioOffset + InputOutputData::offsetOfInputEnd());
Address startIndexAddress(FramePointer,
ioOffset + InputOutputData::offsetOfStartIndex());
Address matchesAddress(FramePointer,
ioOffset + InputOutputData::offsetOfMatches());
Address matchPairsAddress(FramePointer, matchPairsOffset);
Address pairCountAddress(FramePointer,
matchPairsOffset + MatchPairs::offsetOfPairCount());
Address pairsPointerAddress(FramePointer,
matchPairsOffset + MatchPairs::offsetOfPairs());
Address pairsArrayAddress(FramePointer, pairsArrayOffset);
Address firstMatchStartAddress(FramePointer,
pairsArrayOffset + MatchPair::offsetOfStart());
// First, fill in a skeletal MatchPairs instance on the stack. This will be
// passed to the OOL stub in the caller if we aren't able to execute the
// RegExp inline, and that stub needs to be able to determine whether the
// execution finished successfully.
// Initialize MatchPairs::pairCount to 1. The correct value can only
// be determined after loading the RegExpShared. If the RegExpShared
// has Kind::Atom, this is the correct pairCount.
masm.store32(Imm32(1), pairCountAddress);
// Initialize MatchPairs::pairs pointer
masm.computeEffectiveAddress(pairsArrayAddress, temp1);
masm.storePtr(temp1, pairsPointerAddress);
// Initialize MatchPairs::pairs[0].start to MatchPair::NoMatch.
masm.store32(Imm32(MatchPair::NoMatch), firstMatchStartAddress);
// Determine the set of volatile inputs to save when calling into C++ or
// regexp code.
LiveGeneralRegisterSet volatileRegs;
if (lastIndex.volatile_()) {
volatileRegs.add(lastIndex);
}
if (input.volatile_()) {
volatileRegs.add(input);
}
if (regexp.volatile_()) {
volatileRegs.add(regexp);
}
// Ensure the input string is not a rope.
Label isLinear;
masm.branchIfNotRope(input, &isLinear);
{
masm.PushRegsInMask(volatileRegs);
using Fn = JSLinearString* (*)(JSString*);
masm.setupUnalignedABICall(temp1);
masm.passABIArg(input);
masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
MOZ_ASSERT(!volatileRegs.has(temp1));
masm.storeCallPointerResult(temp1);
masm.PopRegsInMask(volatileRegs);
masm.branchTestPtr(Assembler::Zero, temp1, temp1, failure);
}
masm.bind(&isLinear);
// Load the RegExpShared.
Register regexpReg = temp1;
Address sharedSlot = Address(
regexp, NativeObject::getFixedSlotOffset(RegExpObject::SHARED_SLOT));
masm.branchTestUndefined(Assembler::Equal, sharedSlot, failure);
masm.unboxNonDouble(sharedSlot, regexpReg, JSVAL_TYPE_PRIVATE_GCTHING);
// Handle Atom matches
Label notAtom, checkSuccess;
masm.branchPtr(Assembler::Equal,
Address(regexpReg, RegExpShared::offsetOfPatternAtom()),
ImmWord(0), &notAtom);
{
masm.computeEffectiveAddress(matchPairsAddress, temp3);
masm.PushRegsInMask(volatileRegs);
using Fn =
RegExpRunStatus (*)(RegExpShared* re, const JSLinearString* input,
size_t start, MatchPairs* matchPairs);
masm.setupUnalignedABICall(temp2);
masm.passABIArg(regexpReg);
masm.passABIArg(input);
masm.passABIArg(lastIndex);
masm.passABIArg(temp3);
masm.callWithABI<Fn, js::ExecuteRegExpAtomRaw>();
MOZ_ASSERT(!volatileRegs.has(temp1));
masm.storeCallInt32Result(temp1);
masm.PopRegsInMask(volatileRegs);
masm.jump(&checkSuccess);
}
masm.bind(&notAtom);
// Don't handle regexps with too many capture pairs.
masm.load32(Address(regexpReg, RegExpShared::offsetOfPairCount()), temp2);
masm.branch32(Assembler::Above, temp2, Imm32(RegExpObject::MaxPairCount),
failure);
// Fill in the pair count in the MatchPairs on the stack.
masm.store32(temp2, pairCountAddress);
// Load code pointer and length of input (in bytes).
// Store the input start in the InputOutputData.
Register codePointer = temp1; // Note: temp1 was previously regexpReg.
Register byteLength = temp3;
{
Label isLatin1, done;
masm.loadStringLength(input, byteLength);
masm.branchLatin1String(input, &isLatin1);
// Two-byte input
masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
masm.storePtr(temp2, inputStartAddress);
masm.loadPtr(
Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/false)),
codePointer);
masm.lshiftPtr(Imm32(1), byteLength);
masm.jump(&done);
// Latin1 input
masm.bind(&isLatin1);
masm.loadStringChars(input, temp2, CharEncoding::Latin1);
masm.storePtr(temp2, inputStartAddress);
masm.loadPtr(
Address(regexpReg, RegExpShared::offsetOfJitCode(/*latin1 =*/true)),
codePointer);
masm.bind(&done);
// Store end pointer
masm.addPtr(byteLength, temp2);
masm.storePtr(temp2, inputEndAddress);
}
// Guard that the RegExpShared has been compiled for this type of input.
// If it has not been compiled, we fall back to the OOL case, which will
// do a VM call into the interpreter.
// TODO: add an interpreter trampoline?
masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);
// Finish filling in the InputOutputData instance on the stack
masm.computeEffectiveAddress(matchPairsAddress, temp2);
masm.storePtr(temp2, matchesAddress);
masm.storePtr(lastIndex, startIndexAddress);
// Execute the RegExp.
masm.computeEffectiveAddress(
Address(FramePointer, inputOutputDataStartOffset), temp2);
masm.PushRegsInMask(volatileRegs);
masm.setupUnalignedABICall(temp3);
masm.passABIArg(temp2);
masm.callWithABI(codePointer);
masm.storeCallInt32Result(temp1);
masm.PopRegsInMask(volatileRegs);
masm.bind(&checkSuccess);
masm.branch32(Assembler::Equal, temp1,
Imm32(int32_t(RegExpRunStatus::Success_NotFound)), notFound);
masm.branch32(Assembler::Equal, temp1, Imm32(int32_t(RegExpRunStatus::Error)),
failure);
// Lazily update the RegExpStatics.
size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
RegExpRealm::offsetOfRegExpStatics();
masm.loadGlobalObjectData(temp1);
masm.loadPtr(Address(temp1, offset), temp1);
UpdateRegExpStatics(masm, regexp, input, lastIndex, temp1, temp2, temp3,
initialStringHeap, volatileRegs);
return true;
}
static void EmitInitDependentStringBase(MacroAssembler& masm,
Register dependent, Register base,
Register temp1, Register temp2,
bool needsPostBarrier) {
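// Illustrative sketch of the base-selection logic below (names approximate):
//   JSString* realBase = base->isDependent() ? base->base() : base;
//   if (!realBase->isAtom()) {
//     realBase->setFlags(realBase->flags() | DEPENDED_ON_BIT);
//   }
//   dependent->setBase(realBase);  // plus a post barrier when required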
// Determine the base string to use and store it in temp2.
Label notDependent, markedDependedOn;
masm.load32(Address(base, JSString::offsetOfFlags()), temp1);
masm.branchTest32(Assembler::Zero, temp1, Imm32(JSString::DEPENDENT_BIT),
&notDependent);
{
// The base is also a dependent string. Load its base to prevent chains of
// dependent strings in most cases. This must either be an atom or already
// have the DEPENDED_ON_BIT set.
masm.loadDependentStringBase(base, temp2);
masm.jump(&markedDependedOn);
}
masm.bind(&notDependent);
{
// The base is not a dependent string. Set the DEPENDED_ON_BIT if it's not
// an atom.
masm.movePtr(base, temp2);
masm.branchTest32(Assembler::NonZero, temp1, Imm32(JSString::ATOM_BIT),
&markedDependedOn);
masm.or32(Imm32(JSString::DEPENDED_ON_BIT), temp1);
masm.store32(temp1, Address(temp2, JSString::offsetOfFlags()));
}
masm.bind(&markedDependedOn);
#ifdef DEBUG
// Assert the base has the DEPENDED_ON_BIT set or is an atom.
Label isAppropriatelyMarked;
masm.branchTest32(Assembler::NonZero,
Address(temp2, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT | JSString::DEPENDED_ON_BIT),
&isAppropriatelyMarked);
masm.assumeUnreachable("Base string is missing DEPENDED_ON_BIT");
masm.bind(&isAppropriatelyMarked);
#endif
masm.storeDependentStringBase(temp2, dependent);
// Post-barrier the base store. The base is still in temp2.
if (needsPostBarrier) {
Label done;
masm.branchPtrInNurseryChunk(Assembler::Equal, dependent, temp1, &done);
masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp2, temp1, &done);
LiveRegisterSet regsToSave(RegisterSet::Volatile());
regsToSave.takeUnchecked(temp1);
regsToSave.takeUnchecked(temp2);
masm.PushRegsInMask(regsToSave);
masm.mov(ImmPtr(masm.runtime()), temp1);
using Fn = void (*)(JSRuntime* rt, js::gc::Cell* cell);
masm.setupUnalignedABICall(temp2);
masm.passABIArg(temp1);
masm.passABIArg(dependent);
masm.callWithABI<Fn, PostWriteBarrier>();
masm.PopRegsInMask(regsToSave);
masm.bind(&done);
} else {
#ifdef DEBUG
Label done;
masm.branchPtrInNurseryChunk(Assembler::Equal, dependent, temp1, &done);
masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp2, temp1, &done);
masm.assumeUnreachable("Missing post barrier for dependent string base");
masm.bind(&done);
#endif
}
}
static void CopyStringChars(MacroAssembler& masm, Register to, Register from,
Register len, Register byteOpScratch,
CharEncoding encoding,
size_t maximumLength = SIZE_MAX);
class CreateDependentString {
CharEncoding encoding_;
Register string_;
Register temp1_;
Register temp2_;
Label* failure_;
enum class FallbackKind : uint8_t {
InlineString,
FatInlineString,
NotInlineString,
Count
};
mozilla::EnumeratedArray<FallbackKind, Label, size_t(FallbackKind::Count)>
fallbacks_, joins_;
public:
CreateDependentString(CharEncoding encoding, Register string, Register temp1,
Register temp2, Label* failure)
: encoding_(encoding),
string_(string),
temp1_(temp1),
temp2_(temp2),
failure_(failure) {}
Register string() const { return string_; }
CharEncoding encoding() const { return encoding_; }
// Generate code that creates a DependentString.
// The caller should call generateFallback after masm.ret() to generate the
// fallback path.
void generate(MacroAssembler& masm, const JSAtomState& names,
CompileRuntime* runtime, Register base,
BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
gc::Heap initialStringHeap);
// Generate fallback path for creating DependentString.
void generateFallback(MacroAssembler& masm);
};
void CreateDependentString::generate(MacroAssembler& masm,
const JSAtomState& names,
CompileRuntime* runtime, Register base,
BaseIndex startIndexAddress,
BaseIndex limitIndexAddress,
gc::Heap initialStringHeap) {
JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
(encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
auto newGCString = [&](FallbackKind kind) {
uint32_t flags = kind == FallbackKind::InlineString
? JSString::INIT_THIN_INLINE_FLAGS
: kind == FallbackKind::FatInlineString
? JSString::INIT_FAT_INLINE_FLAGS
: JSString::INIT_DEPENDENT_FLAGS;
if (encoding_ == CharEncoding::Latin1) {
flags |= JSString::LATIN1_CHARS_BIT;
}
if (kind != FallbackKind::FatInlineString) {
masm.newGCString(string_, temp2_, initialStringHeap, &fallbacks_[kind]);
} else {
masm.newGCFatInlineString(string_, temp2_, initialStringHeap,
&fallbacks_[kind]);
}
masm.bind(&joins_[kind]);
masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
};
// Compute the string length.
masm.load32(startIndexAddress, temp2_);
masm.load32(limitIndexAddress, temp1_);
masm.sub32(temp2_, temp1_);
Label done, nonEmpty;
// Zero length matches use the empty string.
masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
masm.movePtr(ImmGCPtr(names.empty_), string_);
masm.jump(&done);
masm.bind(&nonEmpty);
// Complete matches use the base string.
Label nonBaseStringMatch;
masm.branchTest32(Assembler::NonZero, temp2_, temp2_, &nonBaseStringMatch);
masm.branch32(Assembler::NotEqual, Address(base, JSString::offsetOfLength()),
temp1_, &nonBaseStringMatch);
masm.movePtr(base, string_);
masm.jump(&done);
masm.bind(&nonBaseStringMatch);
Label notInline;
int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
? JSFatInlineString::MAX_LENGTH_LATIN1
: JSFatInlineString::MAX_LENGTH_TWO_BYTE;
masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), &notInline);
{
// Make a thin or fat inline string.
Label stringAllocated, fatInline;
int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
? JSThinInlineString::MAX_LENGTH_LATIN1
: JSThinInlineString::MAX_LENGTH_TWO_BYTE;
masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength),
&fatInline);
if (encoding_ == CharEncoding::Latin1) {
// One character Latin-1 strings can be loaded directly from the
// static strings table.
Label thinInline;
masm.branch32(Assembler::Above, temp1_, Imm32(1), &thinInline);
{
static_assert(
StaticStrings::UNIT_STATIC_LIMIT - 1 == JSString::MAX_LATIN1_CHAR,
"Latin-1 strings can be loaded from static strings");
masm.loadStringChars(base, temp1_, encoding_);
masm.loadChar(temp1_, temp2_, temp1_, encoding_);
masm.lookupStaticString(temp1_, string_, runtime->staticStrings());
masm.jump(&done);
}
masm.bind(&thinInline);
}
{
newGCString(FallbackKind::InlineString);
masm.jump(&stringAllocated);
}
masm.bind(&fatInline);
{
newGCString(FallbackKind::FatInlineString);
}
masm.bind(&stringAllocated);
masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));
masm.push(string_);
masm.push(base);
MOZ_ASSERT(startIndexAddress.base == FramePointer,
"startIndexAddress is still valid after stack pushes");
// Load chars pointer for the new string.
masm.loadInlineStringCharsForStore(string_, string_);
// Load the source characters pointer.
masm.loadStringChars(base, temp2_, encoding_);
masm.load32(startIndexAddress, base);
masm.addToCharPtr(temp2_, base, encoding_);
CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);
masm.pop(base);
masm.pop(string_);
masm.jump(&done);
}
masm.bind(&notInline);
{
// Make a dependent string.
// Warning: the string may be tenured (if the fallback case is hit), so
// stores into it must be post-barriered.
newGCString(FallbackKind::NotInlineString);
masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));
masm.loadNonInlineStringChars(base, temp1_, encoding_);
masm.load32(startIndexAddress, temp2_);
masm.addToCharPtr(temp1_, temp2_, encoding_);
masm.storeNonInlineStringChars(temp1_, string_);
EmitInitDependentStringBase(masm, string_, base, temp1_, temp2_,
/* needsPostBarrier = */ true);
}
masm.bind(&done);
}
void CreateDependentString::generateFallback(MacroAssembler& masm) {
JitSpew(JitSpew_Codegen,
"# Emitting CreateDependentString fallback (encoding=%s)",
(encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));
LiveRegisterSet regsToSave(RegisterSet::Volatile());
regsToSave.takeUnchecked(string_);
regsToSave.takeUnchecked(temp2_);
for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
masm.bind(&fallbacks_[kind]);
masm.PushRegsInMask(regsToSave);
using Fn = void* (*)(JSContext* cx);
masm.setupUnalignedABICall(string_);
masm.loadJSContext(string_);
masm.passABIArg(string_);
if (kind == FallbackKind::FatInlineString) {
masm.callWithABI<Fn, AllocateFatInlineString>();
} else {
masm.callWithABI<Fn, AllocateDependentString>();
}
masm.storeCallPointerResult(string_);
masm.PopRegsInMask(regsToSave);
masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);
masm.jump(&joins_[kind]);
}
}
// Generate the RegExpMatcher and RegExpExecMatch stubs. These are very similar,
// but RegExpExecMatch also has to load and update .lastIndex for global/sticky
// regular expressions.
static JitCode* GenerateRegExpMatchStubShared(JSContext* cx,
gc::Heap initialStringHeap,
bool isExecMatch) {
if (isExecMatch) {
JitSpew(JitSpew_Codegen, "# Emitting RegExpExecMatch stub");
} else {
JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");
}
// |initialStringHeap| could be stale after a GC.
JS::AutoCheckCannotGC nogc(cx);
Register regexp = RegExpMatcherRegExpReg;
Register input = RegExpMatcherStringReg;
Register lastIndex = RegExpMatcherLastIndexReg;
ValueOperand result = JSReturnOperand;
// We are free to clobber all registers, as LRegExpMatcher is a call
// instruction.
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
regs.take(input);
regs.take(regexp);
regs.take(lastIndex);
Register temp1 = regs.takeAny();
Register temp2 = regs.takeAny();
Register temp3 = regs.takeAny();
Register maybeTemp4 = InvalidReg;
if (!regs.empty()) {
// There are not enough registers on x86.
maybeTemp4 = regs.takeAny();
}
Register maybeTemp5 = InvalidReg;
if (!regs.empty()) {
// There are not enough registers on x86.
maybeTemp5 = regs.takeAny();
}
Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
TempAllocator temp(&cx->tempLifoAlloc());
JitContext jcx(cx);
StackMacroAssembler masm(cx, temp);
AutoCreatedBy acb(masm, "GenerateRegExpMatchStubShared");
#ifdef JS_USE_LINK_REGISTER
masm.pushReturnAddress();
#endif
masm.push(FramePointer);
masm.moveStackPtrTo(FramePointer);
Label notFoundZeroLastIndex;
if (isExecMatch) {
masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
}
// The InputOutputData is placed above the frame pointer and return address on
// the stack.
int32_t inputOutputDataStartOffset = 2 * sizeof(void*);
Label notFound, oolEntry;
if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
temp3, inputOutputDataStartOffset,
initialStringHeap, &notFound, &oolEntry)) {
return nullptr;
}
// If a regexp has named captures, fall back to the OOL stub, which
// will end up calling CreateRegExpMatchResults.
Register shared = temp2;
masm.unboxNonDouble(Address(regexp, NativeObject::getFixedSlotOffset(
RegExpObject::SHARED_SLOT)),
shared, JSVAL_TYPE_PRIVATE_GCTHING);
masm.branchPtr(Assembler::NotEqual,
Address(shared, RegExpShared::offsetOfGroupsTemplate()),
ImmWord(0), &oolEntry);
// Similarly, if the |hasIndices| flag is set, fall back to the OOL stub.
masm.branchTest32(Assembler::NonZero,
Address(shared, RegExpShared::offsetOfFlags()),
Imm32(int32_t(JS::RegExpFlag::HasIndices)), &oolEntry);
Address pairCountAddress =
RegExpPairCountAddress(masm, inputOutputDataStartOffset);
// Construct the result.
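// The match result is a nursery-allocated, array-like object (sketch):
//   result = [wholeMatch, capture1, ...];  // one element per match pair
//   result.index = pairs[0].start;         // slots filled in further below
//   result.input = input;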
Register object = temp1;
{
// In most cases, the array will have just 1-2 elements, so we optimize for
// that by emitting separate code paths for capacity 2/6/14 (= 4/8/16 slots
// because two slots are used for the elements header).
// Load the array length in temp2 and the shape in temp3.
Label allocated;
masm.load32(pairCountAddress, temp2);
size_t offset = GlobalObjectData::offsetOfRegExpRealm() +
RegExpRealm::offsetOfNormalMatchResultShape();
masm.loadGlobalObjectData(temp3);
masm.loadPtr(Address(temp3, offset), temp3);
auto emitAllocObject = [&](size_t elementCapacity) {
gc::AllocKind kind = GuessArrayGCKind(elementCapacity);
MOZ_ASSERT(gc::GetObjectFinalizeKind(&ArrayObject::class_) ==
gc::FinalizeKind::None);
MOZ_ASSERT(!IsFinalizedKind(kind));
#ifdef DEBUG
// Assert all of the available slots are used for |elementCapacity|
// elements.
size_t usedSlots = ObjectElements::VALUES_PER_HEADER + elementCapacity;
MOZ_ASSERT(usedSlots == GetGCKindSlots(kind));
#endif
constexpr size_t numUsedDynamicSlots =
RegExpRealm::MatchResultObjectSlotSpan;
constexpr size_t numDynamicSlots =
RegExpRealm::MatchResultObjectNumDynamicSlots;
constexpr size_t arrayLength = 1;
masm.createArrayWithFixedElements(object, temp3, temp2, temp3,
arrayLength, elementCapacity,
numUsedDynamicSlots, numDynamicSlots,
kind, gc::Heap::Default, &oolEntry);
};
Label moreThan2;
masm.branch32(Assembler::Above, temp2, Imm32(2), &moreThan2);
emitAllocObject(2);
masm.jump(&allocated);
Label moreThan6;
masm.bind(&moreThan2);
masm.branch32(Assembler::Above, temp2, Imm32(6), &moreThan6);
emitAllocObject(6);
masm.jump(&allocated);
masm.bind(&moreThan6);
static_assert(RegExpObject::MaxPairCount == 14);
emitAllocObject(RegExpObject::MaxPairCount);
masm.bind(&allocated);
}
// clang-format off
/*
* [SMDOC] Stack layout for the RegExpMatcher stub
*
* +---------------+
* FramePointer +-----> |Caller-FramePtr|
* +---------------+
* |Return-Address |
* +---------------+
* inputOutputDataStartOffset +-----> +---------------+
* |InputOutputData|
* +---------------+
* +---------------+
* | MatchPairs |
* pairsCountAddress +-----------> count |
* | pairs |
* | |
* +---------------+
* pairsVectorStartOffset +-----> +---------------+
* | MatchPair |
* matchPairStart +------------> start | <-------+
* matchPairLimit +------------> limit | | Reserved space for
* +---------------+ | `RegExpObject::MaxPairCount`
* . | MatchPair objects.
* . |
* . | `count` objects will be
* +---------------+ | initialized and can be
* | MatchPair | | accessed below.
* | start | <-------+
* | limit |
* +---------------+
*/
// clang-format on
static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
"MatchPair consists of two int32 values representing the start"
"and the end offset of the match");
int32_t pairsVectorStartOffset =
RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
// Incremented by one below for each match pair.
Register matchIndex = temp2;
masm.move32(Imm32(0), matchIndex);
// The element in which to store the result of the current match.
size_t elementsOffset = NativeObject::offsetOfFixedElements();
BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);
// The current match pair's "start" and "limit" member.
BaseIndex matchPairStart(FramePointer, matchIndex, TimesEight,
pairsVectorStartOffset + MatchPair::offsetOfStart());
BaseIndex matchPairLimit(FramePointer, matchIndex, TimesEight,
pairsVectorStartOffset + MatchPair::offsetOfLimit());
Label* depStrFailure = &oolEntry;
Label restoreRegExpAndLastIndex;
Register temp4;
if (maybeTemp4 == InvalidReg) {
depStrFailure = &restoreRegExpAndLastIndex;
// We don't have enough registers for a fourth temporary. Reuse |regexp|
// as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
masm.push(regexp);
temp4 = regexp;
} else {
temp4 = maybeTemp4;
}
Register temp5;
if (maybeTemp5 == InvalidReg) {
depStrFailure = &restoreRegExpAndLastIndex;
// We don't have enough registers for a fifth temporary. Reuse |lastIndex|
// as a temporary. We restore its value at |restoreRegExpAndLastIndex|.
masm.push(lastIndex);
temp5 = lastIndex;
} else {
temp5 = maybeTemp5;
}
auto maybeRestoreRegExpAndLastIndex = [&]() {
if (maybeTemp5 == InvalidReg) {
masm.pop(lastIndex);
}
if (maybeTemp4 == InvalidReg) {
masm.pop(regexp);
}
};
// Loop to construct the match strings. There are two different loops,
// depending on whether the input is a Two-Byte or a Latin-1 string.
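// Per-pair loop, in effect (illustrative sketch):
//   for (i = 0; i < pairCount; i++) {
//     result[i] = (pairs[i].start < 0)
//                     ? undefined
//                     : NewDependentString(input, pairs[i].start,
//                                          pairs[i].limit);
//   }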
CreateDependentString depStrs[]{
{CharEncoding::TwoByte, temp3, temp4, temp5, depStrFailure},
{CharEncoding::Latin1, temp3, temp4, temp5, depStrFailure},
};
{
Label isLatin1, done;
masm.branchLatin1String(input, &isLatin1);
for (auto& depStr : depStrs) {
if (depStr.encoding() == CharEncoding::Latin1) {
masm.bind(&isLatin1);
}
Label matchLoop;
masm.bind(&matchLoop);
static_assert(MatchPair::NoMatch == -1,
"MatchPair::start is negative if no match was found");
Label isUndefined, storeDone;
masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0),
&isUndefined);
{
depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()),
input, matchPairStart, matchPairLimit,
initialStringHeap);
// We are storing into a nursery-allocated result object's elements, so no
// post barrier is needed.
masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
masm.jump(&storeDone);
}
masm.bind(&isUndefined);
{
masm.storeValue(UndefinedValue(), objectMatchElement);
}
masm.bind(&storeDone);
masm.add32(Imm32(1), matchIndex);
masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex,
&done);
masm.jump(&matchLoop);
}
#ifdef DEBUG
masm.assumeUnreachable("The match string loop doesn't fall through.");
#endif
masm.bind(&done);
}
maybeRestoreRegExpAndLastIndex();
// Fill in the rest of the output object.
masm.store32(
matchIndex,
Address(object,
elementsOffset + ObjectElements::offsetOfInitializedLength()));
masm.store32(
matchIndex,
Address(object, elementsOffset + ObjectElements::offsetOfLength()));
Address firstMatchPairStartAddress(
FramePointer, pairsVectorStartOffset + MatchPair::offsetOfStart());
Address firstMatchPairLimitAddress(
FramePointer, pairsVectorStartOffset + MatchPair::offsetOfLimit());
static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
"First slot holds the 'index' property");
static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
"Second slot holds the 'input' property");
masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
masm.load32(firstMatchPairStartAddress, temp3);
masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));
// No post barrier needed (the address is within a nursery object).
masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));
// For the ExecMatch stub, if the regular expression is global or sticky, we
// have to update its .lastIndex slot.
if (isExecMatch) {
MOZ_ASSERT(object != lastIndex);
Label notGlobalOrSticky;
masm.branchTest32(Assembler::Zero, flagsSlot,
Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
&notGlobalOrSticky);
masm.load32(firstMatchPairLimitAddress, lastIndex);
masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
masm.bind(&notGlobalOrSticky);
}
// All done!
masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
masm.pop(FramePointer);
masm.ret();
masm.bind(&notFound);
if (isExecMatch) {
Label notGlobalOrSticky;
masm.branchTest32(Assembler::Zero, flagsSlot,
Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
&notGlobalOrSticky);
masm.bind(&notFoundZeroLastIndex);
masm.storeValue(Int32Value(0), lastIndexSlot);
masm.bind(&notGlobalOrSticky);
}
masm.moveValue(NullValue(), result);
masm.pop(FramePointer);
masm.ret();
// Fallback paths for CreateDependentString.
for (auto& depStr : depStrs) {
depStr.generateFallback(masm);
}
// Fall-through to the ool entry after restoring the registers.
masm.bind(&restoreRegExpAndLastIndex);
maybeRestoreRegExpAndLastIndex();
// Use an undefined value to signal to the caller that the OOL stub needs to
// be called.
masm.bind(&oolEntry);
masm.moveValue(UndefinedValue(), result);
masm.pop(FramePointer);
masm.ret();
Linker linker(masm);
JitCode* code = linker.newCode(cx, CodeKind::Other);
if (!code) {
return nullptr;
}
const char* name = isExecMatch ? "RegExpExecMatchStub" : "RegExpMatcherStub";
CollectPerfSpewerJitCodeProfile(code, name);
#ifdef MOZ_VTUNE
vtune::MarkStub(code, name);
#endif
return code;
}
JitCode* JitZone::generateRegExpMatcherStub(JSContext* cx) {
return GenerateRegExpMatchStubShared(cx, initialStringHeap,
/* isExecMatch = */ false);
}
JitCode* JitZone::generateRegExpExecMatchStub(JSContext* cx) {
return GenerateRegExpMatchStubShared(cx, initialStringHeap,
/* isExecMatch = */ true);
}
void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) {
MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg);
MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);
#if defined(JS_NUNBOX32)
static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Type);
static_assert(RegExpMatcherLastIndexReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
static_assert(RegExpMatcherRegExpReg != JSReturnReg);
static_assert(RegExpMatcherStringReg != JSReturnReg);
static_assert(RegExpMatcherLastIndexReg != JSReturnReg);
#endif
masm.reserveStack(RegExpReservedStack);
auto* ool = new (alloc()) LambdaOutOfLineCode([=](OutOfLineCode& ool) {
Register lastIndex = ToRegister(lir->lastIndex());
Register input = ToRegister(lir->string());
Register regexp = ToRegister(lir->regexp());
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
regs.take(lastIndex);
regs.take(input);
regs.take(regexp);
Register temp = regs.takeAny();
masm.computeEffectiveAddress(
Address(masm.getStackPointer(), InputOutputDataSize), temp);
pushArg(temp);
pushArg(lastIndex);
pushArg(input);
pushArg(regexp);
// We are not using oolCallVM because we are in a call instruction and live
// registers have already been saved by the register allocator.
using Fn = bool (*)(JSContext*, HandleObject regexp, HandleString input,
int32_t lastIndex, MatchPairs* pairs,
MutableHandleValue output);
callVM<Fn, RegExpMatcherRaw>(lir);
masm.jump(ool.rejoin());
});
addOutOfLineCode(ool, lir->mir());
JitCode* regExpMatcherStub =
snapshot_->getZoneStub(JitZone::StubKind::RegExpMatcher);
masm.call(regExpMatcherStub);
masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
masm.bind(ool->rejoin());
masm.freeStack(RegExpReservedStack);
}
void CodeGenerator::visitRegExpExecMatch(LRegExpExecMatch* lir) {
MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg);
MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg);
MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand);
#if defined(JS_NUNBOX32)
static_assert(RegExpMatcherRegExpReg != JSReturnReg_Type);
static_assert(RegExpMatcherRegExpReg != JSReturnReg_Data);
static_assert(RegExpMatcherStringReg != JSReturnReg_Type);
static_assert(RegExpMatcherStringReg != JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
static_assert(RegExpMatcherRegExpReg != JSReturnReg);
static_assert(RegExpMatcherStringReg != JSReturnReg);
#endif
masm.reserveStack(RegExpReservedStack);
auto* ool = new (alloc()) LambdaOutOfLineCode([=](OutOfLineCode& ool) {
Register input = ToRegister(lir->string());
Register regexp = ToRegister(lir->regexp());
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
regs.take(input);
regs.take(regexp);
Register temp = regs.takeAny();
masm.computeEffectiveAddress(
Address(masm.getStackPointer(), InputOutputDataSize), temp);
pushArg(temp);
pushArg(input);
pushArg(regexp);
// We are not using oolCallVM because we are in a Call and live registers
// have already been saved by the register allocator.
using Fn =
bool (*)(JSContext*, Handle<RegExpObject*> regexp, HandleString input,
MatchPairs* pairs, MutableHandleValue output);
callVM<Fn, RegExpBuiltinExecMatchFromJit>(lir);
masm.jump(ool.rejoin());
});
addOutOfLineCode(ool, lir->mir());
JitCode* regExpExecMatchStub =
snapshot_->getZoneStub(JitZone::StubKind::RegExpExecMatch);
masm.call(regExpExecMatchStub);
masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
masm.bind(ool->rejoin());
masm.freeStack(RegExpReservedStack);
}
JitCode* JitZone::generateRegExpSearcherStub(JSContext* cx) {
JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub");
Register regexp = RegExpSearcherRegExpReg;
Register input = RegExpSearcherStringReg;
Register lastIndex = RegExpSearcherLastIndexReg;
Register result = ReturnReg;
// We are free to clobber all registers, as LRegExpSearcher is a call
// instruction.
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
regs.take(input);
regs.take(regexp);
regs.take(lastIndex);
Register temp1 = regs.takeAny();
Register temp2 = regs.takeAny();
Register temp3 = regs.takeAny();
TempAllocator temp(&cx->tempLifoAlloc());
JitContext jcx(cx);
StackMacroAssembler masm(cx, temp);
AutoCreatedBy acb(masm, "JitZone::generateRegExpSearcherStub");
#ifdef JS_USE_LINK_REGISTER
masm.pushReturnAddress();
#endif
masm.push(FramePointer);
masm.moveStackPtrTo(FramePointer);
#ifdef DEBUG
// Store sentinel value to cx->regExpSearcherLastLimit.
// See comment in RegExpSearcherImpl.
masm.loadJSContext(temp1);
masm.store32(Imm32(RegExpSearcherLastLimitSentinel),
Address(temp1, JSContext::offsetOfRegExpSearcherLastLimit()));
#endif
// The InputOutputData is placed above the frame pointer and return address on
// the stack.
int32_t inputOutputDataStartOffset = 2 * sizeof(void*);
Label notFound, oolEntry;
if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
temp3, inputOutputDataStartOffset,
initialStringHeap, &notFound, &oolEntry)) {
return nullptr;
}
// clang-format off
/*
* [SMDOC] Stack layout for the RegExpSearcher stub
*
* +---------------+
* FramePointer +-----> |Caller-FramePtr|
* +---------------+
* |Return-Address |
* +---------------+
* inputOutputDataStartOffset +-----> +---------------+
* |InputOutputData|
* +---------------+
* +---------------+
* | MatchPairs |
* | count |
* | pairs |
* | |
* +---------------+
* pairsVectorStartOffset +-----> +---------------+
* | MatchPair |
* matchPairStart +------------> start | <-------+
* matchPairLimit +------------> limit | | Reserved space for
* +---------------+ | `RegExpObject::MaxPairCount`
* . | MatchPair objects.
* . |
* . | Only a single object will
* +---------------+ | be initialized and can be
* | MatchPair | | accessed below.
* | start | <-------+
* | limit |
* +---------------+
*/
// clang-format on
int32_t pairsVectorStartOffset =
RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
Address matchPairStart(FramePointer,
pairsVectorStartOffset + MatchPair::offsetOfStart());
Address matchPairLimit(FramePointer,
pairsVectorStartOffset + MatchPair::offsetOfLimit());
// Store match limit to cx->regExpSearcherLastLimit and return the index.
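// In effect (sketch):
//   cx->regExpSearcherLastLimit = pairs[0].limit;
//   return pairs[0].start;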
masm.load32(matchPairLimit, result);
masm.loadJSContext(input);
masm.store32(result,
Address(input, JSContext::offsetOfRegExpSearcherLastLimit()));
masm.load32(matchPairStart, result);
masm.pop(FramePointer);
masm.ret();
masm.bind(&notFound);
masm.move32(Imm32(RegExpSearcherResultNotFound), result);
masm.pop(FramePointer);
masm.ret();
masm.bind(&oolEntry);
masm.move32(Imm32(RegExpSearcherResultFailed), result);
masm.pop(FramePointer);
masm.ret();
Linker linker(masm);
JitCode* code = linker.newCode(cx, CodeKind::Other);
if (!code) {
return nullptr;
}
CollectPerfSpewerJitCodeProfile(code, "RegExpSearcherStub");
#ifdef MOZ_VTUNE
vtune::MarkStub(code, "RegExpSearcherStub");
#endif
return code;
}
void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) {
MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpSearcherRegExpReg);
MOZ_ASSERT(ToRegister(lir->string()) == RegExpSearcherStringReg);
MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpSearcherLastIndexReg);
MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
static_assert(RegExpSearcherRegExpReg != ReturnReg);
static_assert(RegExpSearcherStringReg != ReturnReg);
static_assert(RegExpSearcherLastIndexReg != ReturnReg);
masm.reserveStack(RegExpReservedStack);
auto* ool = new (alloc()) LambdaOutOfLineCode([=](OutOfLineCode& ool) {
Register lastIndex = ToRegister(lir->lastIndex());
Register input = ToRegister(lir->string());
Register regexp = ToRegister(lir->regexp());
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
regs.take(lastIndex);
regs.take(input);
regs.take(regexp);
Register temp = regs.takeAny();
masm.computeEffectiveAddress(
Address(masm.getStackPointer(), InputOutputDataSize), temp);
pushArg(temp);
pushArg(lastIndex);
pushArg(input);
pushArg(regexp);
// We are not using oolCallVM because we are in a call instruction and live
// registers have already been saved by the register allocator.
using Fn = bool (*)(JSContext* cx, HandleObject regexp, HandleString input,
int32_t lastIndex, MatchPairs* pairs, int32_t* result);
callVM<Fn, RegExpSearcherRaw>(lir);
masm.jump(ool.rejoin());
});
addOutOfLineCode(ool, lir->mir());
JitCode* regExpSearcherStub =
snapshot_->getZoneStub(JitZone::StubKind::RegExpSearcher);
masm.call(regExpSearcherStub);
masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed),
ool->entry());
masm.bind(ool->rejoin());
masm.freeStack(RegExpReservedStack);
}
void CodeGenerator::visitRegExpSearcherLastLimit(
LRegExpSearcherLastLimit* lir) {
Register result = ToRegister(lir->output());
Register scratch = ToRegister(lir->temp0());
masm.loadAndClearRegExpSearcherLastLimit(result, scratch);
}
JitCode* JitZone::generateRegExpExecTestStub(JSContext* cx) {
JitSpew(JitSpew_Codegen, "# Emitting RegExpExecTest stub");
Register regexp = RegExpExecTestRegExpReg;
Register input = RegExpExecTestStringReg;
Register result = ReturnReg;
TempAllocator temp(&cx->tempLifoAlloc());
JitContext jcx(cx);
StackMacroAssembler masm(cx, temp);
AutoCreatedBy acb(masm, "JitZone::generateRegExpExecTestStub");
#ifdef JS_USE_LINK_REGISTER
masm.pushReturnAddress();
#endif
masm.push(FramePointer);
masm.moveStackPtrTo(FramePointer);
// We are free to clobber all registers, as LRegExpExecTest is a call
// instruction.
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
regs.take(input);
regs.take(regexp);
// Ensure lastIndex != result.
regs.take(result);
Register lastIndex = regs.takeAny();
regs.add(result);
Register temp1 = regs.takeAny();
Register temp2 = regs.takeAny();
Register temp3 = regs.takeAny();
Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
masm.reserveStack(RegExpReservedStack);
// Load lastIndex and skip RegExp execution if needed.
Label notFoundZeroLastIndex;
masm.loadRegExpLastIndex(regexp, input, lastIndex, &notFoundZeroLastIndex);
// In visitRegExpMatcher and visitRegExpSearcher, we reserve stack space
// before calling the stub. For RegExpExecTest we call the stub before
// reserving stack space, so the offset of the InputOutputData relative to the
// frame pointer is negative.
constexpr int32_t inputOutputDataStartOffset = -int32_t(RegExpReservedStack);
// On ARM64, load/store instructions can encode an immediate offset in the
// range [-256, 4095]. If we ever fail this assertion, it would be more
// efficient to store the data above the frame pointer, as RegExpMatcher
// and RegExpSearcher do.
static_assert(inputOutputDataStartOffset >= -256);
Label notFound, oolEntry;
if (!PrepareAndExecuteRegExp(masm, regexp, input, lastIndex, temp1, temp2,
temp3, inputOutputDataStartOffset,
initialStringHeap, &notFound, &oolEntry)) {
return nullptr;
}
// Set `result` to true/false to indicate found/not-found, or to
// RegExpExecTestResultFailed if we have to retry in C++. If the regular
// expression is global or sticky, we also have to update its .lastIndex slot.
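// In effect (illustrative sketch):
//   result = true;
//   if (flags & (Global | Sticky)) regexp->lastIndex = pairs[0].limit;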
Label done;
int32_t pairsVectorStartOffset =
RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
Address matchPairLimit(FramePointer,
pairsVectorStartOffset + MatchPair::offsetOfLimit());
masm.move32(Imm32(1), result);
masm.branchTest32(Assembler::Zero, flagsSlot,
Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
&done);
masm.load32(matchPairLimit, lastIndex);
masm.storeValue(JSVAL_TYPE_INT32, lastIndex, lastIndexSlot);
masm.jump(&done);
masm.bind(&notFound);
masm.move32(Imm32(0), result);
masm.branchTest32(Assembler::Zero, flagsSlot,
Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
&done);
masm.storeValue(Int32Value(0), lastIndexSlot);
masm.jump(&done);
masm.bind(&notFoundZeroLastIndex);
masm.move32(Imm32(0), result);
masm.storeValue(Int32Value(0), lastIndexSlot);
masm.jump(&done);
masm.bind(&oolEntry);
masm.move32(Imm32(RegExpExecTestResultFailed), result);
masm.bind(&done);
masm.freeStack(RegExpReservedStack);
masm.pop(FramePointer);
masm.ret();
Linker linker(masm);
JitCode* code = linker.newCode(cx, CodeKind::Other);
if (!code) {
return nullptr;
}
CollectPerfSpewerJitCodeProfile(code, "RegExpExecTestStub");
#ifdef MOZ_VTUNE
vtune::MarkStub(code, "RegExpExecTestStub");
#endif
return code;
}
void CodeGenerator::visitRegExpExecTest(LRegExpExecTest* lir) {
MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpExecTestRegExpReg);
MOZ_ASSERT(ToRegister(lir->string()) == RegExpExecTestStringReg);
MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);
static_assert(RegExpExecTestRegExpReg != ReturnReg);
static_assert(RegExpExecTestStringReg != ReturnReg);
auto* ool = new (alloc()) LambdaOutOfLineCode([=](OutOfLineCode& ool) {
Register input = ToRegister(lir->string());
Register regexp = ToRegister(lir->regexp());
pushArg(input);
pushArg(regexp);
// We are not using oolCallVM because we are in a Call and live registers
// have already been saved by the register allocator.
using Fn = bool (*)(JSContext* cx, Handle<RegExpObject*> regexp,
HandleString input, bool* result);
callVM<Fn, RegExpBuiltinExecTestFromJit>(lir);
masm.jump(ool.rejoin());
});
addOutOfLineCode(ool, lir->mir());
JitCode* regExpExecTestStub =
snapshot_->getZoneStub(JitZone::StubKind::RegExpExecTest);
masm.call(regExpExecTestStub);
masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpExecTestResultFailed),
ool->entry());
masm.bind(ool->rejoin());
}
void CodeGenerator::visitRegExpHasCaptureGroups(LRegExpHasCaptureGroups* ins) {
Register regexp = ToRegister(ins->regexp());
Register input = ToRegister(ins->input());
Register output = ToRegister(ins->output());
using Fn =
bool (*)(JSContext*, Handle<RegExpObject*>, Handle<JSString*>, bool*);
auto* ool = oolCallVM<Fn, js::RegExpHasCaptureGroups>(
ins, ArgList(regexp, input), StoreRegisterTo(output));
// Load RegExpShared in |output|.
Label vmCall;
masm.loadParsedRegExpShared(regexp, output, ool->entry());
// Return true iff pairCount > 1.
Label returnTrue;
masm.branch32(Assembler::Above,
Address(output, RegExpShared::offsetOfPairCount()), Imm32(1),
&returnTrue);
masm.move32(Imm32(0), output);
masm.jump(ool->rejoin());
masm.bind(&returnTrue);
masm.move32(Imm32(1), output);
masm.bind(ool->rejoin());
}
static void FindFirstDollarIndex(MacroAssembler& masm, Register str,
Register len, Register temp0, Register temp1,
Register output, CharEncoding encoding) {
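// Illustrative C++ equivalent of the loop emitted below:
//   int32_t FindFirstDollarIndex(const CharT* chars, int32_t len) {
//     for (int32_t i = 0; i < len; i++) {
//       if (chars[i] == '$') return i;
//     }
//     return -1;
//   }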
#ifdef DEBUG
Label ok;
masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok);
masm.assumeUnreachable("Length should be greater than 0.");
masm.bind(&ok);
#endif
Register chars = temp0;
masm.loadStringChars(str, chars, encoding);
masm.move32(Imm32(0), output);
Label start, done;
masm.bind(&start);
Register currentChar = temp1;
masm.loadChar(chars, output, currentChar, encoding);
masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done);
masm.add32(Imm32(1), output);
masm.branch32(Assembler::NotEqual, output, len, &start);
masm.move32(Imm32(-1), output);
masm.bind(&done);
}
void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) {
Register str = ToRegister(ins->str());
Register output = ToRegister(ins->output());
Register temp0 = ToRegister(ins->temp0());
Register temp1 = ToRegister(ins->temp1());
Register len = ToRegister(ins->temp2());
using Fn = bool (*)(JSContext*, JSString*, int32_t*);
OutOfLineCode* ool = oolCallVM<Fn, GetFirstDollarIndexRaw>(
ins, ArgList(str), StoreRegisterTo(output));
masm.branchIfRope(str, ool->entry());
masm.loadStringLength(str, len);
Label isLatin1, done;
masm.branchLatin1String(str, &isLatin1);
{
FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
CharEncoding::TwoByte);
masm.jump(&done);
}
masm.bind(&isLatin1);
{
FindFirstDollarIndex(masm, str, len, temp0, temp1, output,
CharEncoding::Latin1);
}
masm.bind(&done);
masm.bind(ool->rejoin());
}
void CodeGenerator::visitStringReplace(LStringReplace* lir) {
if (lir->replacement()->isConstant()) {
pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));
} else {
pushArg(ToRegister(lir->replacement()));
}
if (lir->pattern()->isConstant()) {
pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));
} else {
pushArg(ToRegister(lir->pattern()));
}
if (lir->string()->isConstant()) {
pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));
} else {
pushArg(ToRegister(lir->string()));
}
using Fn =
JSString* (*)(JSContext*, HandleString, HandleString, HandleString);
if (lir->mir()->isFlatReplacement()) {
callVM<Fn, StringFlatReplaceString>(lir);
} else {
callVM<Fn, StringReplace>(lir);
}
}
void CodeGenerator::visitBinaryValueCache(LBinaryValueCache* lir) {
LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
TypedOrValueRegister lhs = TypedOrValueRegister(ToValue(lir->lhs()));
TypedOrValueRegister rhs = TypedOrValueRegister(ToValue(lir->rhs()));
ValueOperand output = ToOutValue(lir);
JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
switch (jsop) {
case JSOp::Add:
case JSOp::Sub:
case JSOp::Mul:
case JSOp::Div:
case JSOp::Mod:
case JSOp::Pow:
case JSOp::BitAnd:
case JSOp::BitOr:
case JSOp::BitXor:
case JSOp::Lsh:
case JSOp::Rsh:
case JSOp::Ursh: {
IonBinaryArithIC ic(liveRegs, lhs, rhs, output);
addIC(lir, allocateIC(ic));
return;
}
default:
MOZ_CRASH("Unsupported jsop in MBinaryValueCache");
}
}
void CodeGenerator::visitBinaryBoolCache(LBinaryBoolCache* lir) {
LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
TypedOrValueRegister lhs = TypedOrValueRegister(ToValue(lir->lhs()));
TypedOrValueRegister rhs = TypedOrValueRegister(ToValue(lir->rhs()));
Register output = ToRegister(lir->output());
JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());
switch (jsop) {
case JSOp::Lt:
case JSOp::Le:
case JSOp::Gt:
case JSOp::Ge:
case JSOp::Eq:
case JSOp::Ne:
case JSOp::StrictEq:
case JSOp::StrictNe: {
IonCompareIC ic(liveRegs, lhs, rhs, output);
addIC(lir, allocateIC(ic));
return;
}
default:
MOZ_CRASH("Unsupported jsop in MBinaryBoolCache");
}
}
void CodeGenerator::visitUnaryCache(LUnaryCache* lir) {
LiveRegisterSet liveRegs = lir->safepoint()->liveRegs();
TypedOrValueRegister input = TypedOrValueRegister(ToValue(lir->input()));
ValueOperand output = ToOutValue(lir);
IonUnaryArithIC ic(liveRegs, input, output);
addIC(lir, allocateIC(ic));
}
void CodeGenerator::visitModuleMetadata(LModuleMetadata* lir) {
pushArg(ImmPtr(lir->mir()->module()));
using Fn = JSObject* (*)(JSContext*, HandleObject);
callVM<Fn, js::GetOrCreateModuleMetaObject>(lir);
}
void CodeGenerator::visitDynamicImport(LDynamicImport* lir) {
pushArg(ToValue(lir->options()));
pushArg(ToValue(lir->specifier()));
pushArg(ImmGCPtr(current->mir()->info().script()));
using Fn = JSObject* (*)(JSContext*, HandleScript, HandleValue, HandleValue);
callVM<Fn, js::StartDynamicModuleImport>(lir);
}
void CodeGenerator::visitLambda(LLambda* lir) {
Register envChain = ToRegister(lir->environmentChain());
Register output = ToRegister(lir->output());
Register tempReg = ToRegister(lir->temp0());
gc::Heap heap = lir->mir()->initialHeap();
JSFunction* fun = lir->mir()->templateFunction();
MOZ_ASSERT(fun->isTenured());
using Fn = JSObject* (*)(JSContext*, HandleFunction, HandleObject, gc::Heap);
OutOfLineCode* ool = oolCallVM<Fn, js::LambdaOptimizedFallback>(
lir, ArgList(ImmGCPtr(fun), envChain, Imm32(uint32_t(heap))),
StoreRegisterTo(output));
TemplateObject templateObject(fun);
masm.createGCObject(output, tempReg, templateObject, heap, ool->entry(),
/* initContents = */ true,
AllocSiteInput(gc::CatchAllAllocSite::Optimized));
masm.storeValue(JSVAL_TYPE_OBJECT, envChain,
Address(output, JSFunction::offsetOfEnvironment()));
// If we specified the tenured heap, then we need a post barrier. Otherwise
// no post barrier is needed, as the output is guaranteed to be allocated in
// the nursery.
if (heap == gc::Heap::Tenured) {
Label skipBarrier;
masm.branchPtrInNurseryChunk(Assembler::NotEqual, envChain, tempReg,
&skipBarrier);
saveVolatile(tempReg);
emitPostWriteBarrier(output);
restoreVolatile(tempReg);
masm.bind(&skipBarrier);
}
masm.bind(ool->rejoin());
}
void CodeGenerator::visitFunctionWithProto(LFunctionWithProto* lir) {
Register envChain = ToRegister(lir->envChain());
Register prototype = ToRegister(lir->prototype());
pushArg(prototype);
pushArg(envChain);
pushArg(ImmGCPtr(lir->mir()->function()));
using Fn =
JSObject* (*)(JSContext*, HandleFunction, HandleObject, HandleObject);
callVM<Fn, js::FunWithProtoOperation>(lir);
}
void CodeGenerator::visitSetFunName(LSetFunName* lir) {
pushArg(Imm32(lir->mir()->prefixKind()));
pushArg(ToValue(lir->name()));
pushArg(ToRegister(lir->fun()));
using Fn =
bool (*)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind);
callVM<Fn, js::SetFunctionName>(lir);
}
void CodeGenerator::visitOsiPoint(LOsiPoint* lir) {
// Note: markOsiPoint ensures enough space exists between the last
// LOsiPoint and this one to patch adjacent call instructions.
MOZ_ASSERT(masm.framePushed() == frameSize());
uint32_t osiCallPointOffset = markOsiPoint(lir);
LSafepoint* safepoint = lir->associatedSafepoint();
MOZ_ASSERT(!safepoint->osiCallPointOffset());
safepoint->setOsiCallPointOffset(osiCallPointOffset);
#ifdef DEBUG
// There should be no movegroups or other instructions between
// an instruction and its OsiPoint. This is necessary because
// we use the OsiPoint's snapshot from within VM calls.
for (LInstructionReverseIterator iter(current->rbegin(lir));
iter != current->rend(); iter++) {
if (*iter == lir) {
continue;
}
MOZ_ASSERT(!iter->isMoveGroup());
MOZ_ASSERT(iter->safepoint() == safepoint);
break;
}
#endif
#ifdef CHECK_OSIPOINT_REGISTERS
if (shouldVerifyOsiPointRegs(safepoint)) {
verifyOsiPointRegs(safepoint);
}
#endif
}
void CodeGenerator::visitPhi(LPhi* lir) {
MOZ_CRASH("Unexpected LPhi in CodeGenerator");
}
void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); }
void CodeGenerator::visitTableSwitch(LTableSwitch* ins) {
MTableSwitch* mir = ins->mir();
Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
Register intIndex;
if (mir->getOperand(0)->type() != MIRType::Int32) {
intIndex = ToRegister(ins->temp0());
// The input is a double, so try to convert it to an integer.
// If it does not fit in an integer, take the default case.
masm.convertDoubleToInt32(ToFloatRegister(ins->index()), intIndex,
defaultcase, false);
} else {
intIndex = ToRegister(ins->index());
}
emitTableSwitchDispatch(mir, intIndex, ToTempRegisterOrInvalid(ins->temp1()));
}
void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) {
MTableSwitch* mir = ins->mir();
Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
Register index = ToRegister(ins->temp0());
ValueOperand value = ToValue(ins->input());
Register tag = masm.extractTag(value, index);
masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);
Label unboxInt, isInt;
masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
{
FloatRegister floatIndex = ToFloatRegister(ins->temp1());
masm.unboxDouble(value, floatIndex);
masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
masm.jump(&isInt);
}
masm.bind(&unboxInt);
masm.unboxInt32(value, index);
masm.bind(&isInt);
emitTableSwitchDispatch(mir, index, ToTempRegisterOrInvalid(ins->temp2()));
}
void CodeGenerator::visitParameter(LParameter* lir) {}
void CodeGenerator::visitCallee(LCallee* lir) {
Register callee = ToRegister(lir->output());
Address ptr(FramePointer, JitFrameLayout::offsetOfCalleeToken());
masm.loadFunctionFromCalleeToken(ptr, callee);
}
void CodeGenerator::visitIsConstructing(LIsConstructing* lir) {
Register output = ToRegister(lir->output());
Address calleeToken(FramePointer, JitFrameLayout::offsetOfCalleeToken());
masm.loadPtr(calleeToken, output);
// We must be inside a function.
MOZ_ASSERT(current->mir()->info().script()->function());
// The low bit indicates whether this call is constructing; just clear the
// other bits.
static_assert(CalleeToken_Function == 0x0,
"CalleeTokenTag value should match");
static_assert(CalleeToken_FunctionConstructing == 0x1,
"CalleeTokenTag value should match");
masm.andPtr(Imm32(0x1), output);
}
void CodeGenerator::visitReturn(LReturn* lir) {
#if defined(JS_NUNBOX32)
DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);
MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
DebugOnly<LAllocation*> result = lir->getOperand(0);
MOZ_ASSERT(ToRegister(result) == JSReturnReg);
#endif
// Don't emit a jump to the return label if this is the last block, as
// it'll fall through to the epilogue.
//
// This does not hold, however, for a generator return, which may appear in
// the middle of the last block, so in that case we always emit the jump.
if (current->mir() != *gen->graph().poBegin() || lir->isGenerator()) {
masm.jump(&returnLabel_);
}
}
void CodeGenerator::visitOsrEntry(LOsrEntry* lir) {
Register temp = ToRegister(lir->temp());
// Remember the OSR entry offset into the code buffer.
masm.flushBuffer();
setOsrEntryOffset(masm.size());
// Allocate the full frame for this function.
// Note that we have a new entry here, so we reset
// MacroAssembler::framePushed() to 0 before reserving the stack.
MOZ_ASSERT(masm.framePushed() == frameSize());
masm.setFramePushed(0);
// The Baseline code ensured both the frame pointer and stack pointer point to
// the JitFrameLayout on the stack.
// If profiling, save the current frame pointer to a per-thread global field.
if (isProfilerInstrumentationEnabled()) {
masm.profilerEnterFrame(FramePointer, temp);
}
masm.reserveStack(frameSize());
MOZ_ASSERT(masm.framePushed() == frameSize());
// Ensure that the Ion frame is properly aligned.
masm.assertStackAlignment(JitStackAlignment, 0);
}
void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) {
const LAllocation* frame = lir->entry();
const LDefinition* object = lir->output();
const ptrdiff_t frameOffset =
BaselineFrame::reverseOffsetOfEnvironmentChain();
masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
}
void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) {
const LAllocation* frame = lir->entry();
const LDefinition* object = lir->output();
const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj();
masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object));
}
void CodeGenerator::visitOsrValue(LOsrValue* value) {
const LAllocation* frame = value->entry();
const ValueOperand out = ToOutValue(value);
const ptrdiff_t frameOffset = value->mir()->frameOffset();
masm.loadValue(Address(ToRegister(frame), frameOffset), out);
}
void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) {
const LAllocation* frame = lir->entry();
const ValueOperand out = ToOutValue(lir);
Address flags =
Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags());
Address retval =
Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue());
masm.moveValue(UndefinedValue(), out);
Label done;
masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL),
&done);
masm.loadValue(retval, out);
masm.bind(&done);
}
void CodeGenerator::visitStackArgT(LStackArgT* lir) {
const LAllocation* arg = lir->arg();
MIRType argType = lir->type();
uint32_t argslot = lir->argslot();
MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
Address dest = AddressOfPassedArg(argslot);
if (arg->isFloatReg()) {
masm.boxDouble(ToFloatRegister(arg), dest);
} else if (arg->isGeneralReg()) {
masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
} else {
masm.storeValue(arg->toConstant()->toJSValue(), dest);
}
}
void CodeGenerator::visitStackArgV(LStackArgV* lir) {
ValueOperand val = ToValue(lir->value());
uint32_t argslot = lir->argslot();
MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());
masm.storeValue(val, AddressOfPassedArg(argslot));
}
void CodeGenerator::visitMoveGroup(LMoveGroup* group) {
if (!group->numMoves()) {
return;
}
MoveResolver& resolver = masm.moveResolver();
for (size_t i = 0; i < group->numMoves(); i++) {
const LMove& move = group->getMove(i);
LAllocation from = move.from();
LAllocation to = move.to();
LDefinition::Type type = move.type();
// No bogus moves.
MOZ_ASSERT(from != to);
MOZ_ASSERT(!from.isConstant());
MoveOp::Type moveType;
switch (type) {
case LDefinition::OBJECT:
case LDefinition::SLOTS:
case LDefinition::WASM_ANYREF:
#ifdef JS_NUNBOX32
case LDefinition::TYPE:
case LDefinition::PAYLOAD:
#else
case LDefinition::BOX:
#endif
case LDefinition::GENERAL:
case LDefinition::STACKRESULTS:
moveType = MoveOp::GENERAL;
break;
case LDefinition::INT32:
moveType = MoveOp::INT32;
break;
case LDefinition::FLOAT32:
moveType = MoveOp::FLOAT32;
break;
case LDefinition::DOUBLE:
moveType = MoveOp::DOUBLE;
break;
case LDefinition::SIMD128:
moveType = MoveOp::SIMD128;
break;
default:
MOZ_CRASH("Unexpected move type");
}
masm.propagateOOM(
resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
}
masm.propagateOOM(resolver.resolve());
if (masm.oom()) {
return;
}
MoveEmitter emitter(masm);
#ifdef JS_CODEGEN_X86
if (group->maybeScratchRegister().isGeneralReg()) {
emitter.setScratchRegister(
group->maybeScratchRegister().toGeneralReg()->reg());
} else {
resolver.sortMemoryToMemoryMoves();
}
#endif
emitter.emit(resolver);
emitter.finish();
}
void CodeGenerator::visitInteger(LInteger* lir) {
masm.move32(Imm32(lir->i32()), ToRegister(lir->output()));
}
void CodeGenerator::visitInteger64(LInteger64* lir) {
masm.move64(Imm64(lir->i64()), ToOutRegister64(lir));
}
void CodeGenerator::visitPointer(LPointer* lir) {
masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output()));
}
void CodeGenerator::visitDouble(LDouble* ins) {
masm.loadConstantDouble(ins->value(), ToFloatRegister(ins->output()));
}
void CodeGenerator::visitFloat32(LFloat32* ins) {
masm.loadConstantFloat32(ins->value(), ToFloatRegister(ins->output()));
}
void CodeGenerator::visitValue(LValue* value) {
ValueOperand result = ToOutValue(value);
masm.moveValue(value->value(), result);
}
void CodeGenerator::visitNurseryObject(LNurseryObject* lir) {
Register output = ToRegister(lir->output());
uint32_t nurseryIndex = lir->mir()->nurseryIndex();
// Load a pointer to the entry in IonScript's nursery objects list.
CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), output);
masm.propagateOOM(ionNurseryObjectLabels_.emplaceBack(label, nurseryIndex));
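// The placeholder (-1) written above is patched at link time with the
// address of the IonScript's nursery-objects entry for |nurseryIndex|, which
// the indirect load below then reads through.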
// Load the JSObject*.
masm.loadPtr(Address(output, 0), output);
}
void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) {
// No-op.
}
void CodeGenerator::visitDebugEnterGCUnsafeRegion(
LDebugEnterGCUnsafeRegion* lir) {
Register temp = ToRegister(lir->temp0());
masm.loadJSContext(temp);
Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
masm.add32(Imm32(1), inUnsafeRegion);
Label ok;
masm.branch32(Assembler::GreaterThan, inUnsafeRegion, Imm32(0), &ok);
masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
masm.bind(&ok);
}
void CodeGenerator::visitDebugLeaveGCUnsafeRegion(
LDebugLeaveGCUnsafeRegion* lir) {
Register temp = ToRegister(lir->temp0());
masm.loadJSContext(temp);
Address inUnsafeRegion(temp, JSContext::offsetOfInUnsafeRegion());
masm.add32(Imm32(-1), inUnsafeRegion);
Label ok;
masm.branch32(Assembler::GreaterThanOrEqual, inUnsafeRegion, Imm32(0), &ok);
masm.assumeUnreachable("unbalanced enter/leave GC unsafe region");
masm.bind(&ok);
}
void CodeGenerator::visitSlots(LSlots* lir) {
Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots());
masm.loadPtr(slots, ToRegister(lir->output()));
}
void CodeGenerator::visitLoadDynamicSlotV(LLoadDynamicSlotV* lir) {
ValueOperand dest = ToOutValue(lir);
Register base = ToRegister(lir->input());
int32_t offset = lir->mir()->slot() * sizeof(js::Value);
masm.loadValue(Address(base, offset), dest);
}
static ConstantOrRegister ToConstantOrRegister(const LAllocation* value,
MIRType valueType) {
if (value->isConstant()) {
return ConstantOrRegister(value->toConstant()->toJSValue());
}
return TypedOrValueRegister(valueType, ToAnyRegister(value));
}
void CodeGenerator::visitStoreDynamicSlotT(LStoreDynamicSlotT* lir) {
Register base = ToRegister(lir->slots());
int32_t offset = lir->mir()->slot() * sizeof(js::Value);
Address dest(base, offset);
if (lir->mir()->needsBarrier()) {
emitPreBarrier(dest);
}
MIRType valueType = lir->mir()->value()->type();
ConstantOrRegister value = ToConstantOrRegister(lir->value(), valueType);
masm.storeUnboxedValue(value, valueType, dest);
}
void CodeGenerator::visitStoreDynamicSlotV(LStoreDynamicSlotV* lir) {
Register base = ToRegister(lir->slots());
int32_t offset = lir->mir()->slot() * sizeof(Value);
ValueOperand value = ToValue(lir->value());
if (lir->mir()->needsBarrier()) {
emitPreBarrier(Address(base, offset));
}
masm.storeValue(value, Address(base, offset));
}
void CodeGenerator::visitElements(LElements* lir) {
Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements());
masm.loadPtr(elements, ToRegister(lir->output()));
}
void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) {
Address environment(ToRegister(lir->function()),
JSFunction::offsetOfEnvironment());
masm.unboxObject(environment, ToRegister(lir->output()));
}
void CodeGenerator::visitHomeObject(LHomeObject* lir) {
Register func = ToRegister(lir->function());
Address homeObject(func, FunctionExtended::offsetOfMethodHomeObjectSlot());
masm.assertFunctionIsExtended(func);
#ifdef DEBUG
Label isObject;
masm.branchTestObject(Assembler::Equal, homeObject, &isObject);
masm.assumeUnreachable("[[HomeObject]] must be Object");
masm.bind(&isObject);
#endif
masm.unboxObject(homeObject, ToRegister(lir->output()));
}
void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) {
Register homeObject = ToRegister(lir->homeObject());
ValueOperand output = ToOutValue(lir);
Register temp = output.scratchReg();
masm.loadObjProto(homeObject, temp);
#ifdef DEBUG
// We won't encounter a lazy proto, because the prototype is guaranteed to
// either be a JSFunction or a PlainObject, and only proxy objects can have a
// lazy proto.
MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
Label proxyCheckDone;
masm.branchPtr(Assembler::NotEqual, temp, ImmWord(1), &proxyCheckDone);
masm.assumeUnreachable("Unexpected lazy proto in JSOp::SuperBase");
masm.bind(&proxyCheckDone);
#endif
Label nullProto, done;
masm.branchPtr(Assembler::Equal, temp, ImmWord(0), &nullProto);
// Box prototype and return
masm.tagValue(JSVAL_TYPE_OBJECT, temp, output);
masm.jump(&done);
masm.bind(&nullProto);
masm.moveValue(NullValue(), output);
masm.bind(&done);
}
template <class T>
static T* ToConstantObject(MDefinition* def) {
MOZ_ASSERT(def->isConstant());
return &def->toConstant()->toObject().as<T>();
}
void CodeGenerator::visitNewLexicalEnvironmentObject(
LNewLexicalEnvironmentObject* lir) {
Register output = ToRegister(lir->output());
Register temp = ToRegister(lir->temp0());
auto* templateObj = ToConstantObject<BlockLexicalEnvironmentObject>(
lir->mir()->templateObj());
auto* scope = &templateObj->scope();
gc::Heap initialHeap = gc::Heap::Default;
using Fn =
BlockLexicalEnvironmentObject* (*)(JSContext*, Handle<LexicalScope*>);
auto* ool =
oolCallVM<Fn, BlockLexicalEnvironmentObject::createWithoutEnclosing>(
lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
TemplateObject templateObject(templateObj);
masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
masm.bind(ool->rejoin());
}
void CodeGenerator::visitNewClassBodyEnvironmentObject(
LNewClassBodyEnvironmentObject* lir) {
Register output = ToRegister(lir->output());
Register temp = ToRegister(lir->temp0());
auto* templateObj = ToConstantObject<ClassBodyLexicalEnvironmentObject>(
lir->mir()->templateObj());
auto* scope = &templateObj->scope();
gc::Heap initialHeap = gc::Heap::Default;
using Fn = ClassBodyLexicalEnvironmentObject* (*)(JSContext*,
Handle<ClassBodyScope*>);
auto* ool =
oolCallVM<Fn, ClassBodyLexicalEnvironmentObject::createWithoutEnclosing>(
lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
TemplateObject templateObject(templateObj);
masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
masm.bind(ool->rejoin());
}
void CodeGenerator::visitNewVarEnvironmentObject(
LNewVarEnvironmentObject* lir) {
Register output = ToRegister(lir->output());
Register temp = ToRegister(lir->temp0());
auto* templateObj =
ToConstantObject<VarEnvironmentObject>(lir->mir()->templateObj());
auto* scope = &templateObj->scope().as<VarScope>();
gc::Heap initialHeap = gc::Heap::Default;
using Fn = VarEnvironmentObject* (*)(JSContext*, Handle<VarScope*>);
auto* ool = oolCallVM<Fn, VarEnvironmentObject::createWithoutEnclosing>(
lir, ArgList(ImmGCPtr(scope)), StoreRegisterTo(output));
TemplateObject templateObject(templateObj);
masm.createGCObject(output, temp, templateObject, initialHeap, ool->entry());
masm.bind(ool->rejoin());
}
void CodeGenerator::visitGuardShape(LGuardShape* guard) {
Register obj = ToRegister(guard->object());
Register temp = ToTempRegisterOrInvalid(guard->temp0());
Label bail;
masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp,
obj, &bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardFuse(LGuardFuse* guard) {
auto fuseIndex = guard->mir()->fuseIndex();
Register temp = ToRegister(guard->temp0());
Label bail;
// Bake the specific fuse address into the Ion code, because we won't share
// this code across realms.
GuardFuse* fuse = mirGen().realm->realmFuses().getFuseByIndex(fuseIndex);
masm.loadPtr(AbsoluteAddress(fuse->fuseRef()), temp);
masm.branchPtr(Assembler::NotEqual, temp, ImmPtr(nullptr), &bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardMultipleShapes(LGuardMultipleShapes* guard) {
Register obj = ToRegister(guard->object());
Register shapeList = ToRegister(guard->shapeList());
Register temp = ToRegister(guard->temp0());
Register temp2 = ToRegister(guard->temp1());
Register temp3 = ToRegister(guard->temp2());
Register spectre = ToTempRegisterOrInvalid(guard->temp3());
Label bail;
masm.loadPtr(Address(shapeList, NativeObject::offsetOfElements()), temp);
masm.branchTestObjShapeList(Assembler::NotEqual, obj, temp, temp2, temp3,
spectre, &bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardProto(LGuardProto* guard) {
Register obj = ToRegister(guard->object());
Register expected = ToRegister(guard->expected());
Register temp = ToRegister(guard->temp0());
masm.loadObjProto(obj, temp);
Label bail;
masm.branchPtr(Assembler::NotEqual, temp, expected, &bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardNullProto(LGuardNullProto* guard) {
Register obj = ToRegister(guard->object());
Register temp = ToRegister(guard->temp0());
masm.loadObjProto(obj, temp);
Label bail;
masm.branchTestPtr(Assembler::NonZero, temp, temp, &bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardIsNativeObject(LGuardIsNativeObject* guard) {
Register obj = ToRegister(guard->object());
Register temp = ToRegister(guard->temp0());
Label bail;
masm.branchIfNonNativeObj(obj, temp, &bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardGlobalGeneration(LGuardGlobalGeneration* guard) {
Register temp = ToRegister(guard->temp0());
Label bail;
masm.load32(AbsoluteAddress(guard->mir()->generationAddr()), temp);
masm.branch32(Assembler::NotEqual, temp, Imm32(guard->mir()->expected()),
&bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardIsProxy(LGuardIsProxy* guard) {
Register obj = ToRegister(guard->object());
Register temp = ToRegister(guard->temp0());
Label bail;
masm.branchTestObjectIsProxy(false, obj, temp, &bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardIsNotProxy(LGuardIsNotProxy* guard) {
Register obj = ToRegister(guard->object());
Register temp = ToRegister(guard->temp0());
Label bail;
masm.branchTestObjectIsProxy(true, obj, temp, &bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardIsNotDOMProxy(LGuardIsNotDOMProxy* guard) {
Register proxy = ToRegister(guard->proxy());
Register temp = ToRegister(guard->temp0());
Label bail;
masm.branchTestProxyHandlerFamily(Assembler::Equal, proxy, temp,
GetDOMProxyHandlerFamily(), &bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitProxyGet(LProxyGet* lir) {
Register proxy = ToRegister(lir->proxy());
Register temp = ToRegister(lir->temp0());
pushArg(lir->mir()->id(), temp);
pushArg(proxy);
using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
callVM<Fn, ProxyGetProperty>(lir);
}
void CodeGenerator::visitProxyGetByValue(LProxyGetByValue* lir) {
Register proxy = ToRegister(lir->proxy());
ValueOperand idVal = ToValue(lir->idVal());
pushArg(idVal);
pushArg(proxy);
using Fn =
bool (*)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
callVM<Fn, ProxyGetPropertyByValue>(lir);
}
void CodeGenerator::visitProxyHasProp(LProxyHasProp* lir) {
Register proxy = ToRegister(lir->proxy());
ValueOperand idVal = ToValue(lir->id());
pushArg(idVal);
pushArg(proxy);
using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool*);
if (lir->mir()->hasOwn()) {
callVM<Fn, ProxyHasOwn>(lir);
} else {
callVM<Fn, ProxyHas>(lir);
}
}
void CodeGenerator::visitProxySet(LProxySet* lir) {
Register proxy = ToRegister(lir->proxy());
ValueOperand rhs = ToValue(lir->rhs());
Register temp = ToRegister(lir->temp0());
pushArg(Imm32(lir->mir()->strict()));
pushArg(rhs);
pushArg(lir->mir()->id(), temp);
pushArg(proxy);
using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
callVM<Fn, ProxySetProperty>(lir);
}
void CodeGenerator::visitProxySetByValue(LProxySetByValue* lir) {
Register proxy = ToRegister(lir->proxy());
ValueOperand idVal = ToValue(lir->idVal());
ValueOperand rhs = ToValue(lir->rhs());
pushArg(Imm32(lir->mir()->strict()));
pushArg(rhs);
pushArg(idVal);
pushArg(proxy);
using Fn = bool (*)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
callVM<Fn, ProxySetPropertyByValue>(lir);
}
void CodeGenerator::visitCallSetArrayLength(LCallSetArrayLength* lir) {
Register obj = ToRegister(lir->obj());
ValueOperand rhs = ToValue(lir->rhs());
pushArg(Imm32(lir->mir()->strict()));
pushArg(rhs);
pushArg(obj);
using Fn = bool (*)(JSContext*, HandleObject, HandleValue, bool);
callVM<Fn, jit::SetArrayLength>(lir);
}
void CodeGenerator::visitMegamorphicLoadSlot(LMegamorphicLoadSlot* lir) {
Register obj = ToRegister(lir->object());
Register temp0 = ToRegister(lir->temp0());
Register temp1 = ToRegister(lir->temp1());
Register temp2 = ToRegister(lir->temp2());
Register temp3 = ToRegister(lir->temp3());
ValueOperand output = ToOutValue(lir);
Label cacheHit;
masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1, temp2,
output, &cacheHit);
Label bail;
masm.branchIfNonNativeObj(obj, temp0, &bail);
masm.Push(UndefinedValue());
masm.moveStackPtrTo(temp3);
using Fn = bool (*)(JSContext* cx, JSObject* obj, PropertyKey id,
MegamorphicCache::Entry* cacheEntry, Value* vp);
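// The UndefinedValue pushed above is the single-element vp: on success,
// GetNativeDataPropertyPure overwrites it with the property value, and it is
// popped into |output| below.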
masm.setupAlignedABICall();
masm.loadJSContext(temp0);
masm.passABIArg(temp0);
masm.passABIArg(obj);
masm.movePropertyKey(lir->mir()->name(), temp1);
masm.passABIArg(temp1);
masm.passABIArg(temp2);
masm.passABIArg(temp3);
masm.callWithABI<Fn, GetNativeDataPropertyPure>();
MOZ_ASSERT(!output.aliases(ReturnReg));
masm.Pop(output);
masm.branchIfFalseBool(ReturnReg, &bail);
masm.bind(&cacheHit);
bailoutFrom(&bail, lir->snapshot());
}
void CodeGenerator::visitMegamorphicLoadSlotPermissive(
LMegamorphicLoadSlotPermissive* lir) {
Register obj = ToRegister(lir->object());
Register temp0 = ToRegister(lir->temp0());
Register temp1 = ToRegister(lir->temp1());
Register temp2 = ToRegister(lir->temp2());
Register temp3 = ToRegister(lir->temp3());
ValueOperand output = ToOutValue(lir);
masm.movePtr(obj, temp3);
Label done, getter, nullGetter;
masm.emitMegamorphicCacheLookup(lir->mir()->name(), obj, temp0, temp1, temp2,
output, &done, &getter);
masm.movePropertyKey(lir->mir()->name(), temp1);
pushArg(temp2);
pushArg(temp1);
pushArg(obj);
using Fn = bool (*)(JSContext*, HandleObject, HandleId,
MegamorphicCacheEntry*, MutableHandleValue);
callVM<Fn, GetPropMaybeCached>(lir);
masm.jump(&done);
masm.bind(&getter);
emitCallMegamorphicGetter(lir, output, temp3, temp1, temp2, &nullGetter);
masm.jump(&done);
masm.bind(&nullGetter);
masm.moveValue(UndefinedValue(), output);
masm.bind(&done);
}
void CodeGenerator::visitMegamorphicLoadSlotByValue(
LMegamorphicLoadSlotByValue* lir) {
Register obj = ToRegister(lir->object());
ValueOperand idVal = ToValue(lir->idVal());
Register temp0 = ToRegister(lir->temp0());
Register temp1 = ToRegister(lir->temp1());
Register temp2 = ToRegister(lir->temp2());
ValueOperand output = ToOutValue(lir);
Label cacheHit, bail;
masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
output, &cacheHit);
masm.branchIfNonNativeObj(obj, temp0, &bail);
// idVal will be in vp[0]; the result will be stored in vp[1].
masm.reserveStack(sizeof(Value));
masm.Push(idVal);
masm.moveStackPtrTo(temp0);
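// Stack layout at this point (the stack grows down): temp0 points at
// vp[0] = idVal, and the Value slot reserved above is vp[1], which receives
// the result.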
using Fn = bool (*)(JSContext* cx, JSObject* obj,
MegamorphicCache::Entry* cacheEntry, Value* vp);
masm.setupAlignedABICall();
masm.loadJSContext(temp1);
masm.passABIArg(temp1);
masm.passABIArg(obj);
masm.passABIArg(temp2);
masm.passABIArg(temp0);
masm.callWithABI<Fn, GetNativeDataPropertyByValuePure>();
MOZ_ASSERT(!idVal.aliases(temp0));
masm.storeCallPointerResult(temp0);
masm.Pop(idVal);
uint32_t framePushed = masm.framePushed();
Label ok;
masm.branchIfTrueBool(temp0, &ok);
masm.freeStack(sizeof(Value)); // Discard result Value.
masm.jump(&bail);
masm.bind(&ok);
masm.setFramePushed(framePushed);
masm.Pop(output);
masm.bind(&cacheHit);
bailoutFrom(&bail, lir->snapshot());
}
void CodeGenerator::visitMegamorphicLoadSlotByValuePermissive(
LMegamorphicLoadSlotByValuePermissive* lir) {
Register obj = ToRegister(lir->object());
ValueOperand idVal = ToValue(lir->idVal());
Register temp0 = ToRegister(lir->temp0());
Register temp1 = ToRegister(lir->temp1());
Register temp2 = ToRegister(lir->temp2());
ValueOperand output = ToOutValue(lir);
// If we have enough registers available, we can call getters directly from
// jitcode. On x86, we have to call into the VM.
#ifndef JS_CODEGEN_X86
Label done, getter, nullGetter;
Register temp3 = ToRegister(lir->temp3());
masm.movePtr(obj, temp3);
masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
output, &done, &getter);
#else
Label done;
masm.emitMegamorphicCacheLookupByValue(idVal, obj, temp0, temp1, temp2,
output, &done);
#endif
pushArg(temp2);
pushArg(idVal);
pushArg(obj);
using Fn = bool (*)(JSContext*, HandleObject, HandleValue,
MegamorphicCacheEntry*, MutableHandleValue);
callVM<Fn, GetElemMaybeCached>(lir);
#ifndef JS_CODEGEN_X86
masm.jump(&done);
masm.bind(&getter);
emitCallMegamorphicGetter(lir, output, temp3, temp1, temp2, &nullGetter);
masm.jump(&done);
masm.bind(&nullGetter);
masm.moveValue(UndefinedValue(), output);
#endif
masm.bind(&done);
}
void CodeGenerator::visitMegamorphicStoreSlot(LMegamorphicStoreSlot* lir) {
Register obj = ToRegister(lir->object());
ValueOperand value = ToValue(lir->rhs());
Register temp0 = ToRegister(lir->temp0());
#ifndef JS_CODEGEN_X86
Register temp1 = ToRegister(lir->temp1());
Register temp2 = ToRegister(lir->temp2());
#endif
// The instruction is marked as a call instruction, so only these registers
// are live.
LiveRegisterSet liveRegs;
liveRegs.addUnchecked(obj);
liveRegs.addUnchecked(value);
liveRegs.addUnchecked(temp0);
#ifndef JS_CODEGEN_X86
liveRegs.addUnchecked(temp1);
liveRegs.addUnchecked(temp2);
#endif
Label cacheHit, done;
#ifdef JS_CODEGEN_X86
masm.emitMegamorphicCachedSetSlot(
lir->mir()->name(), obj, temp0, value, liveRegs, &cacheHit,
[](MacroAssembler& masm, const Address& addr, MIRType mirType) {
EmitPreBarrier(masm, addr, mirType);
});
#else
masm.emitMegamorphicCachedSetSlot(
lir->mir()->name(), obj, temp0, temp1, temp2, value, liveRegs, &cacheHit,
[](MacroAssembler& masm, const Address& addr, MIRType mirType) {
EmitPreBarrier(masm, addr, mirType);
});
#endif
pushArg(Imm32(lir->mir()->strict()));
pushArg(value);
pushArg(lir->mir()->name(), temp0);
pushArg(obj);
using Fn = bool (*)(JSContext*, HandleObject, HandleId, HandleValue, bool);
callVM<Fn, SetPropertyMegamorphic<true>>(lir);
masm.jump(&done);
masm.bind(&cacheHit);
masm.branchPtrInNurseryChunk(Assembler::Equal, obj, temp0, &done);
masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp0, &done);
// Note: because this is a call instruction, no registers need to be saved.
MOZ_ASSERT(lir->isCall());
emitPostWriteBarrier(obj);
masm.bind(&done);
}
void CodeGenerator::visitMegamorphicHasProp(LMegamorphicHasProp* lir) {
Register obj = ToRegister(lir->object());
ValueOperand idVal = ToValue(lir->idVal());
Register temp0 = ToRegister(lir->temp0());
Register temp1 = ToRegister(lir->temp1());
Register temp2 = ToRegister(lir->temp2());
Register output = ToRegister(lir->output());
Label bail, cacheHit;
masm.emitMegamorphicCacheLookupExists(idVal, obj, temp0, temp1, temp2, output,
&cacheHit, lir->mir()->hasOwn());
masm.branchIfNonNativeObj(obj, temp0, &bail);
// idVal will be in vp[0]; the result will be stored in vp[1].
masm.reserveStack(sizeof(Value));
masm.Push(idVal);
masm.moveStackPtrTo(temp0);
using Fn = bool (*)(JSContext* cx, JSObject* obj,
MegamorphicCache::Entry* cacheEntry, Value* vp);
masm.setupAlignedABICall();
masm.loadJSContext(temp1);
masm.passABIArg(temp1);
masm.passABIArg(obj);
masm.passABIArg(temp2);
masm.passABIArg(temp0);
if (lir->mir()->hasOwn()) {
masm.callWithABI<Fn, HasNativeDataPropertyPure<true>>();
} else {
masm.callWithABI<Fn, HasNativeDataPropertyPure<false>>();
}
MOZ_ASSERT(!idVal.aliases(temp0));
masm.storeCallPointerResult(temp0);
masm.Pop(idVal);
uint32_t framePushed = masm.framePushed();
Label ok;
masm.branchIfTrueBool(temp0, &ok);
masm.freeStack(sizeof(Value)); // Discard result Value.
masm.jump(&bail);
masm.bind(&ok);
masm.setFramePushed(framePushed);
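// After popping idVal, the stack pointer points at vp[1], which holds the
// boxed boolean result.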
masm.unboxBoolean(Address(masm.getStackPointer(), 0), output);
masm.freeStack(sizeof(Value));
masm.bind(&cacheHit);
bailoutFrom(&bail, lir->snapshot());
}
void CodeGenerator::visitSmallObjectVariableKeyHasProp(
LSmallObjectVariableKeyHasProp* lir) {
Register id = ToRegister(lir->idStr());
Register output = ToRegister(lir->output());
#ifdef DEBUG
Label isAtom;
masm.branchTest32(Assembler::NonZero, Address(id, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), &isAtom);
masm.assumeUnreachable("Expected atom input");
masm.bind(&isAtom);
#endif
SharedShape* shape = &lir->mir()->shape()->asShared();
Label done, success;
for (SharedShapePropertyIter<NoGC> iter(shape); !iter.done(); iter++) {
masm.branchPtr(Assembler::Equal, id, ImmGCPtr(iter->key().toAtom()),
&success);
}
masm.move32(Imm32(0), output);
masm.jump(&done);
masm.bind(&success);
masm.move32(Imm32(1), output);
masm.bind(&done);
}
void CodeGenerator::visitGuardIsNotArrayBufferMaybeShared(
LGuardIsNotArrayBufferMaybeShared* guard) {
Register obj = ToRegister(guard->object());
Register temp = ToRegister(guard->temp0());
Label bail;
masm.loadObjClassUnsafe(obj, temp);
masm.branchPtr(Assembler::Equal, temp,
ImmPtr(&FixedLengthArrayBufferObject::class_), &bail);
masm.branchPtr(Assembler::Equal, temp,
ImmPtr(&FixedLengthSharedArrayBufferObject::class_), &bail);
masm.branchPtr(Assembler::Equal, temp,
ImmPtr(&ResizableArrayBufferObject::class_), &bail);
masm.branchPtr(Assembler::Equal, temp,
ImmPtr(&GrowableSharedArrayBufferObject::class_), &bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardIsTypedArray(LGuardIsTypedArray* guard) {
Register obj = ToRegister(guard->object());
Register temp = ToRegister(guard->temp0());
Label bail;
masm.loadObjClassUnsafe(obj, temp);
masm.branchIfClassIsNotTypedArray(temp, &bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardIsFixedLengthTypedArray(
LGuardIsFixedLengthTypedArray* guard) {
Register obj = ToRegister(guard->object());
Register temp = ToRegister(guard->temp0());
Label bail;
masm.loadObjClassUnsafe(obj, temp);
masm.branchIfClassIsNotFixedLengthTypedArray(temp, &bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardIsResizableTypedArray(
LGuardIsResizableTypedArray* guard) {
Register obj = ToRegister(guard->object());
Register temp = ToRegister(guard->temp0());
Label bail;
masm.loadObjClassUnsafe(obj, temp);
masm.branchIfClassIsNotResizableTypedArray(temp, &bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardHasProxyHandler(LGuardHasProxyHandler* guard) {
Register obj = ToRegister(guard->object());
Label bail;
Address handlerAddr(obj, ProxyObject::offsetOfHandler());
masm.branchPtr(Assembler::NotEqual, handlerAddr,
ImmPtr(guard->mir()->handler()), &bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard) {
Register input = ToRegister(guard->input());
Register expected = ToRegister(guard->expected());
Assembler::Condition cond =
guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
bailoutCmpPtr(cond, input, expected, guard->snapshot());
}
void CodeGenerator::visitGuardSpecificFunction(LGuardSpecificFunction* guard) {
Register input = ToRegister(guard->input());
Register expected = ToRegister(guard->expected());
bailoutCmpPtr(Assembler::NotEqual, input, expected, guard->snapshot());
}
void CodeGenerator::visitGuardSpecificAtom(LGuardSpecificAtom* guard) {
Register str = ToRegister(guard->str());
Register scratch = ToRegister(guard->temp0());
LiveRegisterSet volatileRegs = liveVolatileRegs(guard);
volatileRegs.takeUnchecked(scratch);
Label bail;
masm.guardSpecificAtom(str, guard->mir()->atom(), scratch, volatileRegs,
&bail);
bailoutFrom(&bail, guard->snapshot());
}
void CodeGenerator::visitGuardSpecificSymbol(LGuardSpecificSymbol* guard) {
Register symbol = ToRegister(guard->symbol());
bailoutCmpPtr(Assembler::NotEqual, symbol, ImmGCPtr(guard->mir()->expected()),
guard->snapshot());
}
void CodeGenerator::visitGuardSpecificInt32(LGuardSpecificInt32* guard) {
Register num = ToRegister(guard->num());
bailoutCmp32(Assembler::NotEqual, num, Imm32(guard->mir()->expected()),
guard->snapshot());
}
void CodeGenerator::visitGuardStringToIndex(LGuardStringToIndex* lir) {
Register str = ToRegister(lir->string());
Register output = ToRegister(lir->output());
Label vmCall, done;
masm.loadStringIndexValue(str, output, &vmCall);
masm.jump(&done);
{
masm.bind(&vmCall);
LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
volatileRegs.takeUnchecked(output);
masm.PushRegsInMask(volatileRegs);
using Fn = int32_t (*)(JSString* str);
masm.setupAlignedABICall();
masm.passABIArg(str);
masm.callWithABI<Fn, GetIndexFromString>();
masm.storeCallInt32Result(output);
masm.PopRegsInMask(volatileRegs);
// GetIndexFromString returns a negative value on failure.
bailoutTest32(Assembler::Signed, output, output, lir->snapshot());
}
masm.bind(&done);
}
void CodeGenerator::visitGuardStringToInt32(LGuardStringToInt32* lir) {
Register str = ToRegister(lir->string());
Register output = ToRegister(lir->output());
Register temp = ToRegister(lir->temp0());
LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
Label bail;
masm.guardStringToInt32(str, output, temp, volatileRegs, &bail);
bailoutFrom(&bail, lir->snapshot());
}
void CodeGenerator::visitGuardStringToDouble(LGuardStringToDouble* lir) {
Register str = ToRegister(lir->string());
FloatRegister output = ToFloatRegister(lir->output());
Register temp0 = ToRegister(lir->temp0());
Register temp1 = ToRegister(lir->temp1());
Label vmCall, done;
// Use the string's cached index value as a fast path if possible.
masm.loadStringIndexValue(str, temp0, &vmCall);
masm.convertInt32ToDouble(temp0, output);
masm.jump(&done);
{
masm.bind(&vmCall);
// Reserve stack for holding the result value of the call.
masm.reserveStack(sizeof(double));
masm.moveStackPtrTo(temp0);
LiveRegisterSet volatileRegs = liveVolatileRegs(lir);
volatileRegs.takeUnchecked(temp0);
volatileRegs.takeUnchecked(temp1);
masm.PushRegsInMask(volatileRegs);
using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
masm.setupAlignedABICall();
masm.loadJSContext(temp1);
masm.passABIArg(temp1);
masm.passABIArg(str);
masm.passABIArg(temp0);
masm.callWithABI<Fn, StringToNumberPure>();
masm.storeCallPointerResult(temp0);
masm.PopRegsInMask(volatileRegs);
Label ok;
masm.branchIfTrueBool(temp0, &ok);
{
// OOM path, recovered by StringToNumberPure.
//
// Use addToStackPtr instead of freeStack as freeStack tracks stack height
// flow-insensitively, and using it here would confuse the stack height
// tracking.
masm.addToStackPtr(Imm32(sizeof(double)));
bailout(lir->snapshot());
}
masm.bind(&ok);
masm.Pop(output);
}
masm.bind(&done);
}
void CodeGenerator::visitGuardNoDenseElements(LGuardNoDenseElements* guard) {
Register obj = ToRegister(guard->input());
Register temp = ToRegister(guard->temp0());
// Load obj->elements.
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), temp);
// Make sure there are no dense elements.
Address initLength(temp, ObjectElements::offsetOfInitializedLength());
bailoutCmp32(Assembler::NotEqual, initLength, Imm32(0), guard->snapshot());
}
void CodeGenerator::visitBooleanToInt64(LBooleanToInt64* lir) {
Register input = ToRegister(lir->input());
Register64 output = ToOutRegister64(lir);
masm.move32To64ZeroExtend(input, output);
}
void CodeGenerator::emitStringToInt64(LInstruction* lir, Register input,
Register64 output) {
Register temp = output.scratchReg();
saveLive(lir);
masm.reserveStack(sizeof(uint64_t));
masm.moveStackPtrTo(temp);
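// |temp| now points at the reserved uint64_t slot; DoStringToInt64 writes
// the result through this outparam and it is loaded back below.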
pushArg(temp);
pushArg(input);
using Fn = bool (*)(JSContext*, HandleString, uint64_t*);
callVM<Fn, DoStringToInt64>(lir);
masm.load64(Address(masm.getStackPointer(), 0), output);
masm.freeStack(sizeof(uint64_t));
restoreLiveIgnore(lir, StoreValueTo(output).clobbered());
}
void CodeGenerator::visitStringToInt64(LStringToInt64* lir) {
Register input = ToRegister(lir->input());
Register64 output = ToOutRegister64(lir);
emitStringToInt64(lir, input, output);
}
void CodeGenerator::visitValueToInt64(LValueToInt64* lir) {
ValueOperand input = ToValue(lir->input());
Register temp = ToRegister(lir->temp0());
Register64 output = ToOutRegister64(lir);
int checks = 3;
Label fail, done;
// Jump to |fail| if this is the last check and it fails; otherwise jump to
// the next test.
auto emitTestAndUnbox = [&](auto testAndUnbox) {
MOZ_ASSERT(checks > 0);
checks--;
Label notType;
Label* target = checks ? &notType : &fail;
testAndUnbox(target);
if (checks) {
masm.jump(&done);
masm.bind(&notType);
}
};
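// With three checks, the first two tests fall through to the next test on a
// type mismatch, while the last test jumps straight to |fail|.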
Register tag = masm.extractTag(input, temp);
// BigInt.
emitTestAndUnbox([&](Label* target) {
masm.branchTestBigInt(Assembler::NotEqual, tag, target);
masm.unboxBigInt(input, temp);
masm.loadBigInt64(temp, output);
});
// Boolean
emitTestAndUnbox([&](Label* target) {
masm.branchTestBoolean(Assembler::NotEqual, tag, target);
masm.unboxBoolean(input, temp);
masm.move32To64ZeroExtend(temp, output);
});
// String
emitTestAndUnbox([&](Label* target) {
masm.branchTestString(Assembler::NotEqual, tag, target);
masm.unboxString(input, temp);
emitStringToInt64(lir, temp, output);
});
MOZ_ASSERT(checks == 0);
bailoutFrom(&fail, lir->snapshot());
masm.bind(&done);
}
void CodeGenerator::visitTruncateBigIntToInt64(LTruncateBigIntToInt64* lir) {
Register operand = ToRegister(lir->input());
Register64 output = ToOutRegister64(lir);
masm.loadBigInt64(operand, output);
}
OutOfLineCode* CodeGenerator::createBigIntOutOfLine(LInstruction* lir,
Scalar::Type type,
Register64 input,
Register output) {
#if JS_BITS_PER_WORD == 32
using Fn = BigInt* (*)(JSContext*, uint32_t, uint32_t);
auto args = ArgList(input.low, input.high);
#else
using Fn = BigInt* (*)(JSContext*, uint64_t);
auto args = ArgList(input);
#endif
if (type == Scalar::BigInt64) {
return oolCallVM<Fn, jit::CreateBigIntFromInt64>(lir, args,
StoreRegisterTo(output));
}
MOZ_ASSERT(type == Scalar::BigUint64);
return oolCallVM<Fn, jit::CreateBigIntFromUint64>(lir, args,
StoreRegisterTo(output));
}
void CodeGenerator::emitCreateBigInt(LInstruction* lir, Scalar::Type type,
Register64 input, Register output,
Register maybeTemp,
Register64 maybeTemp64) {
OutOfLineCode* ool = createBigIntOutOfLine(lir, type, input, output);
if (maybeTemp != InvalidReg) {
masm.newGCBigInt(output, maybeTemp, initialBigIntHeap(), ool->entry());
} else {
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
regs.take(input);
regs.take(output);
Register temp = regs.takeAny();
masm.push(temp);
Label fail, ok;
masm.newGCBigInt(output, temp, initialBigIntHeap(), &fail);
masm.pop(temp);
masm.jump(&ok);
masm.bind(&fail);
masm.pop(temp);
masm.jump(ool->entry());
masm.bind(&ok);
}
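// Note: in the fallback above no temp was supplied, so a register is
// borrowed from the allocatable set and preserved with push/pop around the
// allocation, since it may hold a live value.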
masm.initializeBigInt64(type, output, input, maybeTemp64);
masm.bind(ool->rejoin());
}
void CodeGenerator::emitCallMegamorphicGetter(
LInstruction* lir, ValueOperand accessorAndOutput, Register obj,
Register calleeScratch, Register argcScratch, Label* nullGetter) {
MOZ_ASSERT(calleeScratch == IonGenericCallCalleeReg);
MOZ_ASSERT(argcScratch == IonGenericCallArgcReg);
masm.unboxNonDouble(accessorAndOutput, calleeScratch,
JSVAL_TYPE_PRIVATE_GCTHING);
masm.loadPtr(Address(calleeScratch, GetterSetter::offsetOfGetter()),
calleeScratch);
masm.branchTestPtr(Assembler::Zero, calleeScratch, calleeScratch, nullGetter);
masm.loadPtr(Address(calleeScratch, JSFunction::offsetOfJitInfoOrScript()),
argcScratch);
if (JitStackValueAlignment > 1) {
masm.reserveStack(sizeof(Value) * (JitStackValueAlignment - 1));
}
masm.pushValue(JSVAL_TYPE_OBJECT, obj);
masm.checkStackAlignment();
masm.move32(Imm32(0), argcScratch);
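// The getter is invoked through the generic call stub with argc = 0 and
// |this| = obj (pushed above); the earlier stack padding keeps the pushed
// Values aligned to JitStackValueAlignment.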
ensureOsiSpace();
TrampolinePtr genericCallStub =
gen->jitRuntime()->getIonGenericCallStub(IonGenericCallKind::Call);
uint32_t callOffset = masm.callJit(genericCallStub);
markSafepointAt(callOffset, lir);
masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
masm.moveValue(JSReturnOperand, accessorAndOutput);
masm.setFramePushed(frameSize());
emitRestoreStackPointerFromFP();
}
void CodeGenerator::visitInt64ToBigInt(LInt64ToBigInt* lir) {
Register64 input = ToRegister64(lir->input());
Register64 temp = ToRegister64(lir->temp0());
Register output = ToRegister(lir->output());
emitCreateBigInt(lir, Scalar::BigInt64, input, output, temp.scratchReg(),
temp);
}
void CodeGenerator::visitUint64ToBigInt(LUint64ToBigInt* lir) {
Register64 input = ToRegister64(lir->input());
Register temp = ToRegister(lir->temp0());
Register output = ToRegister(lir->output());
emitCreateBigInt(lir, Scalar::BigUint64, input, output, temp);
}
void CodeGenerator::visitInt64ToIntPtr(LInt64ToIntPtr* lir) {
Register64 input = ToRegister64(lir->input());
#ifdef JS_64BIT
MOZ_ASSERT(input.reg == ToRegister(lir->output()));
#else
Register output = ToRegister(lir->output());
#endif
Label bail;
if (lir->mir()->isSigned()) {
masm.branchInt64NotInPtrRange(input, &bail);
} else {
masm.branchUInt64NotInPtrRange(input, &bail);
}
bailoutFrom(&bail, lir->snapshot());
#ifndef JS_64BIT
masm.move64To32(input, output);
#endif
}
void CodeGenerator::visitIntPtrToInt64(LIntPtrToInt64* lir) {
#ifdef JS_64BIT
MOZ_CRASH("Not used on 64-bit platforms");
#else
Register input = ToRegister(lir->input());
Register64 output = ToOutRegister64(lir);
masm.move32To64SignExtend(input, output);
#endif
}
void CodeGenerator::visitGuardValue(LGuardValue* lir) {
ValueOperand input = ToValue(lir->input());
Register nanTemp = ToTempRegisterOrInvalid(lir->temp0());
Value expected = lir->mir()->expected();
Label bail;
if (expected.isNaN()) {
masm.branchTestNaNValue(Assembler::NotEqual, input, nanTemp, &bail);
} else {
MOZ_ASSERT(nanTemp == InvalidReg);
masm.branchTestValue(Assembler::NotEqual, input, expected, &bail);
}
bailoutFrom(&bail, lir->snapshot());
}
void CodeGenerator::visitGuardNullOrUndefined(LGuardNullOrUndefined* lir) {
ValueOperand input = ToValue(lir->input());
ScratchTagScope tag(masm, input);
masm.splitTagForTest(input, tag);
Label done;
masm.branchTestNull(Assembler::Equal, tag, &done);
Label bail;
masm.branchTestUndefined(Assembler::NotEqual, tag, &bail);
bailoutFrom(&bail, lir->snapshot());
masm.bind(&done);
}
void CodeGenerator::visitGuardIsNotObject(LGuardIsNotObject* lir) {
ValueOperand input = ToValue(lir->input());
Label bail;
masm.branchTestObject(Assembler::Equal, input, &bail);
bailoutFrom(&bail, lir->snapshot());
}
void CodeGenerator::visitGuardFunctionFlags(LGuardFunctionFlags* lir) {
Register function = ToRegister(lir->function());
Label bail;
if (uint16_t flags = lir->mir()->expectedFlags()) {
masm.branchTestFunctionFlags(function, flags, Assembler::Zero, &bail);
}
if (uint16_t flags = lir->mir()->unexpectedFlags()) {
masm.branchTestFunctionFlags(function, flags, Assembler::NonZero, &bail);
}
bailoutFrom(&bail, lir->snapshot());
}
void CodeGenerator::visitGuardFunctionIsNonBuiltinCtor(
LGuardFunctionIsNonBuiltinCtor* lir) {
Register function = ToRegister(lir->function());
Register temp = ToRegister(lir->temp0());
Label bail;
masm.branchIfNotFunctionIsNonBuiltinCtor(function, temp, &bail);
bailoutFrom(&bail, lir->snapshot());
}
void CodeGenerator::visitGuardFunctionKind(LGuardFunctionKind* lir) {
Register function = ToRegister(lir->function());
Register temp = ToRegister(lir->temp0());
Assembler::Condition cond =
lir->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
Label bail;
masm.branchFunctionKind(cond, lir->mir()->expected(), function, temp, &bail);
bailoutFrom(&bail, lir->snapshot());
}
void CodeGenerator::visitGuardFunctionScript(LGuardFunctionScript* lir) {
Register function = ToRegister(lir->function());
Address scriptAddr(function, JSFunction::offsetOfJitInfoOrScript());
bailoutCmpPtr(Assembler::NotEqual, scriptAddr,
ImmGCPtr(lir->mir()->expected()), lir->snapshot());
}
// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator> {
LInstruction* lir_;
const LAllocation* object_;
public:
OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object)
: lir_(lir), object_(object) {}
void accept(CodeGenerator* codegen) override {
codegen->visitOutOfLineCallPostWriteBarrier(this);
}
LInstruction* lir() const { return lir_; }
const LAllocation* object() const { return object_; }
};
static void EmitStoreBufferCheckForConstant(MacroAssembler& masm,
const gc::TenuredCell* cell,
AllocatableGeneralRegisterSet& regs,
Label* exit, Label* callVM) {
Register temp = regs.takeAny();
gc::Arena* arena = cell->arena();
Register cells = temp;
masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells);
size_t index = gc::ArenaCellSet::getCellIndex(cell);
size_t word;
uint32_t mask;
gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask);
size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t);
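// Worked example (assuming 32-bit bitmap words, as the uint32_t math above
// suggests): a cell with index 70 maps to word 70 / 32 = 2 and mask
// 1 << (70 % 32) = 0x40.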
masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask),
exit);
// Check whether this is the sentinel set and, if so, call the VM to allocate
// one for this arena.
masm.branchPtr(Assembler::Equal,
Address(cells, gc::ArenaCellSet::offsetOfArena()),
ImmPtr(nullptr), callVM);
// Add the cell to the set.
masm.or32(Imm32(mask), Address(cells, offset));
masm.jump(exit);
regs.add(temp);
}
static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime,
Register objreg, JSObject* maybeConstant,
bool isGlobal,
AllocatableGeneralRegisterSet& regs) {
MOZ_ASSERT_IF(isGlobal, maybeConstant);
Label callVM;
Label exit;
Register temp = regs.takeAny();
// We already have a fast path to check whether a global is in the store
// buffer.
if (!isGlobal) {
if (maybeConstant) {
// Check store buffer bitmap directly for known object.
EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs,
&exit, &callVM);
} else {
// Check one element cache to avoid VM call.
masm.branchPtr(Assembler::Equal,
AbsoluteAddress(runtime->addressOfLastBufferedWholeCell()),
objreg, &exit);
}
}
// Call into the VM to barrier the write.
masm.bind(&callVM);
Register runtimereg = temp;
masm.mov(ImmPtr(runtime), runtimereg);
masm.setupAlignedABICall();
masm.passABIArg(runtimereg);
masm.passABIArg(objreg);
if (isGlobal) {
using Fn = void (*)(JSRuntime* rt, GlobalObject* obj);
masm.callWithABI<Fn, PostGlobalWriteBarrier>();
} else {
using Fn = void (*)(JSRuntime* rt, js::gc::Cell* obj);
masm.callWithABI<Fn, PostWriteBarrier>();
}
masm.bind(&exit);
}
void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) {
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
Register objreg;
JSObject* object = nullptr;
bool isGlobal = false;
if (obj->isConstant()) {
object = &obj->toConstant()->toObject();
isGlobal = isGlobalObject(object);
objreg = regs.takeAny();
masm.movePtr(ImmGCPtr(object), objreg);
} else {
objreg = ToRegister(obj);
regs.takeUnchecked(objreg);
}
EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs);
}
// Returns true if `def` might be allocated in the nursery.
static bool ValueNeedsPostBarrier(MDefinition* def) {
if (def->isBox()) {
def = def->toBox()->input();
}
if (def->type() == MIRType::Value) {
return true;
}
return NeedsPostBarrier(def->type());
}
void CodeGenerator::emitElementPostWriteBarrier(
MInstruction* mir, const LiveRegisterSet& liveVolatileRegs, Register obj,
const LAllocation* index, Register scratch, const ConstantOrRegister& val,
int32_t indexDiff) {
if (val.constant()) {
MOZ_ASSERT_IF(val.value().isGCThing(),
!IsInsideNursery(val.value().toGCThing()));
return;
}
TypedOrValueRegister reg = val.reg();
if (reg.hasTyped() && !NeedsPostBarrier(reg.type())) {
return;
}
auto* ool = new (alloc()) LambdaOutOfLineCode([=](OutOfLineCode& ool) {
masm.PushRegsInMask(liveVolatileRegs);
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
regs.takeUnchecked(obj);
regs.takeUnchecked(scratch);
Register indexReg;
if (index->isConstant()) {
indexReg = regs.takeAny();
masm.move32(Imm32(ToInt32(index) + indexDiff), indexReg);
} else {
indexReg = ToRegister(index);
regs.takeUnchecked(indexReg);
if (indexDiff != 0) {
masm.add32(Imm32(indexDiff), indexReg);
}
}
masm.setupUnalignedABICall(scratch);
masm.movePtr(ImmPtr(gen->runtime), scratch);
masm.passABIArg(scratch);
masm.passABIArg(obj);
masm.passABIArg(indexReg);
using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
masm.callWithABI<Fn, PostWriteElementBarrier>();
// We don't need a sub32 here because indexReg must be in liveVolatileRegs
// if indexDiff is not zero, so it will be restored below.
MOZ_ASSERT_IF(indexDiff != 0, liveVolatileRegs.has(indexReg));
masm.PopRegsInMask(liveVolatileRegs);
masm.jump(ool.rejoin());
});
addOutOfLineCode(ool, mir);
masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, ool->rejoin());
if (reg.hasValue()) {
masm.branchValueIsNurseryCell(Assembler::Equal, reg.valueReg(), scratch,
ool->entry());
} else {
masm.branchPtrInNurseryChunk(Assembler::Equal, reg.typedReg().gpr(),
scratch, ool->entry());
}
masm.bind(ool->rejoin());
}
void CodeGenerator::emitPostWriteBarrier(Register objreg) {
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
regs.takeUnchecked(objreg);
EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs);
}
void CodeGenerator::visitOutOfLineCallPostWriteBarrier(
OutOfLineCallPostWriteBarrier* ool) {
saveLiveVolatile(ool->lir());
const LAllocation* obj = ool->object();
emitPostWriteBarrier(obj);
restoreLiveVolatile(ool->lir());
masm.jump(ool->rejoin());
}
void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal,
OutOfLineCode* ool) {
// Check whether an object is a global that we have already barriered before
// calling into the VM.
//
// We only check for the script's global, not other globals within the same
// compartment, because we bake in a pointer to realm->globalWriteBarriered,
// and doing that would be invalid for other realms, which could be collected
// before the Ion code is discarded.
if (!maybeGlobal->isConstant()) {
return;
}
JSObject* obj = &maybeGlobal->toConstant()->toObject();
if (gen->realm->maybeGlobal() != obj) {
return;
}
const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered();
masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0),
ool->rejoin());
}
template <class LPostBarrierType, MIRType nurseryType>
void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir,
OutOfLineCode* ool) {
static_assert(NeedsPostBarrier(nurseryType));
addOutOfLineCode(ool, lir->mir());
Register temp = ToTempRegisterOrInvalid(lir->temp0());
if (lir->object()->isConstant()) {
// Constant nursery objects cannot appear here; see
// LIRGenerator::visitPostWriteElementBarrier.
MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
} else {
masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
temp, ool->rejoin());
}
maybeEmitGlobalBarrierCheck(lir->object(), ool);
Register value = ToRegister(lir->value());
if constexpr (nurseryType == MIRType::Object) {
MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object);
} else if constexpr (nurseryType == MIRType::String) {
MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String);
} else {
static_assert(nurseryType == MIRType::BigInt);
MOZ_ASSERT(lir->mir()->value()->type() == MIRType::BigInt);
}
masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry());
masm.bind(ool->rejoin());
}
template <class LPostBarrierType>
void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir,
OutOfLineCode* ool) {
addOutOfLineCode(ool, lir->mir());
Register temp = ToTempRegisterOrInvalid(lir->temp0());
if (lir->object()->isConstant()) {
// Constant nursery objects cannot appear here; see
// LIRGenerator::visitPostWriteElementBarrier.
MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject()));
} else {
masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()),
temp, ool->rejoin());
}
maybeEmitGlobalBarrierCheck(lir->object(), ool);
ValueOperand value = ToValue(lir->value());
masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry());
masm.bind(ool->rejoin());
}
void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) {
auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
visitPostWriteBarrierCommon<LPostWriteBarrierO, MIRType::Object>(lir, ool);
}
void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) {
auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
visitPostWriteBarrierCommon<LPostWriteBarrierS, MIRType::String>(lir, ool);
}
void CodeGenerator::visitPostWriteBarrierBI(LPostWriteBarrierBI* lir) {
auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
visitPostWriteBarrierCommon<LPostWriteBarrierBI, MIRType::BigInt>(lir, ool);
}
void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) {
auto ool = new (alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
visitPostWriteBarrierCommonV(lir, ool);
}
// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteElementBarrier
: public OutOfLineCodeBase<CodeGenerator> {
LInstruction* lir_;
const LAllocation* object_;
const LAllocation* index_;
public:
OutOfLineCallPostWriteElementBarrier(LInstruction* lir,
const LAllocation* object,
const LAllocation* index)
: lir_(lir), object_(object), index_(index) {}
void accept(CodeGenerator* codegen) override {
codegen->visitOutOfLineCallPostWriteElementBarrier(this);
}
LInstruction* lir() const { return lir_; }
const LAllocation* object() const { return object_; }
const LAllocation* index() const { return index_; }
};
void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(
OutOfLineCallPostWriteElementBarrier* ool) {
saveLiveVolatile(ool->lir());
const LAllocation* obj = ool->object();
const LAllocation* index = ool->index();
Register objreg = obj->isConstant() ? InvalidReg : ToRegister(obj);
Register indexreg = ToRegister(index);
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
regs.takeUnchecked(indexreg);
if (obj->isConstant()) {
objreg = regs.takeAny();
masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
} else {
regs.takeUnchecked(objreg);
}
Register runtimereg = regs.takeAny();
using Fn = void (*)(JSRuntime* rt, JSObject* obj, int32_t index);
masm.setupAlignedABICall();
masm.mov(ImmPtr(gen->runtime), runtimereg);
masm.passABIArg(runtimereg);
masm.passABIArg(objreg);
masm.passABIArg(indexreg);
masm.callWithABI<Fn, PostWriteElementBarrier>();
restoreLiveVolatile(ool->lir());
masm.jump(ool->rejoin());
}
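// Note: PostWriteElementBarrier (defined in jit/VMFunctions.cpp) is expected
// to record the (object, index) edge in the runtime's store buffer so the
// next minor GC traces it; the OOL path above only marshals the ABI
// arguments for that call.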
void CodeGenerator::visitPostWriteElementBarrierO(
LPostWriteElementBarrierO* lir) {
auto ool = new (alloc())
OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
visitPostWriteBarrierCommon<LPostWriteElementBarrierO, MIRType::Object>(lir,
ool);
}
void CodeGenerator::visitPostWriteElementBarrierS(
LPostWriteElementBarrierS* lir) {
auto ool = new (alloc())
OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
visitPostWriteBarrierCommon<LPostWriteElementBarrierS, MIRType::String>(lir,
ool);
}
void CodeGenerator::visitPostWriteElementBarrierBI(
LPostWriteElementBarrierBI* lir) {
auto ool = new (alloc())
OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
visitPostWriteBarrierCommon<LPostWriteElementBarrierBI, MIRType::BigInt>(lir,
ool);
}
void CodeGenerator::visitPostWriteElementBarrierV(
LPostWriteElementBarrierV* lir) {
auto ool = new (alloc())
OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index());
visitPostWriteBarrierCommonV(lir, ool);
}
void CodeGenerator::visitAssertCanElidePostWriteBarrier(
LAssertCanElidePostWriteBarrier* lir) {
Register object = ToRegister(lir->object());
ValueOperand value = ToValue(lir->value());
Register temp = ToRegister(lir->temp0());
Label ok;
masm.branchPtrInNurseryChunk(Assembler::Equal, object, temp, &ok);
masm.branchValueIsNurseryCell(Assembler::NotEqual, value, temp, &ok);
masm.assumeUnreachable("Unexpected missing post write barrier");
masm.bind(&ok);
}
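// The assertion above encodes the elision condition: a post write barrier may
// only be skipped when the object is itself in the nursery, or when the
// stored value is not a nursery cell.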
template <typename LCallIns>
void CodeGenerator::emitCallNative(LCallIns* call, JSNative native,
Register argContextReg, Register argUintNReg,
Register argVpReg, Register tempReg,
uint32_t unusedStack) {
masm.checkStackAlignment();
// Native functions have the signature:
// bool (*)(JSContext*, unsigned, Value* vp)
// Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
// are the function arguments.
// Allocate space for the outparam, moving the StackPointer to what will be
// &vp[1].
masm.adjustStack(unusedStack);
// Push a Value containing the callee object: natives are allowed to access
// their callee before setting the return value. The StackPointer is moved
// to &vp[0].
//
// Also reserves the space for |NativeExitFrameLayout::{lo,hi}CalleeResult_|.
if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
Register calleeReg = ToRegister(call->getCallee());
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
// Enter the callee realm.
if (call->mir()->maybeCrossRealm()) {
masm.switchToObjectRealm(calleeReg, tempReg);
}
} else {
WrappedFunction* target = call->mir()->getSingleTarget();
masm.Push(ObjectValue(*target->rawNativeJSFunction()));
// Enter the callee realm.
if (call->mir()->maybeCrossRealm()) {
masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
masm.switchToObjectRealm(tempReg, tempReg);
}
}
// Preload arguments into registers.
masm.loadJSContext(argContextReg);
masm.moveStackPtrTo(argVpReg);
// Initialize |NativeExitFrameLayout::argc_|.
masm.Push(argUintNReg);
// Construct native exit frame.
//
// |buildFakeExitFrame| initializes |NativeExitFrameLayout::exit_| and
// |enterFakeExitFrameForNative| initializes |NativeExitFrameLayout::footer_|.
//
// The NativeExitFrameLayout is now fully initialized.
uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
masm.enterFakeExitFrameForNative(argContextReg, tempReg,
call->mir()->isConstructing());
markSafepointAt(safepointOffset, call);
// Construct and execute call.
masm.setupAlignedABICall();
masm.passABIArg(argContextReg);
masm.passABIArg(argUintNReg);
masm.passABIArg(argVpReg);
ensureOsiSpace();
// In simulator builds, `native` will already point to the simulator's
// call-redirection code for LCallClassHook. Load the address into a
// register first so that we don't try to redirect it a second time.
bool emittedCall = false;
#ifdef JS_SIMULATOR
if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
masm.movePtr(ImmPtr(native), tempReg);
masm.callWithABI(tempReg);
emittedCall = true;
}
#endif
if (!emittedCall) {
masm.callWithABI(DynamicFunction<JSNative>(native), ABIType::General,
CheckUnsafeCallWithABI::DontCheckHasExitFrame);
}
// Test for failure.
masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
// Exit the callee realm.
if (call->mir()->maybeCrossRealm()) {
masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
}
// Load the outparam vp[0] into output register(s).
masm.loadValue(
Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
JSReturnOperand);
// Until C++ code is instrumented against Spectre, prevent speculative
// execution from returning any private data.
if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
call->mir()->hasLiveDefUses()) {
masm.speculationBarrier();
}
#ifdef DEBUG
// Native constructors are guaranteed to return an Object value.
if (call->mir()->isConstructing()) {
Label notPrimitive;
masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
&notPrimitive);
masm.assumeUnreachable("native constructors don't return primitives");
masm.bind(&notPrimitive);
}
#endif
}
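// For orientation, a minimal JSNative matching the vp[] layout used above
// (an illustrative sketch, not part of this file's codegen):
//
//   static bool MyNative(JSContext* cx, unsigned argc, Value* vp) {
//     CallArgs args = CallArgsFromVp(argc, vp);  // args.callee() reads vp[0]
//     // Arguments are args[0..argc-1], i.e. vp[2] onward.
//     args.rval().setUndefined();  // writes the vp[0] outparam
//     return true;  // returning false takes masm.failureLabel() above
//   }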
template <typename LCallIns>
void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
uint32_t unusedStack =
UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
// Registers used for callWithABI() argument-passing.
const Register argContextReg = ToRegister(call->getArgContextReg());
const Register argUintNReg = ToRegister(call->getArgUintNReg());
const Register argVpReg = ToRegister(call->getArgVpReg());
// Misc. temporary registers.
const Register tempReg = ToRegister(call->getTempReg());
DebugOnly<uint32_t> initialStack = masm.framePushed();
// Initialize the argc register.
masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);
// Create the exit frame and call the native.
emitCallNative(call, native, argContextReg, argUintNReg, argVpReg, tempReg,
unusedStack);
// The next instruction removes the footer of the exit frame, so there is
// no need for leaveFakeExitFrame.
// Move the StackPointer back to its original location, unwinding the native
// exit frame.
masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
MOZ_ASSERT(masm.framePushed() == initialStack);
}
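// Stack accounting for the wrapper above, in outline: emitCallNative first
// frees |unusedStack| bytes and then pushes NativeExitFrameLayout::Size()
// bytes (callee Value, argc, exit frame), so adjusting by
// |NativeExitFrameLayout::Size() - unusedStack| restores the original
// framePushed, which the assertion checks.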
void CodeGenerator::visitCallNative(LCallNative* call) {
WrappedFunction* target = call->getSingleTarget();
MOZ_ASSERT(target);
MOZ_ASSERT(target->isNativeWithoutJitEntry());
JSNative native = target->native();
if (call->ignoresReturnValue() && target->hasJitInfo()) {
const JSJitInfo* jitInfo = target->jitInfo();
if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
native = jitInfo->ignoresReturnValueMethod;
}
}
emitCallNative(call, native);
}
void CodeGenerator::visitCallClassHook(LCallClassHook* call) {
emitCallNative(call, call->mir()->target());
}
static void LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv,
DOMObjectKind kind) {
// Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
// will be in the first slot but may be fixed or non-fixed.
MOZ_ASSERT(obj != priv);
switch (kind) {
case DOMObjectKind::Native:
// If it's a native object, the value must be in a fixed slot.
// See CanAttachDOMCall in CacheIR.cpp.
masm.debugAssertObjHasFixedSlots(obj, priv);
masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
break;
case DOMObjectKind::Proxy: {
#ifdef DEBUG
// Sanity check: it must be a DOM proxy.
Label isDOMProxy;
masm.branchTestProxyHandlerFamily(
Assembler::Equal, obj, priv, GetDOMProxyHandlerFamily(), &isDOMProxy);
masm.assumeUnreachable("Expected a DOM proxy");
masm.bind(&isDOMProxy);
#endif
masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
masm.loadPrivate(
Address(priv, js::detail::ProxyReservedSlots::offsetOfSlot(0)), priv);
break;
}
}
}
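// Slot layouts assumed by LoadDOMPrivate (cf. CanAttachDOMCall in
// CacheIR.cpp):
//   Native DOM object:  the private value lives in fixed slot 0.
//   DOM proxy:          the private value lives in ProxyReservedSlots slot 0.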
void CodeGenerator::visitCallDOMNative(LCallDOMNative* call) {
WrappedFunction* target = call->getSingleTarget();
MOZ_ASSERT(target);
MOZ_ASSERT(target->isNativeWithoutJitEntry());
MOZ_ASSERT(target->hasJitInfo());
MOZ_ASSERT(call->mir()->isCallDOMNative());
int unusedStack = UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
// Registers used for callWithABI() argument-passing.
const Register argJSContext = ToRegister(call->getArgJSContext());
const Register argObj = ToRegister(call->getArgObj());
const Register argPrivate = ToRegister(call->getArgPrivate());
const Register argArgs = ToRegister(call->getArgArgs());
DebugOnly<uint32_t> initialStack = masm.framePushed();
masm.checkStackAlignment();
// DOM methods have the signature:
// bool (*)(JSContext*, HandleObject, void* private, const
// JSJitMethodCallArgs& args)
// Where args is initialized from an argc and a vp, vp[0] is space for an
// outparam and the callee, vp[1] is |this|, and vp[2] onward are the
// function arguments. Note that args stores the argv, not the vp, and
// argv == vp + 2.
// Nestle the stack up against the pushed arguments, leaving StackPointer
// at &vp[1].
masm.adjustStack(unusedStack);
// argObj is filled with the extracted object, then returned.
Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
MOZ_ASSERT(obj == argObj);
// Push a Value containing the callee object: natives are allowed to access
// their callee before setting the return value. After this the StackPointer
// points to &vp[0].
masm.Push(ObjectValue(*target->rawNativeJSFunction()));
// Now compute the argv value. Since StackPointer points to &vp[0] and argv
// is &vp[2], we just need to add 2 * sizeof(Value) to the current
// StackPointer.
static_assert(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
static_assert(JSJitMethodCallArgsTraits::offsetOfArgc ==
IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
masm.computeEffectiveAddress(
Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);
LoadDOMPrivate(masm, obj, argPrivate,
static_cast<MCallDOMNative*>(call->mir())->objectKind());
// Push argc from the call instruction into what will become the
// IonExitFrame.
masm.Push(Imm32(call->numActualArgs()));
// Push our argv onto the stack.
masm.Push(argArgs);
// And store our JSJitMethodCallArgs* in argArgs.
masm.moveStackPtrTo(argArgs);
// Push |this| object for passing HandleObject. We push after argc to
// maintain the same sp-relative location of the object pointer with other
// DOMExitFrames.
masm.Push(argObj);
masm.moveStackPtrTo(argObj);
if (call->mir()->maybeCrossRealm()) {
// We use argJSContext as scratch register here.
masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), argJSContext);
masm.switchToObjectRealm(argJSContext, argJSContext);
}
bool preTenureWrapperAllocation =
call->mir()->to<MCallDOMNative>()->initialHeap() == gc::Heap::Tenured;
if (preTenureWrapperAllocation) {
auto ptr = ImmPtr(mirGen().realm->zone()->tenuringAllocSite());
masm.storeLocalAllocSite(ptr, argJSContext);
}
// Construct native exit frame.
uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
masm.loadJSContext(argJSContext);
masm.enterFakeExitFrame(argJSContext, argJSContext,
ExitFrameType::IonDOMMethod);
markSafepointAt(safepointOffset, call);
// Construct and execute call.
masm.setupAlignedABICall();
masm.loadJSContext(argJSContext);
masm.passABIArg(argJSContext);
masm.passABIArg(argObj);
masm.passABIArg(argPrivate);
masm.passABIArg(argArgs);
ensureOsiSpace();
masm.callWithABI(DynamicFunction<JSJitMethodOp>(target->jitInfo()->method),
ABIType::General,
CheckUnsafeCallWithABI::DontCheckHasExitFrame);
if (target->jitInfo()->isInfallible) {
masm.loadValue(Address(masm.getStackPointer(),
IonDOMMethodExitFrameLayout::offsetOfResult()),
JSReturnOperand);
} else {
// Test for failure.
masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());
// Load the outparam vp[0] into output register(s).
masm.loadValue(Address(masm.getStackPointer(),
IonDOMMethodExitFrameLayout::offsetOfResult()),
JSReturnOperand);
}
static_assert(!JSReturnOperand.aliases(ReturnReg),
"Clobbering ReturnReg should not affect the return value");
// Switch back to the current realm if needed. Note: if the DOM method threw
// an exception, the exception handler will do this.
if (call->mir()->maybeCrossRealm()) {
masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
}
// Wipe out the preTenuring bit from the local alloc site. On exception,
// this is handled in C++.
if (preTenureWrapperAllocation) {
masm.storeLocalAllocSite(ImmPtr(nullptr), ReturnReg);
}
// Until C++ code is instrumented against Spectre, prevent speculative
// execution from returning any private data.
if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses()) {
masm.speculationBarrier();
}
// The next instruction removes the footer of the exit frame, so there is
// no need for leaveFakeExitFrame.
// Move the StackPointer back to its original location, unwinding the native
// exit frame.
masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
MOZ_ASSERT(masm.framePushed() == initialStack);
}
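// For reference, the frame material built above looks like this just before
// the fake exit frame is pushed on top of it (stack grows downward):
//
//   argObj  -> [ |this| object ]   passed as the HandleObject argument
//   argArgs -> [ argv pointer  ]   the JSJitMethodCallArgs (argv at offset 0)
//              [ argc          ]
//              [ vp[0]         ]   callee Value / result outparam
//              [ vp[1]         ]   |this| Value
//              [ vp[2] ...     ]   arguments (argv == vp + 2)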
void CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir) {
pushArg(ImmGCPtr(lir->mir()->name()));
using Fn = bool (*)(JSContext* cx, Handle<PropertyName*>, MutableHandleValue);
callVM<Fn, GetIntrinsicValue>(lir);
}
void CodeGenerator::emitCallInvokeFunction(
LInstruction* call, Register calleereg, bool constructing,
bool ignoresReturnValue, uint32_t argc, uint32_t unusedStack) {
// Nestle %esp up to the argument vector.
// Each path must account for framePushed_ separately, for callVM to be valid.
masm.freeStack(unusedStack);
pushArg(masm.getStackPointer()); // argv.
pushArg(Imm32(argc)); // argc.
pushArg(Imm32(ignoresReturnValue));
pushArg(Imm32(constructing)); // constructing.
pushArg(calleereg); // JSFunction*.
using Fn = bool (*)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
MutableHandleValue);
callVM<Fn, jit::InvokeFunction>(call);
// Un-nestle %esp from the argument vector. No prefix was pushed.
masm.reserveStack(unusedStack);
}
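// Note on ordering: pushArg pushes VM-call arguments last-to-first, so the
// pushes above (argv, argc, ignoresReturnValue, constructing, callee) match
// Fn's parameter list after the implicit JSContext*; the MutableHandleValue
// result is surfaced in JSReturnOperand by the callVM machinery.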
void CodeGenerator::visitCallGeneric(LCallGeneric* call) {
// The callee is passed straight through to the trampoline.
MOZ_ASSERT(ToRegister(call->getCallee()) == IonGenericCallCalleeReg);
Register argcReg = ToRegister(call->getArgc());
uint32_t unusedStack =
UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
// Known-target case is handled by LCallKnown.
MOZ_ASSERT(!call->hasSingleTarget());
masm.checkStackAlignment();
masm.move32(Imm32(call->numActualArgs()), argcReg);
// Nestle the StackPointer up to the argument vector.
masm.freeStack(unusedStack);
ensureOsiSpace();
auto kind = call->mir()->isConstructing() ? IonGenericCallKind::Construct
: IonGenericCallKind::Call;
TrampolinePtr genericCallStub =
gen->jitRuntime()->getIonGenericCallStub(kind);
uint32_t callOffset = masm.callJit(genericCallStub);
markSafepointAt(callOffset, call);
if (call->mir()->maybeCrossRealm()) {
static_assert(!JSReturnOperand.aliases(ReturnReg),
"ReturnReg available as scratch after scripted calls");
masm.switchToRealm(gen