/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/CodeGenerator.h"
#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/Casting.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/EndianUtils.h"
#include "mozilla/EnumeratedArray.h"
#include "mozilla/EnumeratedRange.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/Tuple.h"
#include "mozilla/Unused.h"
#include <type_traits>
#include <utility>
#include "jslibmath.h"
#include "jsmath.h"
#include "jsnum.h"
#include "builtin/Eval.h"
#include "builtin/MapObject.h"
#include "builtin/RegExp.h"
#include "builtin/SelfHostingDefines.h"
#include "builtin/String.h"
#include "builtin/TypedObject.h"
#include "gc/Nursery.h"
#include "irregexp/RegExpTypes.h"
#include "jit/BaselineCodeGen.h"
#include "jit/IonIC.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/IonScript.h"
#include "jit/JitcodeMap.h"
#include "jit/JitSpewer.h"
#include "jit/Linker.h"
#include "jit/Lowering.h"
#include "jit/MIRGenerator.h"
#include "jit/MoveEmitter.h"
#include "jit/RangeAnalysis.h"
#include "jit/SharedICHelpers.h"
#include "jit/StackSlotAllocator.h"
#include "jit/VMFunctions.h"
#include "jit/WarpSnapshot.h"
#include "js/RegExpFlags.h" // JS::RegExpFlag
#include "js/ScalarType.h" // js::Scalar::Type
#include "util/CheckedArithmetic.h"
#include "util/Unicode.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/AsyncFunction.h"
#include "vm/AsyncIteration.h"
#include "vm/BuiltinObjectKind.h"
#include "vm/EqualityOperations.h" // js::SameValue
#include "vm/FunctionFlags.h" // js::FunctionFlags
#include "vm/MatchPairs.h"
#include "vm/PlainObject.h" // js::PlainObject
#include "vm/RegExpObject.h"
#include "vm/RegExpStatics.h"
#include "vm/StringType.h"
#include "vm/TraceLogging.h"
#include "vm/TypedArrayObject.h"
#ifdef MOZ_VTUNE
# include "vtune/VTuneWrapper.h"
#endif
#include "wasm/WasmGC.h"
#include "wasm/WasmStubs.h"
#include "builtin/Boolean-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
#include "jit/shared/Lowering-shared-inl.h"
#include "jit/TemplateObject-inl.h"
#include "jit/VMFunctionList-inl.h"
#include "vm/Interpreter-inl.h"
#include "vm/JSScript-inl.h"
using namespace js;
using namespace js::jit;
using JS::GenericNaN;
using mozilla::AssertedCast;
using mozilla::DebugOnly;
using mozilla::FloatingPoint;
using mozilla::Maybe;
using mozilla::NegativeInfinity;
using mozilla::PositiveInfinity;
namespace js {
namespace jit {
#ifdef CHECK_OSIPOINT_REGISTERS
template <class Op>
static void HandleRegisterDump(Op op, MacroAssembler& masm,
LiveRegisterSet liveRegs, Register activation,
Register scratch) {
const size_t baseOffset = JitActivation::offsetOfRegs();
// Handle live GPRs.
for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
Register reg = *iter;
Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
if (reg == activation) {
// To use the original value of the activation register (that's
// now on top of the stack), we need the scratch register.
masm.push(scratch);
masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
op(scratch, dump);
masm.pop(scratch);
} else {
op(reg, dump);
}
}
// Handle live FPRs.
for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
FloatRegister reg = *iter;
Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
op(reg, dump);
}
}
class StoreOp {
MacroAssembler& masm;
public:
explicit StoreOp(MacroAssembler& masm) : masm(masm) {}
void operator()(Register reg, Address dump) { masm.storePtr(reg, dump); }
void operator()(FloatRegister reg, Address dump) {
if (reg.isDouble()) {
masm.storeDouble(reg, dump);
} else if (reg.isSingle()) {
masm.storeFloat32(reg, dump);
} else if (reg.isSimd128()) {
MOZ_CRASH("Unexpected case for SIMD");
} else {
MOZ_CRASH("Unexpected register type.");
}
}
};
class VerifyOp {
MacroAssembler& masm;
Label* failure_;
public:
VerifyOp(MacroAssembler& masm, Label* failure)
: masm(masm), failure_(failure) {}
void operator()(Register reg, Address dump) {
masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
}
void operator()(FloatRegister reg, Address dump) {
if (reg.isDouble()) {
ScratchDoubleScope scratch(masm);
masm.loadDouble(dump, scratch);
masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
} else if (reg.isSingle()) {
ScratchFloat32Scope scratch(masm);
masm.loadFloat32(dump, scratch);
masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
} else if (reg.isSimd128()) {
MOZ_CRASH("Unexpected case for SIMD");
} else {
MOZ_CRASH("Unexpected register type.");
}
}
};
void CodeGenerator::verifyOsiPointRegs(LSafepoint* safepoint) {
// Ensure the live registers stored by callVM did not change between
// the call and this OsiPoint. Try-catch relies on this invariant.
// Load pointer to the JitActivation in a scratch register.
AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
Register scratch = allRegs.takeAny();
masm.push(scratch);
masm.loadJitActivation(scratch);
// If we should not check registers (because the instruction did not call
// into the VM, or a GC happened), we're done.
Label failure, done;
Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
// Having more than one VM function call made in one visit function at
// runtime is a sec-critical error, because if we conservatively assume that
// one of the function calls can re-enter Ion, then the invalidation process
// will potentially add a call at a random location, by patching the code
// before the return address.
masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
// Set checkRegs to 0, so that we don't try to verify registers after we
// return from this script to the caller.
masm.store32(Imm32(0), checkRegs);
// Ignore clobbered registers. Some instructions (like LValueToInt32) modify
// temps after calling into the VM. This is fine because no other
// instructions (including this OsiPoint) will depend on them. Backtracking
// can also use the same register for an input and an output. These are
// marked as clobbered and shouldn't get checked.
LiveRegisterSet liveRegs;
liveRegs.set() = RegisterSet::Intersect(
safepoint->liveRegs().set(),
RegisterSet::Not(safepoint->clobberedRegs().set()));
VerifyOp op(masm, &failure);
HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
masm.jump(&done);
// Do not profile the callWithABI that occurs below. This is to avoid a
// rare corner case that occurs when profiling interacts with itself:
//
// When slow profiling assertions are turned on, FunctionBoundary ops
// (which update the profiler pseudo-stack) may emit a callVM, which
// forces them to have an osi point associated with them. The
// FunctionBoundary for inline function entry is added to the caller's
// graph with a PC from the caller's code, but during codegen it modifies
// Gecko Profiler instrumentation to add the callee as the current top-most
// script. When codegen gets to the OSIPoint, and the callWithABI below is
// emitted, the codegen thinks that the current frame is the callee, but
// the PC it's using from the OSIPoint refers to the caller. This causes
// the profiler instrumentation of the callWithABI below to ASSERT, since
// the script and pc are mismatched. To avoid this, we simply omit
// instrumentation for these callWithABIs.
// Any live register captured by a safepoint (other than temp registers)
// must remain unchanged between the call and the OsiPoint instruction.
masm.bind(&failure);
masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
masm.bind(&done);
masm.pop(scratch);
}
bool CodeGenerator::shouldVerifyOsiPointRegs(LSafepoint* safepoint) {
if (!checkOsiPointRegisters) {
return false;
}
if (safepoint->liveRegs().emptyGeneral() &&
safepoint->liveRegs().emptyFloat()) {
return false; // No registers to check.
}
return true;
}
void CodeGenerator::resetOsiPointRegs(LSafepoint* safepoint) {
if (!shouldVerifyOsiPointRegs(safepoint)) {
return;
}
// Set checkRegs to 0. If we perform a VM call, the instruction
// will set it to 1.
AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
Register scratch = allRegs.takeAny();
masm.push(scratch);
masm.loadJitActivation(scratch);
Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
masm.store32(Imm32(0), checkRegs);
masm.pop(scratch);
}
static void StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs) {
// Store a copy of all live registers before performing the call.
// When we reach the OsiPoint, we can use this to check nothing
// modified them in the meantime.
// Load pointer to the JitActivation in a scratch register.
AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
Register scratch = allRegs.takeAny();
masm.push(scratch);
masm.loadJitActivation(scratch);
Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
masm.add32(Imm32(1), checkRegs);
StoreOp op(masm);
HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
masm.pop(scratch);
}
#endif // CHECK_OSIPOINT_REGISTERS
// Before doing any call to C++ code, you should ensure that volatile
// registers are evicted by the register allocator.
void CodeGenerator::callVMInternal(VMFunctionId id, LInstruction* ins,
const Register* dynStack) {
TrampolinePtr code = gen->jitRuntime()->getVMWrapper(id);
const VMFunctionData& fun = GetVMFunction(id);
// Stack is:
// ... frame ...
// [args]
#ifdef DEBUG
MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
pushedArgs_ = 0;
#endif
#ifdef CHECK_OSIPOINT_REGISTERS
if (shouldVerifyOsiPointRegs(ins->safepoint())) {
StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
}
#endif
#ifdef DEBUG
if (ins->mirRaw()) {
MOZ_ASSERT(ins->mirRaw()->isInstruction());
MInstruction* mir = ins->mirRaw()->toInstruction();
MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
// If this MIR instruction has an overridden AliasSet, set the JitRuntime's
// disallowArbitraryCode_ flag so we can assert this VMFunction doesn't call
// RunScript. Whitelist MInterruptCheck and MCheckOverRecursed because
// interrupt callbacks can call JS (chrome JS or shell testing functions).
bool isWhitelisted = mir->isInterruptCheck() || mir->isCheckOverRecursed();
if (!mir->hasDefaultAliasSet() && !isWhitelisted) {
const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
masm.move32(Imm32(1), ReturnReg);
masm.store32(ReturnReg, AbsoluteAddress(addr));
}
}
#endif
// Push an exit frame descriptor. If |dynStack| is a valid pointer to a
// register, then its value is added to the value of |framePushed()| to
// fill the frame descriptor.
if (dynStack) {
masm.addPtr(Imm32(masm.framePushed()), *dynStack);
masm.makeFrameDescriptor(*dynStack, FrameType::IonJS,
ExitFrameLayout::Size());
masm.Push(*dynStack); // descriptor
} else {
masm.pushStaticFrameDescriptor(FrameType::IonJS, ExitFrameLayout::Size());
}
// Call the wrapper function. The wrapper is in charge of unwinding the stack
// when returning from the call. Failures are handled with exceptions based
// on the return value of the C functions. To guard the outcome of the
// returned value, use another LIR instruction.
uint32_t callOffset = masm.callJit(code);
markSafepointAt(callOffset, ins);
#ifdef DEBUG
// Reset the disallowArbitraryCode flag after the call.
{
const void* addr = gen->jitRuntime()->addressOfDisallowArbitraryCode();
masm.push(ReturnReg);
masm.move32(Imm32(0), ReturnReg);
masm.store32(ReturnReg, AbsoluteAddress(addr));
masm.pop(ReturnReg);
}
#endif
// Remove the rest of the frame left on the stack. We remove the return
// address, which is implicitly popped when returning.
int framePop = sizeof(ExitFrameLayout) - sizeof(void*);
// Pop arguments from framePushed.
masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
// Stack is:
// ... frame ...
}
template <typename Fn, Fn fn>
void CodeGenerator::callVM(LInstruction* ins, const Register* dynStack) {
VMFunctionId id = VMFunctionToId<Fn, fn>::id;
callVMInternal(id, ins, dynStack);
}
// ArgSeq stores arguments for OutOfLineCallVM.
//
// OutOfLineCallVM instances are created with the "oolCallVM" function. The
// second argument of this function is an instance of a class which provides a
// "generate" method in charge of pushing the arguments, with "pushArg", for a
// VMFunction.
//
// Such a list of arguments can be created by using the "ArgList" function,
// which creates one instance of "ArgSeq", where the types of the arguments
// are inferred from the arguments themselves.
//
// The list of arguments must be written in the same order as if you were
// calling the function in C++.
//
// Example:
// ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
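//
// For illustration, a complete call site (mirroring the StringToNumber use
// later in this file) pairs an ArgList with one of the Store* wrappers
// defined below:
//
//   using Fn = bool (*)(JSContext*, JSString*, double*);
//   OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
//       lir, ArgList(stringReg), StoreFloatRegisterTo(temp));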
template <typename... ArgTypes>
class ArgSeq {
mozilla::Tuple<std::remove_reference_t<ArgTypes>...> args_;
template <std::size_t... ISeq>
inline void generate(CodeGenerator* codegen,
std::index_sequence<ISeq...>) const {
// Arguments are pushed in reverse order, from last argument to first
// argument.
(codegen->pushArg(mozilla::Get<sizeof...(ISeq) - 1 - ISeq>(args_)), ...);
}
public:
explicit ArgSeq(ArgTypes&&... args)
: args_(std::forward<ArgTypes>(args)...) {}
inline void generate(CodeGenerator* codegen) const {
generate(codegen, std::index_sequence_for<ArgTypes...>{});
}
#ifdef DEBUG
static constexpr size_t numArgs = sizeof...(ArgTypes);
#endif
};
template <typename... ArgTypes>
inline ArgSeq<ArgTypes...> ArgList(ArgTypes&&... args) {
return ArgSeq<ArgTypes...>(std::forward<ArgTypes>(args)...);
}
// Store wrappers, to generate the right move of data after the VM call.
struct StoreNothing {
inline void generate(CodeGenerator* codegen) const {}
inline LiveRegisterSet clobbered() const {
return LiveRegisterSet(); // No register gets clobbered
}
};
class StoreRegisterTo {
private:
Register out_;
public:
explicit StoreRegisterTo(Register out) : out_(out) {}
inline void generate(CodeGenerator* codegen) const {
// It's okay to use storePointerResultTo here - the VMFunction wrapper
// ensures the upper bytes are zero for bool/int32 return values.
codegen->storePointerResultTo(out_);
}
inline LiveRegisterSet clobbered() const {
LiveRegisterSet set;
set.add(out_);
return set;
}
};
class StoreFloatRegisterTo {
private:
FloatRegister out_;
public:
explicit StoreFloatRegisterTo(FloatRegister out) : out_(out) {}
inline void generate(CodeGenerator* codegen) const {
codegen->storeFloatResultTo(out_);
}
inline LiveRegisterSet clobbered() const {
LiveRegisterSet set;
set.add(out_);
return set;
}
};
template <typename Output>
class StoreValueTo_ {
private:
Output out_;
public:
explicit StoreValueTo_(const Output& out) : out_(out) {}
inline void generate(CodeGenerator* codegen) const {
codegen->storeResultValueTo(out_);
}
inline LiveRegisterSet clobbered() const {
LiveRegisterSet set;
set.add(out_);
return set;
}
};
template <typename Output>
StoreValueTo_<Output> StoreValueTo(const Output& out) {
return StoreValueTo_<Output>(out);
}
template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
class OutOfLineCallVM : public OutOfLineCodeBase<CodeGenerator> {
private:
LInstruction* lir_;
ArgSeq args_;
StoreOutputTo out_;
public:
OutOfLineCallVM(LInstruction* lir, const ArgSeq& args,
const StoreOutputTo& out)
: lir_(lir), args_(args), out_(out) {}
void accept(CodeGenerator* codegen) override {
codegen->visitOutOfLineCallVM(this);
}
LInstruction* lir() const { return lir_; }
const ArgSeq& args() const { return args_; }
const StoreOutputTo& out() const { return out_; }
};
template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
OutOfLineCode* CodeGenerator::oolCallVM(LInstruction* lir, const ArgSeq& args,
const StoreOutputTo& out) {
MOZ_ASSERT(lir->mirRaw());
MOZ_ASSERT(lir->mirRaw()->isInstruction());
#ifdef DEBUG
VMFunctionId id = VMFunctionToId<Fn, fn>::id;
const VMFunctionData& fun = GetVMFunction(id);
MOZ_ASSERT(fun.explicitArgs == args.numArgs);
MOZ_ASSERT(fun.returnsData() !=
(std::is_same_v<StoreOutputTo, StoreNothing>));
#endif
OutOfLineCode* ool = new (alloc())
OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>(lir, args, out);
addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
return ool;
}
template <typename Fn, Fn fn, class ArgSeq, class StoreOutputTo>
void CodeGenerator::visitOutOfLineCallVM(
OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool) {
LInstruction* lir = ool->lir();
saveLive(lir);
ool->args().generate(this);
callVM<Fn, fn>(lir);
ool->out().generate(this);
restoreLiveIgnore(lir, ool->out().clobbered());
masm.jump(ool->rejoin());
}
class OutOfLineICFallback : public OutOfLineCodeBase<CodeGenerator> {
private:
LInstruction* lir_;
size_t cacheIndex_;
size_t cacheInfoIndex_;
public:
OutOfLineICFallback(LInstruction* lir, size_t cacheIndex,
size_t cacheInfoIndex)
: lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) {}
void bind(MacroAssembler* masm) override {
// The binding of the initial jump is done in
// CodeGenerator::visitOutOfLineICFallback.
}
size_t cacheIndex() const { return cacheIndex_; }
size_t cacheInfoIndex() const { return cacheInfoIndex_; }
LInstruction* lir() const { return lir_; }
void accept(CodeGenerator* codegen) override {
codegen->visitOutOfLineICFallback(this);
}
};
void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) {
if (cacheIndex == SIZE_MAX) {
masm.setOOM();
return;
}
DataPtr<IonIC> cache(this, cacheIndex);
MInstruction* mir = lir->mirRaw()->toInstruction();
if (mir->resumePoint()) {
cache->setScriptedLocation(mir->block()->info().script(),
mir->resumePoint()->pc());
} else {
cache->setIdempotent();
}
Register temp = cache->scratchRegisterForEntryJump();
icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp);
masm.jump(Address(temp, 0));
MOZ_ASSERT(!icInfo_.empty());
OutOfLineICFallback* ool =
new (alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1);
addOutOfLineCode(ool, mir);
masm.bind(ool->rejoin());
cache->setRejoinOffset(CodeOffset(ool->rejoin()->offset()));
}
void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) {
LInstruction* lir = ool->lir();
size_t cacheIndex = ool->cacheIndex();
size_t cacheInfoIndex = ool->cacheInfoIndex();
DataPtr<IonIC> ic(this, cacheIndex);
// Register the location of the OOL path in the IC.
ic->setFallbackOffset(CodeOffset(masm.currentOffset()));
switch (ic->kind()) {
case CacheKind::GetProp:
case CacheKind::GetElem: {
IonGetPropertyIC* getPropIC = ic->asGetPropertyIC();
saveLive(lir);
pushArg(getPropIC->id());
pushArg(getPropIC->value());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonGetPropertyIC*,
HandleValue, HandleValue, MutableHandleValue);
callVM<Fn, IonGetPropertyIC::update>(lir);
StoreValueTo(getPropIC->output()).generate(this);
restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::GetPropSuper:
case CacheKind::GetElemSuper: {
IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC();
saveLive(lir);
pushArg(getPropSuperIC->id());
pushArg(getPropSuperIC->receiver());
pushArg(getPropSuperIC->object());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn =
bool (*)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject,
HandleValue, HandleValue, MutableHandleValue);
callVM<Fn, IonGetPropSuperIC::update>(lir);
StoreValueTo(getPropSuperIC->output()).generate(this);
restoreLiveIgnore(lir,
StoreValueTo(getPropSuperIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::SetProp:
case CacheKind::SetElem: {
IonSetPropertyIC* setPropIC = ic->asSetPropertyIC();
saveLive(lir);
pushArg(setPropIC->rhs());
pushArg(setPropIC->id());
pushArg(setPropIC->object());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonSetPropertyIC*,
HandleObject, HandleValue, HandleValue);
callVM<Fn, IonSetPropertyIC::update>(lir);
restoreLive(lir);
masm.jump(ool->rejoin());
return;
}
case CacheKind::GetName: {
IonGetNameIC* getNameIC = ic->asGetNameIC();
saveLive(lir);
pushArg(getNameIC->environment());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonGetNameIC*, HandleObject,
MutableHandleValue);
callVM<Fn, IonGetNameIC::update>(lir);
StoreValueTo(getNameIC->output()).generate(this);
restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::BindName: {
IonBindNameIC* bindNameIC = ic->asBindNameIC();
saveLive(lir);
pushArg(bindNameIC->environment());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn =
JSObject* (*)(JSContext*, HandleScript, IonBindNameIC*, HandleObject);
callVM<Fn, IonBindNameIC::update>(lir);
StoreRegisterTo(bindNameIC->output()).generate(this);
restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::GetIterator: {
IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC();
saveLive(lir);
pushArg(getIteratorIC->value());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = JSObject* (*)(JSContext*, HandleScript, IonGetIteratorIC*,
HandleValue);
callVM<Fn, IonGetIteratorIC::update>(lir);
StoreRegisterTo(getIteratorIC->output()).generate(this);
restoreLiveIgnore(lir,
StoreRegisterTo(getIteratorIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::In: {
IonInIC* inIC = ic->asInIC();
saveLive(lir);
pushArg(inIC->object());
pushArg(inIC->key());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonInIC*, HandleValue,
HandleObject, bool*);
callVM<Fn, IonInIC::update>(lir);
StoreRegisterTo(inIC->output()).generate(this);
restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::HasOwn: {
IonHasOwnIC* hasOwnIC = ic->asHasOwnIC();
saveLive(lir);
pushArg(hasOwnIC->id());
pushArg(hasOwnIC->value());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue,
HandleValue, int32_t*);
callVM<Fn, IonHasOwnIC::update>(lir);
StoreRegisterTo(hasOwnIC->output()).generate(this);
restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::CheckPrivateField: {
IonCheckPrivateFieldIC* checkPrivateFieldIC = ic->asCheckPrivateFieldIC();
saveLive(lir);
pushArg(checkPrivateFieldIC->id());
pushArg(checkPrivateFieldIC->value());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonCheckPrivateFieldIC*,
HandleValue, HandleValue, bool*);
callVM<Fn, IonCheckPrivateFieldIC::update>(lir);
StoreRegisterTo(checkPrivateFieldIC->output()).generate(this);
restoreLiveIgnore(
lir, StoreRegisterTo(checkPrivateFieldIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::InstanceOf: {
IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC();
saveLive(lir);
pushArg(hasInstanceOfIC->rhs());
pushArg(hasInstanceOfIC->lhs());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext*, HandleScript, IonInstanceOfIC*,
HandleValue lhs, HandleObject rhs, bool* res);
callVM<Fn, IonInstanceOfIC::update>(lir);
StoreRegisterTo(hasInstanceOfIC->output()).generate(this);
restoreLiveIgnore(lir,
StoreRegisterTo(hasInstanceOfIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::UnaryArith: {
IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC();
saveLive(lir);
pushArg(unaryArithIC->input());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext * cx, HandleScript outerScript,
IonUnaryArithIC * stub, HandleValue val,
MutableHandleValue res);
callVM<Fn, IonUnaryArithIC::update>(lir);
StoreValueTo(unaryArithIC->output()).generate(this);
restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::ToPropertyKey: {
IonToPropertyKeyIC* toPropertyKeyIC = ic->asToPropertyKeyIC();
saveLive(lir);
pushArg(toPropertyKeyIC->input());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext * cx, HandleScript outerScript,
IonToPropertyKeyIC * ic, HandleValue val,
MutableHandleValue res);
callVM<Fn, IonToPropertyKeyIC::update>(lir);
StoreValueTo(toPropertyKeyIC->output()).generate(this);
restoreLiveIgnore(lir,
StoreValueTo(toPropertyKeyIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::BinaryArith: {
IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC();
saveLive(lir);
pushArg(binaryArithIC->rhs());
pushArg(binaryArithIC->lhs());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext * cx, HandleScript outerScript,
IonBinaryArithIC * stub, HandleValue lhs,
HandleValue rhs, MutableHandleValue res);
callVM<Fn, IonBinaryArithIC::update>(lir);
StoreValueTo(binaryArithIC->output()).generate(this);
restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::Compare: {
IonCompareIC* compareIC = ic->asCompareIC();
saveLive(lir);
pushArg(compareIC->rhs());
pushArg(compareIC->lhs());
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1));
pushArg(ImmGCPtr(gen->outerInfo().script()));
using Fn = bool (*)(JSContext * cx, HandleScript outerScript,
IonCompareIC * stub, HandleValue lhs, HandleValue rhs,
bool* res);
callVM<Fn, IonCompareIC::update>(lir);
StoreRegisterTo(compareIC->output()).generate(this);
restoreLiveIgnore(lir, StoreRegisterTo(compareIC->output()).clobbered());
masm.jump(ool->rejoin());
return;
}
case CacheKind::Call:
case CacheKind::TypeOf:
case CacheKind::ToBool:
case CacheKind::GetIntrinsic:
case CacheKind::NewObject:
MOZ_CRASH("Unsupported IC");
}
MOZ_CRASH();
}
StringObject* MNewStringObject::templateObj() const {
return &templateObj_->as<StringObject>();
}
CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph,
MacroAssembler* masm)
: CodeGeneratorSpecific(gen, graph, masm),
ionScriptLabels_(gen->alloc()),
ionNurseryObjectLabels_(gen->alloc()),
scriptCounts_(nullptr),
realmStubsToReadBarrier_(0) {}
CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); }
class OutOfLineZeroIfNaN : public OutOfLineCodeBase<CodeGenerator> {
LInstruction* lir_;
FloatRegister input_;
Register output_;
public:
OutOfLineZeroIfNaN(LInstruction* lir, FloatRegister input, Register output)
: lir_(lir), input_(input), output_(output) {}
void accept(CodeGenerator* codegen) override {
codegen->visitOutOfLineZeroIfNaN(this);
}
LInstruction* lir() const { return lir_; }
FloatRegister input() const { return input_; }
Register output() const { return output_; }
};
void CodeGenerator::visitValueToInt32(LValueToInt32* lir) {
ValueOperand operand = ToValue(lir, LValueToInt32::Input);
Register output = ToRegister(lir->output());
FloatRegister temp = ToFloatRegister(lir->tempFloat());
MDefinition* input;
if (lir->mode() == LValueToInt32::NORMAL) {
input = lir->mirNormal()->input();
} else if (lir->mode() == LValueToInt32::TRUNCATE_NOWRAP) {
input = lir->mirTruncateNoWrap()->input();
} else {
input = lir->mirTruncate()->input();
}
Label fails;
if (lir->mode() == LValueToInt32::TRUNCATE) {
OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());
// We can only handle strings in truncation contexts, like bitwise
// operations.
Label* stringEntry;
Label* stringRejoin;
Register stringReg;
if (input->mightBeType(MIRType::String)) {
stringReg = ToRegister(lir->temp());
using Fn = bool (*)(JSContext*, JSString*, double*);
OutOfLineCode* oolString = oolCallVM<Fn, StringToNumber>(
lir, ArgList(stringReg), StoreFloatRegisterTo(temp));
stringEntry = oolString->entry();
stringRejoin = oolString->rejoin();
} else {
stringReg = InvalidReg;
stringEntry = nullptr;
stringRejoin = nullptr;
}
masm.truncateValueToInt32(operand, input, stringEntry, stringRejoin,
oolDouble->entry(), stringReg, temp, output,
&fails);
masm.bind(oolDouble->rejoin());
} else if (lir->mode() == LValueToInt32::TRUNCATE_NOWRAP) {
auto* ool = new (alloc()) OutOfLineZeroIfNaN(lir, temp, output);
addOutOfLineCode(ool, lir->mir());
masm.truncateNoWrapValueToInt32(operand, input, temp, output, ool->entry(),
&fails);
masm.bind(ool->rejoin());
} else {
masm.convertValueToInt32(operand, input, temp, output, &fails,
lir->mirNormal()->canBeNegativeZero(),
lir->mirNormal()->conversion());
}
bailoutFrom(&fails, lir->snapshot());
}
void CodeGenerator::visitOutOfLineZeroIfNaN(OutOfLineZeroIfNaN* ool) {
FloatRegister input = ool->input();
Register output = ool->output();
// NaN triggers the failure path for branchTruncateDoubleToInt32() on x86,
// x64, and ARM64, so handle it here. In all other cases bail out.
Label fails;
if (input.isSingle()) {
masm.branchFloat(Assembler::DoubleOrdered, input, input, &fails);
} else {
masm.branchDouble(Assembler::DoubleOrdered, input, input, &fails);
}
// ToInteger(NaN) is 0.
masm.move32(Imm32(0), output);
masm.jump(ool->rejoin());
bailoutFrom(&fails, ool->lir()->snapshot());
}
void CodeGenerator::visitValueToDouble(LValueToDouble* lir) {
MToDouble* mir = lir->mir();
ValueOperand operand = ToValue(lir, LValueToDouble::Input);
FloatRegister output = ToFloatRegister(lir->output());
Label isDouble, isInt32, isBool, isNull, isUndefined, done;
bool hasBoolean = false, hasNull = false, hasUndefined = false;
{
ScratchTagScope tag(masm, operand);
masm.splitTagForTest(operand, tag);
masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
if (mir->conversion() != MToFPInstruction::NumbersOnly) {
masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
hasBoolean = true;
hasUndefined = true;
if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
masm.branchTestNull(Assembler::Equal, tag, &isNull);
hasNull = true;
}
}
}
bailout(lir->snapshot());
if (hasNull) {
masm.bind(&isNull);
masm.loadConstantDouble(0.0, output);
masm.jump(&done);
}
if (hasUndefined) {
masm.bind(&isUndefined);
masm.loadConstantDouble(GenericNaN(), output);
masm.jump(&done);
}
if (hasBoolean) {
masm.bind(&isBool);
masm.boolValueToDouble(operand, output);
masm.jump(&done);
}
masm.bind(&isInt32);
masm.int32ValueToDouble(operand, output);
masm.jump(&done);
masm.bind(&isDouble);
masm.unboxDouble(operand, output);
masm.bind(&done);
}
void CodeGenerator::visitValueToFloat32(LValueToFloat32* lir) {
MToFloat32* mir = lir->mir();
ValueOperand operand = ToValue(lir, LValueToFloat32::Input);
FloatRegister output = ToFloatRegister(lir->output());
Label isDouble, isInt32, isBool, isNull, isUndefined, done;
bool hasBoolean = false, hasNull = false, hasUndefined = false;
{
ScratchTagScope tag(masm, operand);
masm.splitTagForTest(operand, tag);
masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
masm.branchTestInt32(Assembler::Equal, tag, &isInt32);
if (mir->conversion() != MToFPInstruction::NumbersOnly) {
masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
hasBoolean = true;
hasUndefined = true;
if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
masm.branchTestNull(Assembler::Equal, tag, &isNull);
hasNull = true;
}
}
}
bailout(lir->snapshot());
if (hasNull) {
masm.bind(&isNull);
masm.loadConstantFloat32(0.0f, output);
masm.jump(&done);
}
if (hasUndefined) {
masm.bind(&isUndefined);
masm.loadConstantFloat32(float(GenericNaN()), output);
masm.jump(&done);
}
if (hasBoolean) {
masm.bind(&isBool);
masm.boolValueToFloat32(operand, output);
masm.jump(&done);
}
masm.bind(&isInt32);
masm.int32ValueToFloat32(operand, output);
masm.jump(&done);
masm.bind(&isDouble);
// ARM and MIPS may not have a double register available if we've
// allocated output as a float32.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
ScratchDoubleScope fpscratch(masm);
masm.unboxDouble(operand, fpscratch);
masm.convertDoubleToFloat32(fpscratch, output);
#else
masm.unboxDouble(operand, output);
masm.convertDoubleToFloat32(output, output);
#endif
masm.bind(&done);
}
void CodeGenerator::visitValueToBigInt(LValueToBigInt* lir) {
ValueOperand operand = ToValue(lir, LValueToBigInt::Input);
Register output = ToRegister(lir->output());
bool maybeBigInt = lir->mir()->input()->mightBeType(MIRType::BigInt);
bool maybeBool = lir->mir()->input()->mightBeType(MIRType::Boolean);
bool maybeString = lir->mir()->input()->mightBeType(MIRType::String);
Maybe<OutOfLineCode*> ool;
if (maybeBool || maybeString) {
using Fn = BigInt* (*)(JSContext*, HandleValue);
ool = mozilla::Some(oolCallVM<Fn, ToBigInt>(lir, ArgList(operand),
StoreRegisterTo(output)));
}
Register tag = masm.extractTag(operand, output);
Label done;
if (maybeBigInt) {
Label notBigInt;
masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
masm.unboxBigInt(operand, output);
masm.jump(&done);
masm.bind(&notBigInt);
}
if (maybeBool) {
masm.branchTestBoolean(Assembler::Equal, tag, (*ool)->entry());
}
if (maybeString) {
masm.branchTestString(Assembler::Equal, tag, (*ool)->entry());
}
// ToBigInt(object) can have side-effects; all other types throw a TypeError.
bailout(lir->snapshot());
if (ool) {
masm.bind((*ool)->rejoin());
}
if (maybeBigInt) {
masm.bind(&done);
}
}
void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) {
masm.convertInt32ToDouble(ToRegister(lir->input()),
ToFloatRegister(lir->output()));
}
void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) {
masm.convertFloat32ToDouble(ToFloatRegister(lir->input()),
ToFloatRegister(lir->output()));
}
void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) {
masm.convertDoubleToFloat32(ToFloatRegister(lir->input()),
ToFloatRegister(lir->output()));
}
void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) {
masm.convertInt32ToFloat32(ToRegister(lir->input()),
ToFloatRegister(lir->output()));
}
void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) {
Label fail;
FloatRegister input = ToFloatRegister(lir->input());
Register output = ToRegister(lir->output());
masm.convertDoubleToInt32(input, output, &fail,
lir->mir()->canBeNegativeZero());
bailoutFrom(&fail, lir->snapshot());
}
void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) {
Label fail;
FloatRegister input = ToFloatRegister(lir->input());
Register output = ToRegister(lir->output());
masm.convertFloat32ToInt32(input, output, &fail,
lir->mir()->canBeNegativeZero());
bailoutFrom(&fail, lir->snapshot());
}
void CodeGenerator::visitDoubleToIntegerInt32(LDoubleToIntegerInt32* lir) {
FloatRegister input = ToFloatRegister(lir->input());
Register output = ToRegister(lir->output());
auto* ool = new (alloc()) OutOfLineZeroIfNaN(lir, input, output);
addOutOfLineCode(ool, lir->mir());
masm.branchTruncateDoubleToInt32(input, output, ool->entry());
masm.bind(ool->rejoin());
}
void CodeGenerator::visitFloat32ToIntegerInt32(LFloat32ToIntegerInt32* lir) {
FloatRegister input = ToFloatRegister(lir->input());
Register output = ToRegister(lir->output());
auto* ool = new (alloc()) OutOfLineZeroIfNaN(lir, input, output);
addOutOfLineCode(ool, lir->mir());
masm.branchTruncateFloat32ToInt32(input, output, ool->entry());
masm.bind(ool->rejoin());
}
void CodeGenerator::emitOOLTestObject(Register objreg,
Label* ifEmulatesUndefined,
Label* ifDoesntEmulateUndefined,
Register scratch) {
saveVolatile(scratch);
masm.setupUnalignedABICall(scratch);
masm.passABIArg(objreg);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::EmulatesUndefined));
masm.storeCallBoolResult(scratch);
restoreVolatile(scratch);
masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
masm.jump(ifDoesntEmulateUndefined);
}
// Base out-of-line code generator for all tests of the truthiness of an
// object, where the object might not be truthy. (Recall that per spec all
// objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
// flag to permit objects to look like |undefined| in certain contexts,
// including in object truthiness testing.) We check truthiness inline except
// when we're testing it on a proxy (or if TI guarantees us that the specified
// object will never emulate |undefined|), in which case out-of-line code will
// call EmulatesUndefined for a conclusive answer.
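//
// For illustration, a typical caller (mirroring visitTestOAndBranch below)
// allocates the OOL path and lets testObjectEmulatesUndefined emit the inline
// fast path:
//
//   OutOfLineTestObject* ool = new (alloc()) OutOfLineTestObject();
//   addOutOfLineCode(ool, lir->mir());
//   testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()),
//                               ool);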
class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator> {
Register objreg_;
Register scratch_;
Label* ifEmulatesUndefined_;
Label* ifDoesntEmulateUndefined_;
#ifdef DEBUG
bool initialized() { return ifEmulatesUndefined_ != nullptr; }
#endif
public:
OutOfLineTestObject()
: ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) {}
void accept(CodeGenerator* codegen) final {
MOZ_ASSERT(initialized());
codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_,
ifDoesntEmulateUndefined_, scratch_);
}
// Specify the register where the object to be tested is found, labels to
// jump to if the object is truthy or falsy, and a scratch register for
// use in the out-of-line path.
void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined,
Label* ifDoesntEmulateUndefined, Register scratch) {
MOZ_ASSERT(!initialized());
MOZ_ASSERT(ifEmulatesUndefined);
objreg_ = objreg;
scratch_ = scratch;
ifEmulatesUndefined_ = ifEmulatesUndefined;
ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
}
};
// A subclass of OutOfLineTestObject containing two extra labels, for use when
// the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
// code. The user should bind these labels in inline code, and specify them as
// targets via setInputAndTargets, as appropriate.
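//
// One way a caller might use this (a sketch; the concrete callers are outside
// this excerpt):
//
//   auto* ool = new (alloc()) OutOfLineTestObjectWithLabels();
//   addOutOfLineCode(ool, lir->mir());
//   Label* ifEmulatesUndefined = ool->label1();
//   Label* ifDoesntEmulateUndefined = ool->label2();
//   testObjectEmulatesUndefined(objreg, ifEmulatesUndefined,
//                               ifDoesntEmulateUndefined, scratch, ool);
//   masm.bind(ifEmulatesUndefined);  // inline handling of the falsy case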
class OutOfLineTestObjectWithLabels : public OutOfLineTestObject {
Label label1_;
Label label2_;
public:
OutOfLineTestObjectWithLabels() = default;
Label* label1() { return &label1_; }
Label* label2() { return &label2_; }
};
void CodeGenerator::testObjectEmulatesUndefinedKernel(
Register objreg, Label* ifEmulatesUndefined,
Label* ifDoesntEmulateUndefined, Register scratch,
OutOfLineTestObject* ool) {
ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
scratch);
// Perform a fast-path check of the object's class flags if the object's
// not a proxy. Let out-of-line code handle the slow cases that require
// saving registers, making a function call, and restoring registers.
masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(),
ifEmulatesUndefined);
}
void CodeGenerator::branchTestObjectEmulatesUndefined(
Register objreg, Label* ifEmulatesUndefined,
Label* ifDoesntEmulateUndefined, Register scratch,
OutOfLineTestObject* ool) {
MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
"ifDoesntEmulateUndefined will be bound to the fallthrough path");
testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
ifDoesntEmulateUndefined, scratch, ool);
masm.bind(ifDoesntEmulateUndefined);
}
void CodeGenerator::testObjectEmulatesUndefined(Register objreg,
Label* ifEmulatesUndefined,
Label* ifDoesntEmulateUndefined,
Register scratch,
OutOfLineTestObject* ool) {
testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined,
ifDoesntEmulateUndefined, scratch, ool);
masm.jump(ifDoesntEmulateUndefined);
}
void CodeGenerator::testValueTruthyKernel(
const ValueOperand& value, const LDefinition* scratch1,
const LDefinition* scratch2, FloatRegister fr, Label* ifTruthy,
Label* ifFalsy, OutOfLineTestObject* ool, MDefinition* valueMIR) {
// Count the number of possible type tags we might have, so we'll know when
// we've checked them all and hence can avoid emitting a tag check for the
// last one. In particular, whenever tagCount is 1 that means we've tried
// all but one of them already so we know exactly what's left based on the
// mightBe* booleans.
bool mightBeUndefined = valueMIR->mightBeType(MIRType::Undefined);
bool mightBeNull = valueMIR->mightBeType(MIRType::Null);
bool mightBeBoolean = valueMIR->mightBeType(MIRType::Boolean);
bool mightBeInt32 = valueMIR->mightBeType(MIRType::Int32);
bool mightBeObject = valueMIR->mightBeType(MIRType::Object);
bool mightBeString = valueMIR->mightBeType(MIRType::String);
bool mightBeSymbol = valueMIR->mightBeType(MIRType::Symbol);
bool mightBeDouble = valueMIR->mightBeType(MIRType::Double);
bool mightBeBigInt = valueMIR->mightBeType(MIRType::BigInt);
int tagCount = int(mightBeUndefined) + int(mightBeNull) +
int(mightBeBoolean) + int(mightBeInt32) + int(mightBeObject) +
int(mightBeString) + int(mightBeSymbol) + int(mightBeDouble) +
int(mightBeBigInt);
MOZ_ASSERT_IF(!valueMIR->emptyResultTypeSet(), tagCount > 0);
// If we know we're null or undefined, we're definitely falsy, no
// need to even check the tag.
if (int(mightBeNull) + int(mightBeUndefined) == tagCount) {
masm.jump(ifFalsy);
return;
}
ScratchTagScope tag(masm, value);
masm.splitTagForTest(value, tag);
if (mightBeUndefined) {
MOZ_ASSERT(tagCount > 1);
masm.branchTestUndefined(Assembler::Equal, tag, ifFalsy);
--tagCount;
}
if (mightBeNull) {
MOZ_ASSERT(tagCount > 1);
masm.branchTestNull(Assembler::Equal, tag, ifFalsy);
--tagCount;
}
if (mightBeBoolean) {
MOZ_ASSERT(tagCount != 0);
Label notBoolean;
if (tagCount != 1) {
masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
}
{
ScratchTagScopeRelease _(&tag);
masm.branchTestBooleanTruthy(false, value, ifFalsy);
}
if (tagCount != 1) {
masm.jump(ifTruthy);
}
// Else just fall through to truthiness.
masm.bind(&notBoolean);
--tagCount;
}
if (mightBeInt32) {
MOZ_ASSERT(tagCount != 0);
Label notInt32;
if (tagCount != 1) {
masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
}
{
ScratchTagScopeRelease _(&tag);
masm.branchTestInt32Truthy(false, value, ifFalsy);
}
if (tagCount != 1) {
masm.jump(ifTruthy);
}
// Else just fall through to truthiness.
masm.bind(&notInt32);
--tagCount;
}
if (mightBeObject) {
MOZ_ASSERT(tagCount != 0);
if (ool) {
Label notObject;
if (tagCount != 1) {
masm.branchTestObject(Assembler::NotEqual, tag, &notObject);
}
{
ScratchTagScopeRelease _(&tag);
Register objreg = masm.extractObject(value, ToRegister(scratch1));
testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy,
ToRegister(scratch2), ool);
}
masm.bind(&notObject);
} else {
if (tagCount != 1) {
masm.branchTestObject(Assembler::Equal, tag, ifTruthy);
}
// Else just fall through to truthiness.
}
--tagCount;
} else {
MOZ_ASSERT(!ool,
"We better not have an unused OOL path, since the code generator "
"will try to generate code for it but we never set up its labels, "
"which will cause null derefs of those labels.");
}
if (mightBeString) {
// Test if a string is non-empty.
MOZ_ASSERT(tagCount != 0);
Label notString;
if (tagCount != 1) {
masm.branchTestString(Assembler::NotEqual, tag, &notString);
}
{
ScratchTagScopeRelease _(&tag);
masm.branchTestStringTruthy(false, value, ifFalsy);
}
if (tagCount != 1) {
masm.jump(ifTruthy);
}
// Else just fall through to truthiness.
masm.bind(&notString);
--tagCount;
}
if (mightBeBigInt) {
MOZ_ASSERT(tagCount != 0);
Label notBigInt;
if (tagCount != 1) {
masm.branchTestBigInt(Assembler::NotEqual, tag, &notBigInt);
}
{
ScratchTagScopeRelease _(&tag);
masm.branchTestBigIntTruthy(false, value, ifFalsy);
}
if (tagCount != 1) {
masm.jump(ifTruthy);
}
masm.bind(&notBigInt);
--tagCount;
}
if (mightBeSymbol) {
// All symbols are truthy.
MOZ_ASSERT(tagCount != 0);
if (tagCount != 1) {
masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
}
// Else fall through to ifTruthy.
--tagCount;
}
if (mightBeDouble) {
MOZ_ASSERT(tagCount == 1);
// If we reach here the value is a double.
{
ScratchTagScopeRelease _(&tag);
masm.unboxDouble(value, fr);
masm.branchTestDoubleTruthy(false, fr, ifFalsy);
}
--tagCount;
}
MOZ_ASSERT(tagCount == 0);
// Fall through for truthy.
}
void CodeGenerator::testValueTruthy(const ValueOperand& value,
const LDefinition* scratch1,
const LDefinition* scratch2,
FloatRegister fr, Label* ifTruthy,
Label* ifFalsy, OutOfLineTestObject* ool,
MDefinition* valueMIR) {
testValueTruthyKernel(value, scratch1, scratch2, fr, ifTruthy, ifFalsy, ool,
valueMIR);
masm.jump(ifTruthy);
}
void CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir) {
MIRType inputType = lir->mir()->input()->type();
MOZ_ASSERT(inputType == MIRType::ObjectOrNull ||
lir->mir()->operandMightEmulateUndefined(),
"If the object couldn't emulate undefined, this should have been "
"folded.");
Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
Register input = ToRegister(lir->input());
if (lir->mir()->operandMightEmulateUndefined()) {
if (inputType == MIRType::ObjectOrNull) {
masm.branchTestPtr(Assembler::Zero, input, input, falsy);
}
OutOfLineTestObject* ool = new (alloc()) OutOfLineTestObject();
addOutOfLineCode(ool, lir->mir());
testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()),
ool);
} else {
MOZ_ASSERT(inputType == MIRType::ObjectOrNull);
testZeroEmitBranch(Assembler::NotEqual, input, lir->ifTruthy(),
lir->ifFalsy());
}
}
void CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir) {
OutOfLineTestObject* ool = nullptr;
MDefinition* input = lir->mir()->input();
// Unfortunately, it's possible that someone (e.g. phi elimination) switched
// out our input after we did cacheOperandMightEmulateUndefined. So we
// might think it can emulate undefined _and_ know that it can't be an
// object.
if (lir->mir()->operandMightEmulateUndefined() &&
input->mightBeType(MIRType::Object)) {
ool = new (alloc()) OutOfLineTestObject();
addOutOfLineCode(ool, lir->mir());
}
Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
testValueTruthy(ToValue(lir, LTestVAndBranch::Input), lir->temp1(),
lir->temp2(), ToFloatRegister(lir->tempFloat()), truthy,
falsy, ool, input);
}
void CodeGenerator::visitFunctionDispatch(LFunctionDispatch* lir) {
MFunctionDispatch* mir = lir->mir();
Register input = ToRegister(lir->input());
// Compare function pointers
for (size_t i = 0; i < mir->numCases(); i++) {
MOZ_ASSERT(i < mir->numCases());
LBlock* target = skipTrivialBlocks(mir->getCaseBlock(i))->lir();
if (ObjectGroup* funcGroup = mir->getCaseObjectGroup(i)) {
masm.branchTestObjGroupUnsafe(Assembler::Equal, input, funcGroup,
target->label());
} else {
JSFunction* func = mir->getCase(i);
masm.branchPtr(Assembler::Equal, input, ImmGCPtr(func), target->label());
}
}
// If we reached the end and have a fallback, jump to the fallback block.
if (mir->hasFallback()) {
masm.jump(skipTrivialBlocks(mir->getFallback())->lir()->label());
return;
}
// Otherwise, crash.
masm.assumeUnreachable("Did not match input function!");
}
void CodeGenerator::visitObjectGroupDispatch(LObjectGroupDispatch* lir) {
MObjectGroupDispatch* mir = lir->mir();
Register input = ToRegister(lir->input());
Register temp = ToRegister(lir->temp());
// Load the incoming ObjectGroup in temp.
masm.loadObjGroupUnsafe(input, temp);
// Compare ObjectGroups.
MacroAssembler::BranchGCPtr lastBranch;
LBlock* lastBlock = nullptr;
InlinePropertyTable* propTable = mir->propTable();
for (size_t i = 0; i < mir->numCases(); i++) {
JSFunction* func = mir->getCase(i);
LBlock* target = skipTrivialBlocks(mir->getCaseBlock(i))->lir();
DebugOnly<bool> found = false;
// Find the function in the prop table.
for (size_t j = 0; j < propTable->numEntries(); j++) {
if (propTable->getFunction(j) != func) {
continue;
}
// Emit the previous prop's jump.
if (lastBranch.isInitialized()) {
lastBranch.emit(masm);
}
// Set up the jump for the next iteration.
ObjectGroup* group = propTable->getObjectGroup(j);
lastBranch = MacroAssembler::BranchGCPtr(
Assembler::Equal, temp, ImmGCPtr(group), target->label());
lastBlock = target;
found = true;
}
MOZ_ASSERT(found);
}
// At this point the final case branch hasn't been emitted.
// Jump to fallback block if we have an unknown ObjectGroup. If there's no
// fallback block, we should have handled all cases.
if (!mir->hasFallback()) {
MOZ_ASSERT(lastBranch.isInitialized());
Label ok;
// Change the target of the branch to OK.
lastBranch.relink(&ok);
lastBranch.emit(masm);
masm.assumeUnreachable("Unexpected ObjectGroup");
masm.bind(&ok);
// If we don't naturally fall through to the target,
// then jump to the target.
if (!isNextBlock(lastBlock)) {
masm.jump(lastBlock->label());