/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/CacheIRCompiler.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/FunctionTypeTraits.h"
#include "mozilla/MaybeOneOf.h"
#include "mozilla/ScopeExit.h"
#include <type_traits>
#include <utility>
#include "jslibmath.h"
#include "jsmath.h"
#include "builtin/DataViewObject.h"
#include "builtin/Object.h"
#include "gc/GCEnum.h"
#include "jit/BaselineCacheIRCompiler.h"
#include "jit/CacheIRGenerator.h"
#include "jit/IonCacheIRCompiler.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/VMFunctions.h"
#include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
#include "js/friend/XrayJitInfo.h" // js::jit::GetXrayJitInfo
#include "js/ScalarType.h" // js::Scalar::Type
#include "js/SweepingAPI.h"
#include "proxy/DOMProxy.h"
#include "proxy/Proxy.h"
#include "proxy/ScriptedProxyHandler.h"
#include "util/DifferentialTesting.h"
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/BigIntType.h"
#include "vm/FunctionFlags.h" // js::FunctionFlags
#include "vm/GeneratorObject.h"
#include "vm/GetterSetter.h"
#include "vm/Interpreter.h"
#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#include "vm/Uint8Clamped.h"
#include "builtin/Boolean-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/SharedICHelpers-inl.h"
#include "jit/VMFunctionList-inl.h"
using namespace js;
using namespace js::jit;
using mozilla::Maybe;
using JS::ExpandoAndGeneration;
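// Load the operand identified by |op| into a ValueOperand register, boxing or
// materializing its current contents as needed, and mark that register as in
// use by the current instruction.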
ValueOperand CacheRegisterAllocator::useValueRegister(MacroAssembler& masm,
ValOperandId op) {
OperandLocation& loc = operandLocations_[op.id()];
switch (loc.kind()) {
case OperandLocation::ValueReg:
currentOpRegs_.add(loc.valueReg());
return loc.valueReg();
case OperandLocation::ValueStack: {
ValueOperand reg = allocateValueRegister(masm);
popValue(masm, &loc, reg);
return reg;
}
case OperandLocation::BaselineFrame: {
ValueOperand reg = allocateValueRegister(masm);
Address addr = addressOf(masm, loc.baselineFrameSlot());
masm.loadValue(addr, reg);
loc.setValueReg(reg);
return reg;
}
case OperandLocation::Constant: {
ValueOperand reg = allocateValueRegister(masm);
masm.moveValue(loc.constant(), reg);
loc.setValueReg(reg);
return reg;
}
case OperandLocation::PayloadReg: {
// Temporarily add the payload register to currentOpRegs_ so
// allocateValueRegister will stay away from it.
currentOpRegs_.add(loc.payloadReg());
ValueOperand reg = allocateValueRegister(masm);
masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
currentOpRegs_.take(loc.payloadReg());
availableRegs_.add(loc.payloadReg());
loc.setValueReg(reg);
return reg;
}
case OperandLocation::PayloadStack: {
ValueOperand reg = allocateValueRegister(masm);
popPayload(masm, &loc, reg.scratchReg());
masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
loc.setValueReg(reg);
return reg;
}
case OperandLocation::DoubleReg: {
ValueOperand reg = allocateValueRegister(masm);
{
ScratchDoubleScope fpscratch(masm);
masm.boxDouble(loc.doubleReg(), reg, fpscratch);
}
loc.setValueReg(reg);
return reg;
}
case OperandLocation::Uninitialized:
break;
}
MOZ_CRASH();
}
// Load a value operand directly into a float register. Caller must have
// guarded isNumber on the provided val.
void CacheRegisterAllocator::ensureDoubleRegister(MacroAssembler& masm,
NumberOperandId op,
FloatRegister dest) const {
// If AutoScratchFloatRegister is active, we have to add sizeof(double) to
// any stack slot offsets below.
int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;
const OperandLocation& loc = operandLocations_[op.id()];
Label failure, done;
switch (loc.kind()) {
case OperandLocation::ValueReg: {
masm.ensureDouble(loc.valueReg(), dest, &failure);
break;
}
case OperandLocation::ValueStack: {
Address addr = valueAddress(masm, &loc);
addr.offset += stackOffset;
masm.ensureDouble(addr, dest, &failure);
break;
}
case OperandLocation::BaselineFrame: {
Address addr = addressOf(masm, loc.baselineFrameSlot());
addr.offset += stackOffset;
masm.ensureDouble(addr, dest, &failure);
break;
}
case OperandLocation::DoubleReg: {
masm.moveDouble(loc.doubleReg(), dest);
return;
}
case OperandLocation::Constant: {
MOZ_ASSERT(loc.constant().isNumber(),
"Caller must ensure the operand is a number value");
masm.loadConstantDouble(loc.constant().toNumber(), dest);
return;
}
case OperandLocation::PayloadReg: {
// Doubles can't be stored in payload registers, so this must be an int32.
MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
"Caller must ensure the operand is a number value");
masm.convertInt32ToDouble(loc.payloadReg(), dest);
return;
}
case OperandLocation::PayloadStack: {
// Doubles can't be stored in payload registers, so this must be an int32.
MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
"Caller must ensure the operand is a number value");
MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
Address addr = payloadAddress(masm, &loc);
addr.offset += stackOffset;
masm.convertInt32ToDouble(addr, dest);
return;
}
case OperandLocation::Uninitialized:
MOZ_CRASH("Unhandled operand type in ensureDoubleRegister");
return;
}
masm.jump(&done);
masm.bind(&failure);
masm.assumeUnreachable(
"Missing guard allowed non-number to hit ensureDoubleRegister");
masm.bind(&done);
}
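// Copy the unboxed, non-double payload of |typedId| into |dest| without
// changing the operand's recorded location.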
void CacheRegisterAllocator::copyToScratchRegister(MacroAssembler& masm,
TypedOperandId typedId,
Register dest) const {
// If AutoScratchFloatRegister is active, we have to add sizeof(double) to
// any stack slot offsets below.
int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;
const OperandLocation& loc = operandLocations_[typedId.id()];
switch (loc.kind()) {
case OperandLocation::ValueReg: {
masm.unboxNonDouble(loc.valueReg(), dest, typedId.type());
break;
}
case OperandLocation::ValueStack: {
Address addr = valueAddress(masm, &loc);
addr.offset += stackOffset;
masm.unboxNonDouble(addr, dest, typedId.type());
break;
}
case OperandLocation::BaselineFrame: {
Address addr = addressOf(masm, loc.baselineFrameSlot());
addr.offset += stackOffset;
masm.unboxNonDouble(addr, dest, typedId.type());
break;
}
case OperandLocation::PayloadReg: {
MOZ_ASSERT(loc.payloadType() == typedId.type());
masm.mov(loc.payloadReg(), dest);
return;
}
case OperandLocation::PayloadStack: {
MOZ_ASSERT(loc.payloadType() == typedId.type());
MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
Address addr = payloadAddress(masm, &loc);
addr.offset += stackOffset;
masm.loadPtr(addr, dest);
return;
}
case OperandLocation::DoubleReg:
case OperandLocation::Constant:
case OperandLocation::Uninitialized:
MOZ_CRASH("Unhandled operand location");
}
}
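// Copy the operand identified by |valId| into |dest| as a boxed Value, again
// without changing the operand's recorded location.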
void CacheRegisterAllocator::copyToScratchValueRegister(
MacroAssembler& masm, ValOperandId valId, ValueOperand dest) const {
MOZ_ASSERT(!addedFailurePath_);
MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
const OperandLocation& loc = operandLocations_[valId.id()];
switch (loc.kind()) {
case OperandLocation::ValueReg:
masm.moveValue(loc.valueReg(), dest);
break;
case OperandLocation::ValueStack: {
Address addr = valueAddress(masm, &loc);
masm.loadValue(addr, dest);
break;
}
case OperandLocation::BaselineFrame: {
Address addr = addressOf(masm, loc.baselineFrameSlot());
masm.loadValue(addr, dest);
break;
}
case OperandLocation::Constant:
masm.moveValue(loc.constant(), dest);
break;
case OperandLocation::PayloadReg:
masm.tagValue(loc.payloadType(), loc.payloadReg(), dest);
break;
case OperandLocation::PayloadStack: {
Address addr = payloadAddress(masm, &loc);
masm.loadPtr(addr, dest.scratchReg());
masm.tagValue(loc.payloadType(), dest.scratchReg(), dest);
break;
}
case OperandLocation::DoubleReg: {
ScratchDoubleScope fpscratch(masm);
masm.boxDouble(loc.doubleReg(), dest, fpscratch);
break;
}
case OperandLocation::Uninitialized:
MOZ_CRASH();
}
}
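// Load the payload of |typedId| into a general purpose register, unboxing or
// materializing a constant as needed, and update the operand's location to
// that register.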
Register CacheRegisterAllocator::useRegister(MacroAssembler& masm,
TypedOperandId typedId) {
MOZ_ASSERT(!addedFailurePath_);
MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
OperandLocation& loc = operandLocations_[typedId.id()];
switch (loc.kind()) {
case OperandLocation::PayloadReg:
currentOpRegs_.add(loc.payloadReg());
return loc.payloadReg();
case OperandLocation::ValueReg: {
// It's possible the value is still boxed: as an optimization, we unbox
// the first time we use a value as object.
ValueOperand val = loc.valueReg();
availableRegs_.add(val);
Register reg = val.scratchReg();
availableRegs_.take(reg);
masm.unboxNonDouble(val, reg, typedId.type());
loc.setPayloadReg(reg, typedId.type());
currentOpRegs_.add(reg);
return reg;
}
case OperandLocation::PayloadStack: {
Register reg = allocateRegister(masm);
popPayload(masm, &loc, reg);
return reg;
}
case OperandLocation::ValueStack: {
// The value is on the stack, but boxed. If it's on top of the stack we
// unbox it and then remove it from the stack, else we just unbox.
Register reg = allocateRegister(masm);
if (loc.valueStack() == stackPushed_) {
masm.unboxNonDouble(Address(masm.getStackPointer(), 0), reg,
typedId.type());
masm.addToStackPtr(Imm32(sizeof(js::Value)));
MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
stackPushed_ -= sizeof(js::Value);
} else {
MOZ_ASSERT(loc.valueStack() < stackPushed_);
masm.unboxNonDouble(
Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
reg, typedId.type());
}
loc.setPayloadReg(reg, typedId.type());
return reg;
}
case OperandLocation::BaselineFrame: {
Register reg = allocateRegister(masm);
Address addr = addressOf(masm, loc.baselineFrameSlot());
masm.unboxNonDouble(addr, reg, typedId.type());
loc.setPayloadReg(reg, typedId.type());
return reg;
}
case OperandLocation::Constant: {
Value v = loc.constant();
Register reg = allocateRegister(masm);
if (v.isString()) {
masm.movePtr(ImmGCPtr(v.toString()), reg);
} else if (v.isSymbol()) {
masm.movePtr(ImmGCPtr(v.toSymbol()), reg);
} else if (v.isBigInt()) {
masm.movePtr(ImmGCPtr(v.toBigInt()), reg);
} else if (v.isBoolean()) {
masm.movePtr(ImmWord(v.toBoolean() ? 1 : 0), reg);
} else {
MOZ_CRASH("Unexpected Value");
}
loc.setPayloadReg(reg, v.extractNonDoubleType());
return reg;
}
case OperandLocation::DoubleReg:
case OperandLocation::Uninitialized:
break;
}
MOZ_CRASH();
}
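// Return the operand as a ConstantOrRegister: constants are returned
// directly, payloads and doubles as typed registers, and boxed values in a
// ValueOperand register.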
ConstantOrRegister CacheRegisterAllocator::useConstantOrRegister(
MacroAssembler& masm, ValOperandId val) {
MOZ_ASSERT(!addedFailurePath_);
MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
OperandLocation& loc = operandLocations_[val.id()];
switch (loc.kind()) {
case OperandLocation::Constant:
return loc.constant();
case OperandLocation::PayloadReg:
case OperandLocation::PayloadStack: {
JSValueType payloadType = loc.payloadType();
Register reg = useRegister(masm, TypedOperandId(val, payloadType));
return TypedOrValueRegister(MIRTypeFromValueType(payloadType),
AnyRegister(reg));
}
case OperandLocation::ValueReg:
case OperandLocation::ValueStack:
case OperandLocation::BaselineFrame:
return TypedOrValueRegister(useValueRegister(masm, val));
case OperandLocation::DoubleReg:
return TypedOrValueRegister(MIRType::Double,
AnyRegister(loc.doubleReg()));
case OperandLocation::Uninitialized:
break;
}
MOZ_CRASH();
}
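// Allocate a register for a typed operand that has not been given a location
// yet and record it as the operand's payload register.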
Register CacheRegisterAllocator::defineRegister(MacroAssembler& masm,
TypedOperandId typedId) {
MOZ_ASSERT(!addedFailurePath_);
MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
OperandLocation& loc = operandLocations_[typedId.id()];
MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);
Register reg = allocateRegister(masm);
loc.setPayloadReg(reg, typedId.type());
return reg;
}
ValueOperand CacheRegisterAllocator::defineValueRegister(MacroAssembler& masm,
ValOperandId val) {
MOZ_ASSERT(!addedFailurePath_);
MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
OperandLocation& loc = operandLocations_[val.id()];
MOZ_ASSERT(loc.kind() == OperandLocation::Uninitialized);
ValueOperand reg = allocateValueRegister(masm);
loc.setValueReg(reg);
return reg;
}
void CacheRegisterAllocator::freeDeadOperandLocations(MacroAssembler& masm) {
// See if any operands are dead so we can reuse their registers. Note that
// we skip the input operands, as those are also used by failure paths, and
// we currently don't track those uses.
for (size_t i = writer_.numInputOperands(); i < operandLocations_.length();
i++) {
if (!writer_.operandIsDead(i, currentInstruction_)) {
continue;
}
OperandLocation& loc = operandLocations_[i];
switch (loc.kind()) {
case OperandLocation::PayloadReg:
availableRegs_.add(loc.payloadReg());
break;
case OperandLocation::ValueReg:
availableRegs_.add(loc.valueReg());
break;
case OperandLocation::PayloadStack:
masm.propagateOOM(freePayloadSlots_.append(loc.payloadStack()));
break;
case OperandLocation::ValueStack:
masm.propagateOOM(freeValueSlots_.append(loc.valueStack()));
break;
case OperandLocation::Uninitialized:
case OperandLocation::BaselineFrame:
case OperandLocation::Constant:
case OperandLocation::DoubleReg:
break;
}
loc.setUninitialized();
}
}
void CacheRegisterAllocator::discardStack(MacroAssembler& masm) {
// This should only be called when we are no longer using the operands,
// as we're discarding everything from the native stack. Set all operand
// locations to Uninitialized to catch bugs.
for (size_t i = 0; i < operandLocations_.length(); i++) {
operandLocations_[i].setUninitialized();
}
if (stackPushed_ > 0) {
masm.addToStackPtr(Imm32(stackPushed_));
stackPushed_ = 0;
}
freePayloadSlots_.clear();
freeValueSlots_.clear();
}
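// Allocate a general purpose register, freeing dead operands or spilling
// live ones to the stack when no register is immediately available.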
Register CacheRegisterAllocator::allocateRegister(MacroAssembler& masm) {
MOZ_ASSERT(!addedFailurePath_);
MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
if (availableRegs_.empty()) {
freeDeadOperandLocations(masm);
}
if (availableRegs_.empty()) {
// Still no registers available, try to spill unused operands to
// the stack.
for (size_t i = 0; i < operandLocations_.length(); i++) {
OperandLocation& loc = operandLocations_[i];
if (loc.kind() == OperandLocation::PayloadReg) {
Register reg = loc.payloadReg();
if (currentOpRegs_.has(reg)) {
continue;
}
spillOperandToStack(masm, &loc);
availableRegs_.add(reg);
break; // We got a register, so break out of the loop.
}
if (loc.kind() == OperandLocation::ValueReg) {
ValueOperand reg = loc.valueReg();
if (currentOpRegs_.aliases(reg)) {
continue;
}
spillOperandToStack(masm, &loc);
availableRegs_.add(reg);
break; // Break out of the loop.
}
}
}
if (availableRegs_.empty() && !availableRegsAfterSpill_.empty()) {
Register reg = availableRegsAfterSpill_.takeAny();
masm.push(reg);
stackPushed_ += sizeof(uintptr_t);
masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
availableRegs_.add(reg);
}
// At this point, there must be a free register.
MOZ_RELEASE_ASSERT(!availableRegs_.empty());
Register reg = availableRegs_.takeAny();
currentOpRegs_.add(reg);
return reg;
}
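// Allocate a specific register, spilling whatever operand currently occupies
// it if necessary.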
void CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm,
Register reg) {
MOZ_ASSERT(!addedFailurePath_);
MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());
// Fixed registers should be allocated first, to ensure they're
// still available.
MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");
freeDeadOperandLocations(masm);
if (availableRegs_.has(reg)) {
availableRegs_.take(reg);
currentOpRegs_.add(reg);
return;
}
// Register may be available only after spilling contents.
if (availableRegsAfterSpill_.has(reg)) {
availableRegsAfterSpill_.take(reg);
masm.push(reg);
stackPushed_ += sizeof(uintptr_t);
masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
currentOpRegs_.add(reg);
return;
}
// The register must be used by some operand. Spill it to the stack.
for (size_t i = 0; i < operandLocations_.length(); i++) {
OperandLocation& loc = operandLocations_[i];
if (loc.kind() == OperandLocation::PayloadReg) {
if (loc.payloadReg() != reg) {
continue;
}
spillOperandToStackOrRegister(masm, &loc);
currentOpRegs_.add(reg);
return;
}
if (loc.kind() == OperandLocation::ValueReg) {
if (!loc.valueReg().aliases(reg)) {
continue;
}
ValueOperand valueReg = loc.valueReg();
spillOperandToStackOrRegister(masm, &loc);
availableRegs_.add(valueReg);
availableRegs_.take(reg);
currentOpRegs_.add(reg);
return;
}
}
MOZ_CRASH("Invalid register");
}
void CacheRegisterAllocator::allocateFixedValueRegister(MacroAssembler& masm,
ValueOperand reg) {
#ifdef JS_NUNBOX32
allocateFixedRegister(masm, reg.payloadReg());
allocateFixedRegister(masm, reg.typeReg());
#else
allocateFixedRegister(masm, reg.valueReg());
#endif
}
#ifdef JS_NUNBOX32
MOZ_NEVER_INLINE
#endif
ValueOperand CacheRegisterAllocator::allocateValueRegister(
MacroAssembler& masm) {
#ifdef JS_NUNBOX32
Register reg1 = allocateRegister(masm);
Register reg2 = allocateRegister(masm);
return ValueOperand(reg1, reg2);
#else
Register reg = allocateRegister(masm);
return ValueOperand(reg);
#endif
}
bool CacheRegisterAllocator::init() {
if (!origInputLocations_.resize(writer_.numInputOperands())) {
return false;
}
if (!operandLocations_.resize(writer_.numOperandIds())) {
return false;
}
return true;
}
void CacheRegisterAllocator::initAvailableRegsAfterSpill() {
// Registers not in availableRegs_ and not used by input operands are
// available after being spilled.
availableRegsAfterSpill_.set() = GeneralRegisterSet::Intersect(
GeneralRegisterSet::Not(availableRegs_.set()),
GeneralRegisterSet::Not(inputRegisterSet()));
}
void CacheRegisterAllocator::fixupAliasedInputs(MacroAssembler& masm) {
// If IC inputs alias each other, make sure they are stored in different
// locations so we don't have to deal with this complexity in the rest of
// the allocator.
//
// Note that this can happen in IonMonkey with something like |o.foo = o|
// or |o[i] = i|.
size_t numInputs = writer_.numInputOperands();
MOZ_ASSERT(origInputLocations_.length() == numInputs);
for (size_t i = 1; i < numInputs; i++) {
OperandLocation& loc1 = operandLocations_[i];
if (!loc1.isInRegister()) {
continue;
}
for (size_t j = 0; j < i; j++) {
OperandLocation& loc2 = operandLocations_[j];
if (!loc1.aliasesReg(loc2)) {
continue;
}
// loc1 and loc2 alias so we spill one of them. If one is a
// ValueReg and the other is a PayloadReg, we have to spill the
// PayloadReg: spilling the ValueReg instead would leave its type
// register unallocated on 32-bit platforms.
if (loc1.kind() == OperandLocation::ValueReg) {
spillOperandToStack(masm, &loc2);
} else {
MOZ_ASSERT(loc1.kind() == OperandLocation::PayloadReg);
spillOperandToStack(masm, &loc1);
break; // Spilled loc1, so nothing else will alias it.
}
}
}
#ifdef DEBUG
assertValidState();
#endif
}
GeneralRegisterSet CacheRegisterAllocator::inputRegisterSet() const {
MOZ_ASSERT(origInputLocations_.length() == writer_.numInputOperands());
AllocatableGeneralRegisterSet result;
for (size_t i = 0; i < writer_.numInputOperands(); i++) {
const OperandLocation& loc = operandLocations_[i];
MOZ_ASSERT(loc == origInputLocations_[i]);
switch (loc.kind()) {
case OperandLocation::PayloadReg:
result.addUnchecked(loc.payloadReg());
continue;
case OperandLocation::ValueReg:
result.addUnchecked(loc.valueReg());
continue;
case OperandLocation::PayloadStack:
case OperandLocation::ValueStack:
case OperandLocation::BaselineFrame:
case OperandLocation::Constant:
case OperandLocation::DoubleReg:
continue;
case OperandLocation::Uninitialized:
break;
}
MOZ_CRASH("Invalid kind");
}
return result.set();
}
JSValueType CacheRegisterAllocator::knownType(ValOperandId val) const {
const OperandLocation& loc = operandLocations_[val.id()];
switch (loc.kind()) {
case OperandLocation::ValueReg:
case OperandLocation::ValueStack:
case OperandLocation::BaselineFrame:
return JSVAL_TYPE_UNKNOWN;
case OperandLocation::PayloadStack:
case OperandLocation::PayloadReg:
return loc.payloadType();
case OperandLocation::Constant:
return loc.constant().isDouble() ? JSVAL_TYPE_DOUBLE
: loc.constant().extractNonDoubleType();
case OperandLocation::DoubleReg:
return JSVAL_TYPE_DOUBLE;
case OperandLocation::Uninitialized:
break;
}
MOZ_CRASH("Invalid kind");
}
void CacheRegisterAllocator::initInputLocation(
size_t i, const TypedOrValueRegister& reg) {
if (reg.hasValue()) {
initInputLocation(i, reg.valueReg());
} else if (reg.typedReg().isFloat()) {
MOZ_ASSERT(reg.type() == MIRType::Double);
initInputLocation(i, reg.typedReg().fpu());
} else {
initInputLocation(i, reg.typedReg().gpr(),
ValueTypeFromMIRType(reg.type()));
}
}
void CacheRegisterAllocator::initInputLocation(
size_t i, const ConstantOrRegister& value) {
if (value.constant()) {
initInputLocation(i, value.value());
} else {
initInputLocation(i, value.reg());
}
}
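// Spill a register-based operand to the native stack, reusing a previously
// freed stack slot when one is available.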
void CacheRegisterAllocator::spillOperandToStack(MacroAssembler& masm,
OperandLocation* loc) {
MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
if (loc->kind() == OperandLocation::ValueReg) {
if (!freeValueSlots_.empty()) {
uint32_t stackPos = freeValueSlots_.popCopy();
MOZ_ASSERT(stackPos <= stackPushed_);
masm.storeValue(loc->valueReg(),
Address(masm.getStackPointer(), stackPushed_ - stackPos));
loc->setValueStack(stackPos);
return;
}
stackPushed_ += sizeof(js::Value);
masm.pushValue(loc->valueReg());
loc->setValueStack(stackPushed_);
return;
}
MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
if (!freePayloadSlots_.empty()) {
uint32_t stackPos = freePayloadSlots_.popCopy();
MOZ_ASSERT(stackPos <= stackPushed_);
masm.storePtr(loc->payloadReg(),
Address(masm.getStackPointer(), stackPushed_ - stackPos));
loc->setPayloadStack(stackPos, loc->payloadType());
return;
}
stackPushed_ += sizeof(uintptr_t);
masm.push(loc->payloadReg());
loc->setPayloadStack(stackPushed_, loc->payloadType());
}
void CacheRegisterAllocator::spillOperandToStackOrRegister(
MacroAssembler& masm, OperandLocation* loc) {
MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
// If enough registers are available, use them.
if (loc->kind() == OperandLocation::ValueReg) {
static const size_t BoxPieces = sizeof(Value) / sizeof(uintptr_t);
if (availableRegs_.set().size() >= BoxPieces) {
ValueOperand reg = availableRegs_.takeAnyValue();
masm.moveValue(loc->valueReg(), reg);
loc->setValueReg(reg);
return;
}
} else {
MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
if (!availableRegs_.empty()) {
Register reg = availableRegs_.takeAny();
masm.movePtr(loc->payloadReg(), reg);
loc->setPayloadReg(reg, loc->payloadType());
return;
}
}
// Not enough registers available, spill to the stack.
spillOperandToStack(masm, loc);
}
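// Load a stack-based payload operand into |dest|, popping it if it is on top
// of the stack and marking its slot as free otherwise.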
void CacheRegisterAllocator::popPayload(MacroAssembler& masm,
OperandLocation* loc, Register dest) {
MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));
// The payload is on the stack. If it's on top of the stack we can just
// pop it, else we emit a load.
if (loc->payloadStack() == stackPushed_) {
masm.pop(dest);
stackPushed_ -= sizeof(uintptr_t);
} else {
MOZ_ASSERT(loc->payloadStack() < stackPushed_);
masm.loadPtr(payloadAddress(masm, loc), dest);
masm.propagateOOM(freePayloadSlots_.append(loc->payloadStack()));
}
loc->setPayloadReg(dest, loc->payloadType());
}
Address CacheRegisterAllocator::valueAddress(MacroAssembler& masm,
const OperandLocation* loc) const {
MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
return Address(masm.getStackPointer(), stackPushed_ - loc->valueStack());
}
Address CacheRegisterAllocator::payloadAddress(
MacroAssembler& masm, const OperandLocation* loc) const {
MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
return Address(masm.getStackPointer(), stackPushed_ - loc->payloadStack());
}
void CacheRegisterAllocator::popValue(MacroAssembler& masm,
OperandLocation* loc, ValueOperand dest) {
MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
// The Value is on the stack. If it's on top of the stack we can just
// pop it, else we emit a load.
if (loc->valueStack() == stackPushed_) {
masm.popValue(dest);
stackPushed_ -= sizeof(js::Value);
} else {
MOZ_ASSERT(loc->valueStack() < stackPushed_);
masm.loadValue(
Address(masm.getStackPointer(), stackPushed_ - loc->valueStack()),
dest);
masm.propagateOOM(freeValueSlots_.append(loc->valueStack()));
}
loc->setValueReg(dest);
}
#ifdef DEBUG
void CacheRegisterAllocator::assertValidState() const {
// Assert different operands don't have aliasing storage. We depend on this
// when spilling registers, for instance.
if (!JitOptions.fullDebugChecks) {
return;
}
for (size_t i = 0; i < operandLocations_.length(); i++) {
const auto& loc1 = operandLocations_[i];
if (loc1.isUninitialized()) {
continue;
}
for (size_t j = 0; j < i; j++) {
const auto& loc2 = operandLocations_[j];
if (loc2.isUninitialized()) {
continue;
}
MOZ_ASSERT(!loc1.aliasesReg(loc2));
}
}
}
#endif
bool OperandLocation::aliasesReg(const OperandLocation& other) const {
MOZ_ASSERT(&other != this);
switch (other.kind_) {
case PayloadReg:
return aliasesReg(other.payloadReg());
case ValueReg:
return aliasesReg(other.valueReg());
case PayloadStack:
case ValueStack:
case BaselineFrame:
case Constant:
case DoubleReg:
return false;
case Uninitialized:
break;
}
MOZ_CRASH("Invalid kind");
}
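// Move all input operands back to their original locations (as recorded when
// compilation started), spilling to break register cycles, and reload any
// registers that were spilled for scratch use.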
void CacheRegisterAllocator::restoreInputState(MacroAssembler& masm,
bool shouldDiscardStack) {
size_t numInputOperands = origInputLocations_.length();
MOZ_ASSERT(writer_.numInputOperands() == numInputOperands);
for (size_t j = 0; j < numInputOperands; j++) {
const OperandLocation& dest = origInputLocations_[j];
OperandLocation& cur = operandLocations_[j];
if (dest == cur) {
continue;
}
auto autoAssign = mozilla::MakeScopeExit([&] { cur = dest; });
// We have a cycle if a destination register will be used later
// as source register. If that happens, just push the current value
// on the stack and later get it from there.
for (size_t k = j + 1; k < numInputOperands; k++) {
OperandLocation& laterSource = operandLocations_[k];
if (dest.aliasesReg(laterSource)) {
spillOperandToStack(masm, &laterSource);
}
}
if (dest.kind() == OperandLocation::ValueReg) {
// We have to restore a Value register.
switch (cur.kind()) {
case OperandLocation::ValueReg:
masm.moveValue(cur.valueReg(), dest.valueReg());
continue;
case OperandLocation::PayloadReg:
masm.tagValue(cur.payloadType(), cur.payloadReg(), dest.valueReg());
continue;
case OperandLocation::PayloadStack: {
Register scratch = dest.valueReg().scratchReg();
popPayload(masm, &cur, scratch);
masm.tagValue(cur.payloadType(), scratch, dest.valueReg());
continue;
}
case OperandLocation::ValueStack:
popValue(masm, &cur, dest.valueReg());
continue;
case OperandLocation::DoubleReg:
masm.boxDouble(cur.doubleReg(), dest.valueReg(), cur.doubleReg());
continue;
case OperandLocation::Constant:
case OperandLocation::BaselineFrame:
case OperandLocation::Uninitialized:
break;
}
} else if (dest.kind() == OperandLocation::PayloadReg) {
// We have to restore a payload register.
switch (cur.kind()) {
case OperandLocation::ValueReg:
MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
masm.unboxNonDouble(cur.valueReg(), dest.payloadReg(),
dest.payloadType());
continue;
case OperandLocation::PayloadReg:
MOZ_ASSERT(cur.payloadType() == dest.payloadType());
masm.mov(cur.payloadReg(), dest.payloadReg());
continue;
case OperandLocation::PayloadStack: {
MOZ_ASSERT(cur.payloadType() == dest.payloadType());
popPayload(masm, &cur, dest.payloadReg());
continue;
}
case OperandLocation::ValueStack:
MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
MOZ_ASSERT(cur.valueStack() <= stackPushed_);
MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
masm.unboxNonDouble(
Address(masm.getStackPointer(), stackPushed_ - cur.valueStack()),
dest.payloadReg(), dest.payloadType());
continue;
case OperandLocation::Constant:
case OperandLocation::BaselineFrame:
case OperandLocation::DoubleReg:
case OperandLocation::Uninitialized:
break;
}
} else if (dest.kind() == OperandLocation::Constant ||
dest.kind() == OperandLocation::BaselineFrame ||
dest.kind() == OperandLocation::DoubleReg) {
// Nothing to do.
continue;
}
MOZ_CRASH("Invalid kind");
}
for (const SpilledRegister& spill : spilledRegs_) {
MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));
if (spill.stackPushed == stackPushed_) {
masm.pop(spill.reg);
stackPushed_ -= sizeof(uintptr_t);
} else {
MOZ_ASSERT(spill.stackPushed < stackPushed_);
masm.loadPtr(
Address(masm.getStackPointer(), stackPushed_ - spill.stackPushed),
spill.reg);
}
}
if (shouldDiscardStack) {
discardStack(masm);
}
}
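// Compute the total size in bytes of this stub's data by summing the field
// sizes up to the Limit terminator.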
size_t CacheIRStubInfo::stubDataSize() const {
size_t field = 0;
size_t size = 0;
while (true) {
StubField::Type type = fieldType(field++);
if (type == StubField::Type::Limit) {
return size;
}
size += StubField::sizeInBytes(type);
}
}
template <typename T>
static GCPtr<T>* AsGCPtr(void* ptr) {
return static_cast<GCPtr<T>*>(ptr);
}
void CacheIRStubInfo::replaceStubRawWord(uint8_t* stubData, uint32_t offset,
uintptr_t oldWord,
uintptr_t newWord) const {
MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
uintptr_t* addr = reinterpret_cast<uintptr_t*>(stubData + offset);
MOZ_ASSERT(*addr == oldWord);
*addr = newWord;
}
void CacheIRStubInfo::replaceStubRawValueBits(uint8_t* stubData,
uint32_t offset, uint64_t oldBits,
uint64_t newBits) const {
MOZ_ASSERT(uint64_t(stubData + offset) % sizeof(uint64_t) == 0);
uint64_t* addr = reinterpret_cast<uint64_t*>(stubData + offset);
MOZ_ASSERT(*addr == oldBits);
*addr = newBits;
}
template <class Stub, StubField::Type type>
typename MapStubFieldToType<type>::WrappedType& CacheIRStubInfo::getStubField(
Stub* stub, uint32_t offset) const {
uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
using WrappedType = typename MapStubFieldToType<type>::WrappedType;
return *reinterpret_cast<WrappedType*>(stubData + offset);
}
#define INSTANTIATE_GET_STUB_FIELD(Type) \
template typename MapStubFieldToType<Type>::WrappedType& \
CacheIRStubInfo::getStubField<ICCacheIRStub, Type>(ICCacheIRStub * stub, \
uint32_t offset) const;
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Shape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakShape)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakGetterSetter)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::JSObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakObject)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Symbol)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::String)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::WeakBaseScript)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Value)
INSTANTIATE_GET_STUB_FIELD(StubField::Type::Id)
#undef INSTANTIATE_GET_STUB_FIELD
template <class Stub, class T>
T* CacheIRStubInfo::getPtrStubField(Stub* stub, uint32_t offset) const {
uint8_t* stubData = (uint8_t*)stub + stubDataOffset_;
MOZ_ASSERT(uintptr_t(stubData + offset) % sizeof(uintptr_t) == 0);
return *reinterpret_cast<T**>(stubData + offset);
}
template gc::AllocSite* CacheIRStubInfo::getPtrStubField(ICCacheIRStub* stub,
uint32_t offset) const;
template <StubField::Type type, typename V>
static void InitWrappedPtr(void* ptr, V val) {
using RawType = typename MapStubFieldToType<type>::RawType;
using WrappedType = typename MapStubFieldToType<type>::WrappedType;
auto* wrapped = static_cast<WrappedType*>(ptr);
new (wrapped) WrappedType(mozilla::BitwiseCast<RawType>(val));
}
static void InitWordStubField(StubField::Type type, void* dest,
uintptr_t value) {
MOZ_ASSERT(StubField::sizeIsWord(type));
MOZ_ASSERT((uintptr_t(dest) % sizeof(uintptr_t)) == 0,
"Unaligned stub field");
switch (type) {
case StubField::Type::RawInt32:
case StubField::Type::RawPointer:
case StubField::Type::AllocSite:
*static_cast<uintptr_t*>(dest) = value;
break;
case StubField::Type::Shape:
InitWrappedPtr<StubField::Type::Shape>(dest, value);
break;
case StubField::Type::WeakShape:
// No read barrier required to copy weak pointer.
InitWrappedPtr<StubField::Type::WeakShape>(dest, value);
break;
case StubField::Type::WeakGetterSetter:
// No read barrier required to copy weak pointer.
InitWrappedPtr<StubField::Type::WeakGetterSetter>(dest, value);
break;
case StubField::Type::JSObject:
InitWrappedPtr<StubField::Type::JSObject>(dest, value);
break;
case StubField::Type::WeakObject:
// No read barrier required to copy weak pointer.
InitWrappedPtr<StubField::Type::WeakObject>(dest, value);
break;
case StubField::Type::Symbol:
InitWrappedPtr<StubField::Type::Symbol>(dest, value);
break;
case StubField::Type::String:
InitWrappedPtr<StubField::Type::String>(dest, value);
break;
case StubField::Type::WeakBaseScript:
// No read barrier required to copy weak pointer.
InitWrappedPtr<StubField::Type::WeakBaseScript>(dest, value);
break;
case StubField::Type::JitCode:
InitWrappedPtr<StubField::Type::JitCode>(dest, value);
break;
case StubField::Type::Id:
AsGCPtr<jsid>(dest)->init(jsid::fromRawBits(value));
break;
case StubField::Type::RawInt64:
case StubField::Type::Double:
case StubField::Type::Value:
case StubField::Type::Limit:
MOZ_CRASH("Invalid type");
}
}
static void InitInt64StubField(StubField::Type type, void* dest,
uint64_t value) {
MOZ_ASSERT(StubField::sizeIsInt64(type));
MOZ_ASSERT((uintptr_t(dest) % sizeof(uint64_t)) == 0, "Unaligned stub field");
switch (type) {
case StubField::Type::RawInt64:
case StubField::Type::Double:
*static_cast<uint64_t*>(dest) = value;
break;
case StubField::Type::Value:
AsGCPtr<Value>(dest)->init(Value::fromRawBits(value));
break;
case StubField::Type::RawInt32:
case StubField::Type::RawPointer:
case StubField::Type::AllocSite:
case StubField::Type::Shape:
case StubField::Type::WeakShape:
case StubField::Type::WeakGetterSetter:
case StubField::Type::JSObject:
case StubField::Type::WeakObject:
case StubField::Type::Symbol:
case StubField::Type::String:
case StubField::Type::WeakBaseScript:
case StubField::Type::JitCode:
case StubField::Type::Id:
case StubField::Type::Limit:
MOZ_CRASH("Invalid type");
}
}
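// Write the collected stub fields into the stub data area at |dest|,
// initializing GC pointer fields in place through their wrapper types.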
void CacheIRWriter::copyStubData(uint8_t* dest) const {
MOZ_ASSERT(!failed());
for (const StubField& field : stubFields_) {
if (field.sizeIsWord()) {
InitWordStubField(field.type(), dest, field.asWord());
dest += sizeof(uintptr_t);
} else {
InitInt64StubField(field.type(), dest, field.asInt64());
dest += sizeof(uint64_t);
}
}
}
ICCacheIRStub* ICCacheIRStub::clone(JSRuntime* rt, ICStubSpace& newSpace) {
const CacheIRStubInfo* info = stubInfo();
MOZ_ASSERT(info->makesGCCalls());
size_t bytesNeeded = info->stubDataOffset() + info->stubDataSize();
AutoEnterOOMUnsafeRegion oomUnsafe;
void* newStubMem = newSpace.alloc(bytesNeeded);
if (!newStubMem) {
oomUnsafe.crash("ICCacheIRStub::clone");
}
ICCacheIRStub* newStub = new (newStubMem) ICCacheIRStub(*this);
const uint8_t* src = this->stubDataStart();
uint8_t* dest = newStub->stubDataStart();
// Because this can be called during sweeping when discarding JIT code, we
// have to lock the store buffer.
gc::AutoLockStoreBuffer lock(rt);
uint32_t field = 0;
while (true) {
StubField::Type type = info->fieldType(field);
if (type == StubField::Type::Limit) {
break; // Done.
}
if (StubField::sizeIsWord(type)) {
const uintptr_t* srcField = reinterpret_cast<const uintptr_t*>(src);
InitWordStubField(type, dest, *srcField);
src += sizeof(uintptr_t);
dest += sizeof(uintptr_t);
} else {
const uint64_t* srcField = reinterpret_cast<const uint64_t*>(src);
InitInt64StubField(type, dest, *srcField);
src += sizeof(uint64_t);
dest += sizeof(uint64_t);
}
field++;
}
return newStub;
}
template <typename T>
static inline bool ShouldTraceWeakEdgeInStub(JSTracer* trc) {
if constexpr (std::is_same_v<T, IonICStub>) {
// 'Weak' edges are traced strongly in IonICs.
return true;
} else {
static_assert(std::is_same_v<T, ICCacheIRStub>);
return trc->traceWeakEdges();
}
}
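// Trace every GC pointer stored in the stub's data, using the field type
// list from |stubInfo| to interpret the raw bytes.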
template <typename T>
void jit::TraceCacheIRStub(JSTracer* trc, T* stub,
const CacheIRStubInfo* stubInfo) {
using Type = StubField::Type;
uint32_t field = 0;
size_t offset = 0;
while (true) {
Type fieldType = stubInfo->fieldType(field);
switch (fieldType) {
case Type::RawInt32:
case Type::RawPointer:
case Type::RawInt64:
case Type::Double:
break;
case Type::Shape: {
// For CCW IC stubs, we can store same-zone but cross-compartment
// shapes. Use TraceSameZoneCrossCompartmentEdge to not assert in the
// GC. Note: CacheIRWriter::writeShapeField asserts we never store
// cross-zone shapes.
GCPtr<Shape*>& shapeField =
stubInfo->getStubField<T, Type::Shape>(stub, offset);
TraceSameZoneCrossCompartmentEdge(trc, &shapeField, "cacheir-shape");
break;
}
case Type::WeakShape:
if (ShouldTraceWeakEdgeInStub<T>(trc)) {
WeakHeapPtr<Shape*>& shapeField =
stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
if (shapeField) {
TraceSameZoneCrossCompartmentEdge(trc, &shapeField,
"cacheir-weak-shape");
}
}
break;
case Type::WeakGetterSetter:
if (ShouldTraceWeakEdgeInStub<T>(trc)) {
TraceNullableEdge(
trc,
&stubInfo->getStubField<T, Type::WeakGetterSetter>(stub, offset),
"cacheir-weak-getter-setter");
}
break;
case Type::JSObject: {
TraceEdge(trc, &stubInfo->getStubField<T, Type::JSObject>(stub, offset),
"cacheir-object");
break;
}
case Type::WeakObject:
if (ShouldTraceWeakEdgeInStub<T>(trc)) {
TraceNullableEdge(
trc, &stubInfo->getStubField<T, Type::WeakObject>(stub, offset),
"cacheir-weak-object");
}
break;
case Type::Symbol:
TraceEdge(trc, &stubInfo->getStubField<T, Type::Symbol>(stub, offset),
"cacheir-symbol");
break;
case Type::String:
TraceEdge(trc, &stubInfo->getStubField<T, Type::String>(stub, offset),
"cacheir-string");
break;
case Type::WeakBaseScript:
if (ShouldTraceWeakEdgeInStub<T>(trc)) {
TraceNullableEdge(
trc,
&stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset),
"cacheir-weak-script");
}
break;
case Type::JitCode:
TraceEdge(trc, &stubInfo->getStubField<T, Type::JitCode>(stub, offset),
"cacheir-jitcode");
break;
case Type::Id:
TraceEdge(trc, &stubInfo->getStubField<T, Type::Id>(stub, offset),
"cacheir-id");
break;
case Type::Value:
TraceEdge(trc, &stubInfo->getStubField<T, Type::Value>(stub, offset),
"cacheir-value");
break;
case Type::AllocSite: {
gc::AllocSite* site =
stubInfo->getPtrStubField<T, gc::AllocSite>(stub, offset);
site->trace(trc);
break;
}
case Type::Limit:
return; // Done.
}
field++;
offset += StubField::sizeInBytes(fieldType);
}
}
template void jit::TraceCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
const CacheIRStubInfo* stubInfo);
template void jit::TraceCacheIRStub(JSTracer* trc, IonICStub* stub,
const CacheIRStubInfo* stubInfo);
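// Trace only the weak fields of the stub's data. Returns false if any weak
// edge has been swept dead.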
template <typename T>
bool jit::TraceWeakCacheIRStub(JSTracer* trc, T* stub,
const CacheIRStubInfo* stubInfo) {
using Type = StubField::Type;
// Trace all fields before returning because this stub can be traced again
// later through TraceBaselineStubFrame.
bool isDead = false;
uint32_t field = 0;
size_t offset = 0;
while (true) {
Type fieldType = stubInfo->fieldType(field);
switch (fieldType) {
case Type::WeakShape: {
WeakHeapPtr<Shape*>& shapeField =
stubInfo->getStubField<T, Type::WeakShape>(stub, offset);
auto r = TraceWeakEdge(trc, &shapeField, "cacheir-weak-shape");
if (r.isDead()) {
isDead = true;
}
break;
}
case Type::WeakObject: {
WeakHeapPtr<JSObject*>& objectField =
stubInfo->getStubField<T, Type::WeakObject>(stub, offset);
auto r = TraceWeakEdge(trc, &objectField, "cacheir-weak-object");
if (r.isDead()) {
isDead = true;
}
break;
}
case Type::WeakBaseScript: {
WeakHeapPtr<BaseScript*>& scriptField =
stubInfo->getStubField<T, Type::WeakBaseScript>(stub, offset);
auto r = TraceWeakEdge(trc, &scriptField, "cacheir-weak-script");
if (r.isDead()) {
isDead = true;
}
break;
}
case Type::WeakGetterSetter: {
WeakHeapPtr<GetterSetter*>& getterSetterField =
stubInfo->getStubField<T, Type::WeakGetterSetter>(stub, offset);
auto r = TraceWeakEdge(trc, &getterSetterField,
"cacheir-weak-getter-setter");
if (r.isDead()) {
isDead = true;
}
break;
}
case Type::Limit:
// Done.
return !isDead;
case Type::RawInt32:
case Type::RawPointer:
case Type::Shape:
case Type::JSObject:
case Type::Symbol:
case Type::String:
case Type::JitCode:
case Type::Id:
case Type::AllocSite:
case Type::RawInt64:
case Type::Value:
case Type::Double:
break; // Skip non-weak fields.
}
field++;
offset += StubField::sizeInBytes(fieldType);
}
}
template bool jit::TraceWeakCacheIRStub(JSTracer* trc, ICCacheIRStub* stub,
const CacheIRStubInfo* stubInfo);
template bool jit::TraceWeakCacheIRStub(JSTracer* trc, IonICStub* stub,
const CacheIRStubInfo* stubInfo);
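// Check whether the stub data collected by this writer matches the raw stub
// data at |stubData|, comparing field by field.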
bool CacheIRWriter::stubDataEquals(const uint8_t* stubData) const {
MOZ_ASSERT(!failed());
const uintptr_t* stubDataWords = reinterpret_cast<const uintptr_t*>(stubData);
for (const StubField& field : stubFields_) {
if (field.sizeIsWord()) {
if (field.asWord() != *stubDataWords) {
return false;
}
stubDataWords++;
continue;
}
if (field.asInt64() != *reinterpret_cast<const uint64_t*>(stubDataWords)) {
return false;
}
stubDataWords += sizeof(uint64_t) / sizeof(uintptr_t);
}
return true;
}
bool CacheIRWriter::stubDataEqualsIgnoring(const uint8_t* stubData,
uint32_t ignoreOffset) const {
MOZ_ASSERT(!failed());
uint32_t offset = 0;
for (const StubField& field : stubFields_) {
if (offset != ignoreOffset) {
if (field.sizeIsWord()) {
uintptr_t raw = *reinterpret_cast<const uintptr_t*>(stubData + offset);
if (field.asWord() != raw) {
return false;
}
} else {
uint64_t raw = *reinterpret_cast<const uint64_t*>(stubData + offset);
if (field.asInt64() != raw) {
return false;
}
}
}
offset += StubField::sizeInBytes(field.type());
}
return true;
}
HashNumber CacheIRStubKey::hash(const CacheIRStubKey::Lookup& l) {
HashNumber hash = mozilla::HashBytes(l.code, l.length);
hash = mozilla::AddToHash(hash, uint32_t(l.kind));
hash = mozilla::AddToHash(hash, uint32_t(l.engine));
return hash;
}
bool CacheIRStubKey::match(const CacheIRStubKey& entry,
const CacheIRStubKey::Lookup& l) {
if (entry.stubInfo->kind() != l.kind) {
return false;
}
if (entry.stubInfo->engine() != l.engine) {
return false;
}
if (entry.stubInfo->codeLength() != l.length) {
return false;
}
if (!mozilla::ArrayEqual(entry.stubInfo->code(), l.code, l.length)) {
return false;
}
return true;
}
CacheIRReader::CacheIRReader(const CacheIRStubInfo* stubInfo)
: CacheIRReader(stubInfo->code(),
stubInfo->code() + stubInfo->codeLength()) {}
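// Allocate and initialize a CacheIRStubInfo. The CacheIR code and the field
// type list (terminated by StubField::Type::Limit) are stored inline after
// the CacheIRStubInfo itself.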
CacheIRStubInfo* CacheIRStubInfo::New(CacheKind kind, ICStubEngine engine,
bool makesGCCalls,
uint32_t stubDataOffset,
const CacheIRWriter& writer) {
size_t numStubFields = writer.numStubFields();
size_t bytesNeeded =
sizeof(CacheIRStubInfo) + writer.codeLength() +
(numStubFields + 1); // +1 for the StubField::Type::Limit terminator.
uint8_t* p = js_pod_malloc<uint8_t>(bytesNeeded);
if (!p) {
return nullptr;
}
// Copy the CacheIR code.
uint8_t* codeStart = p + sizeof(CacheIRStubInfo);
mozilla::PodCopy(codeStart, writer.codeStart(), writer.codeLength());
static_assert(sizeof(StubField::Type) == sizeof(uint8_t),
"StubField::Type must fit in uint8_t");
// Copy the stub field types.
uint8_t* fieldTypes = codeStart + writer.codeLength();
for (size_t i = 0; i < numStubFields; i++) {
fieldTypes[i] = uint8_t(writer.stubFieldType(i));
}
fieldTypes[numStubFields] = uint8_t(StubField::Type::Limit);
return new (p) CacheIRStubInfo(kind, engine, makesGCCalls, stubDataOffset,
writer.codeLength());
}
bool OperandLocation::operator==(const OperandLocation& other) const {
if (kind_ != other.kind_) {
return false;
}
switch (kind()) {
case Uninitialized:
return true;
case PayloadReg:
return payloadReg() == other.payloadReg() &&
payloadType() == other.payloadType();
case ValueReg:
return valueReg() == other.valueReg();
case PayloadStack:
return payloadStack() == other.payloadStack() &&
payloadType() == other.payloadType();
case ValueStack:
return valueStack() == other.valueStack();
case BaselineFrame:
return baselineFrameSlot() == other.baselineFrameSlot();
case Constant:
return constant() == other.constant();
case DoubleReg:
return doubleReg() == other.doubleReg();
}
MOZ_CRASH("Invalid OperandLocation kind");
}
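// Reserve the IC's output register(s) as fixed registers for the duration of
// this scope.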
AutoOutputRegister::AutoOutputRegister(CacheIRCompiler& compiler)
: output_(compiler.outputUnchecked_.ref()), alloc_(compiler.allocator) {
if (output_.hasValue()) {
alloc_.allocateFixedValueRegister(compiler.masm, output_.valueReg());
} else if (!output_.typedReg().isFloat()) {
alloc_.allocateFixedRegister(compiler.masm, output_.typedReg().gpr());
}
}
AutoOutputRegister::~AutoOutputRegister() {
if (output_.hasValue()) {
alloc_.releaseValueRegister(output_.valueReg());
} else if (!output_.typedReg().isFloat()) {
alloc_.releaseRegister(output_.typedReg().gpr());
}
}
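// Two failure paths can share code only if they have the same stack depth,
// spilled registers, and input operand locations.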
bool FailurePath::canShareFailurePath(const FailurePath& other) const {
if (stackPushed_ != other.stackPushed_) {
return false;
}
if (spilledRegs_.length() != other.spilledRegs_.length()) {
return false;
}
for (size_t i = 0; i < spilledRegs_.length(); i++) {
if (spilledRegs_[i] != other.spilledRegs_[i]) {
return false;
}
}
MOZ_ASSERT(inputs_.length() == other.inputs_.length());
for (size_t i = 0; i < inputs_.length(); i++) {
if (inputs_[i] != other.inputs_[i]) {
return false;
}
}
return true;
}
bool CacheIRCompiler::addFailurePath(FailurePath** failure) {
#ifdef DEBUG
allocator.setAddedFailurePath();
#endif
MOZ_ASSERT(!allocator.hasAutoScratchFloatRegisterSpill());
FailurePath newFailure;
for (size_t i = 0; i < writer_.numInputOperands(); i++) {
if (!newFailure.appendInput(allocator.operandLocation(i))) {
return false;
}
}
if (!newFailure.setSpilledRegs(allocator.spilledRegs())) {
return false;
}
newFailure.setStackPushed(allocator.stackPushed());
// Reuse the previous failure path if the current one is the same, to
// avoid emitting duplicate code.
if (failurePaths.length() > 0 &&
failurePaths.back().canShareFailurePath(newFailure)) {
*failure = &failurePaths.back();
return true;
}
if (!failurePaths.append(std::move(newFailure))) {
return false;
}
*failure = &failurePaths.back();
return true;
}
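// Emit a failure path: restore the allocator state captured by
// addFailurePath, bind the failure label, and move the inputs back to their
// original registers.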
bool CacheIRCompiler::emitFailurePath(size_t index) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
FailurePath& failure = failurePaths[index];
allocator.setStackPushed(failure.stackPushed());
for (size_t i = 0; i < writer_.numInputOperands(); i++) {
allocator.setOperandLocation(i, failure.input(i));
}
if (!allocator.setSpilledRegs(failure.spilledRegs())) {
return false;
}
masm.bind(failure.label());
allocator.restoreInputState(masm);
return true;
}
bool CacheIRCompiler::emitGuardIsNumber(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
JSValueType knownType = allocator.knownType(inputId);
// Doubles and ints are numbers!
if (knownType == JSVAL_TYPE_DOUBLE || knownType == JSVAL_TYPE_INT32) {
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestNumber(Assembler::NotEqual, input, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardToObject(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (allocator.knownType(inputId) == JSVAL_TYPE_OBJECT) {
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestObject(Assembler::NotEqual, input, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardIsNullOrUndefined(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
JSValueType knownType = allocator.knownType(inputId);
if (knownType == JSVAL_TYPE_UNDEFINED || knownType == JSVAL_TYPE_NULL) {
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Label success;
masm.branchTestNull(Assembler::Equal, input, &success);
masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
masm.bind(&success);
return true;
}
bool CacheIRCompiler::emitGuardIsNull(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
JSValueType knownType = allocator.knownType(inputId);
if (knownType == JSVAL_TYPE_NULL) {
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestNull(Assembler::NotEqual, input, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardIsUndefined(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
JSValueType knownType = allocator.knownType(inputId);
if (knownType == JSVAL_TYPE_UNDEFINED) {
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardIsNotUninitializedLexical(ValOperandId valId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
ValueOperand val = allocator.useValueRegister(masm, valId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestMagicValue(Assembler::Equal, val, JS_UNINITIALIZED_LEXICAL,
failure->label());
return true;
}
bool CacheIRCompiler::emitGuardBooleanToInt32(ValOperandId inputId,
Int32OperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register output = allocator.defineRegister(masm, resultId);
if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
Register input =
allocator.useRegister(masm, BooleanOperandId(inputId.id()));
masm.move32(input, output);
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.fallibleUnboxBoolean(input, output, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardToString(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (allocator.knownType(inputId) == JSVAL_TYPE_STRING) {
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestString(Assembler::NotEqual, input, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardToSymbol(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (allocator.knownType(inputId) == JSVAL_TYPE_SYMBOL) {
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardToBigInt(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (allocator.knownType(inputId) == JSVAL_TYPE_BIGINT) {
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardToBoolean(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (allocator.knownType(inputId) == JSVAL_TYPE_BOOLEAN) {
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardToInt32(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardToNonGCThing(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestGCThing(Assembler::Equal, input, failure->label());
return true;
}
// Infallible |emitDouble| emitters can use this implementation to avoid
// generating extra clean-up instructions to restore the scratch float register.
// To select this function simply omit the |Label* fail| parameter for the
// emitter lambda function.
template <typename EmitDouble>
static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 1,
void>
EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
ValueOperand input, FailurePath* failure,
EmitDouble emitDouble) {
AutoScratchFloatRegister floatReg(compiler);
masm.unboxDouble(input, floatReg);
emitDouble(floatReg.get());
}
template <typename EmitDouble>
static std::enable_if_t<mozilla::FunctionTypeTraits<EmitDouble>::arity == 2,
void>
EmitGuardDouble(CacheIRCompiler* compiler, MacroAssembler& masm,
ValueOperand input, FailurePath* failure,
EmitDouble emitDouble) {
AutoScratchFloatRegister floatReg(compiler, failure);
masm.unboxDouble(input, floatReg);
emitDouble(floatReg.get(), floatReg.failure());
}
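// Dispatch on the input's type tag: unbox int32 values into |output| and run
// |emitInt32|, run |emitDouble| on doubles, and jump to |failure| for
// anything else.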
template <typename EmitInt32, typename EmitDouble>
static void EmitGuardInt32OrDouble(CacheIRCompiler* compiler,
MacroAssembler& masm, ValueOperand input,
Register output, FailurePath* failure,
EmitInt32 emitInt32, EmitDouble emitDouble) {
Label done;
{
ScratchTagScope tag(masm, input);
masm.splitTagForTest(input, tag);
Label notInt32;
masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
{
ScratchTagScopeRelease _(&tag);
masm.unboxInt32(input, output);
emitInt32();
masm.jump(&done);
}
masm.bind(&notInt32);
masm.branchTestDouble(Assembler::NotEqual, tag, failure->label());
{
ScratchTagScopeRelease _(&tag);
EmitGuardDouble(compiler, masm, input, failure, emitDouble);
}
}
masm.bind(&done);
}
bool CacheIRCompiler::emitGuardToInt32Index(ValOperandId inputId,
Int32OperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register output = allocator.defineRegister(masm, resultId);
if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
Register input = allocator.useRegister(masm, Int32OperandId(inputId.id()));
masm.move32(input, output);
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
EmitGuardInt32OrDouble(
this, masm, input, output, failure,
[]() {
// No-op if the value is already an int32.
},
[&](FloatRegister floatReg, Label* fail) {
// ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
masm.convertDoubleToInt32(floatReg, output, fail, false);
});
return true;
}
bool CacheIRCompiler::emitInt32ToIntPtr(Int32OperandId inputId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register input = allocator.useRegister(masm, inputId);
Register output = allocator.defineRegister(masm, resultId);
masm.move32SignExtendToPtr(input, output);
return true;
}
bool CacheIRCompiler::emitGuardNumberToIntPtrIndex(NumberOperandId inputId,
bool supportOOB,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register output = allocator.defineRegister(masm, resultId);
FailurePath* failure = nullptr;
if (!supportOOB) {
if (!addFailurePath(&failure)) {
return false;
}
}
AutoScratchFloatRegister floatReg(this, failure);
allocator.ensureDoubleRegister(masm, inputId, floatReg);
// ToPropertyKey(-0.0) is "0", so we can truncate -0.0 to 0 here.
if (supportOOB) {
Label done, fail;
masm.convertDoubleToPtr(floatReg, output, &fail, false);
masm.jump(&done);
// Substitute the invalid index with an arbitrary out-of-bounds index.
masm.bind(&fail);
masm.movePtr(ImmWord(-1), output);
masm.bind(&done);
} else {
masm.convertDoubleToPtr(floatReg, output, floatReg.failure(), false);
}
return true;
}
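// Truncate a double to an int32 (modulo 2^32), falling back to an ABI call
// of JS::ToInt32 when the inline truncation path fails.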
static void TruncateDoubleModUint32(MacroAssembler& masm,
FloatRegister floatReg, Register result,
const LiveRegisterSet& liveVolatileRegs) {
Label truncateABICall;
masm.branchTruncateDoubleMaybeModUint32(floatReg, result, &truncateABICall);
if (truncateABICall.used()) {
Label done;
masm.jump(&done);
masm.bind(&truncateABICall);
LiveRegisterSet save = liveVolatileRegs;
save.takeUnchecked(floatReg);
save.takeUnchecked(floatReg.asSingle());
masm.PushRegsInMask(save);
using Fn = int32_t (*)(double);
masm.setupUnalignedABICall(result);
masm.passABIArg(floatReg, ABIType::Float64);
masm.callWithABI<Fn, JS::ToInt32>(ABIType::General,
CheckUnsafeCallWithABI::DontCheckOther);
masm.storeCallInt32Result(result);
LiveRegisterSet ignore;
ignore.add(result);
masm.PopRegsInMaskIgnore(save, ignore);
masm.bind(&done);
}
}
bool CacheIRCompiler::emitGuardToInt32ModUint32(ValOperandId inputId,
Int32OperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register output = allocator.defineRegister(masm, resultId);
if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
if (input.constant()) {
masm.move32(Imm32(input.value().toInt32()), output);
} else {
MOZ_ASSERT(input.reg().type() == MIRType::Int32);
masm.move32(input.reg().typedReg().gpr(), output);
}
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
EmitGuardInt32OrDouble(
this, masm, input, output, failure,
[]() {
// No-op if the value is already an int32.
},
[&](FloatRegister floatReg) {
TruncateDoubleModUint32(masm, floatReg, output, liveVolatileRegs());
});
return true;
}
bool CacheIRCompiler::emitGuardToUint8Clamped(ValOperandId inputId,
Int32OperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register output = allocator.defineRegister(masm, resultId);
if (allocator.knownType(inputId) == JSVAL_TYPE_INT32) {
ConstantOrRegister input = allocator.useConstantOrRegister(masm, inputId);
if (input.constant()) {
masm.move32(Imm32(ClampDoubleToUint8(input.value().toInt32())), output);
} else {
MOZ_ASSERT(input.reg().type() == MIRType::Int32);
masm.move32(input.reg().typedReg().gpr(), output);
masm.clampIntToUint8(output);
}
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
EmitGuardInt32OrDouble(
this, masm, input, output, failure,
[&]() {
// |output| holds the unboxed int32 value.
masm.clampIntToUint8(output);
},
[&](FloatRegister floatReg) {
masm.clampDoubleToUint8(floatReg, output);
});
return true;
}
bool CacheIRCompiler::emitGuardNonDoubleType(ValOperandId inputId,
ValueType type) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
if (allocator.knownType(inputId) == JSValueType(type)) {
return true;
}
ValueOperand input = allocator.useValueRegister(masm, inputId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
switch (type) {
case ValueType::String:
masm.branchTestString(Assembler::NotEqual, input, failure->label());
break;
case ValueType::Symbol:
masm.branchTestSymbol(Assembler::NotEqual, input, failure->label());
break;
case ValueType::BigInt:
masm.branchTestBigInt(Assembler::NotEqual, input, failure->label());
break;
case ValueType::Int32:
masm.branchTestInt32(Assembler::NotEqual, input, failure->label());
break;
case ValueType::Boolean:
masm.branchTestBoolean(Assembler::NotEqual, input, failure->label());
break;
case ValueType::Undefined:
masm.branchTestUndefined(Assembler::NotEqual, input, failure->label());
break;
case ValueType::Null:
masm.branchTestNull(Assembler::NotEqual, input, failure->label());
break;
case ValueType::Double:
case ValueType::Magic:
case ValueType::PrivateGCThing:
case ValueType::Object:
MOZ_CRASH("unexpected type");
}
return true;
}
static const JSClass* ClassFor(JSContext* cx, GuardClassKind kind) {
switch (kind) {
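// Each of these kinds corresponds to exactly one JSClass, so defer to the
// single-argument ClassFor(GuardClassKind) overload.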
case GuardClassKind::Array:
case GuardClassKind::PlainObject:
case GuardClassKind::FixedLengthArrayBuffer:
case GuardClassKind::ImmutableArrayBuffer:
case GuardClassKind::ResizableArrayBuffer:
case GuardClassKind::FixedLengthSharedArrayBuffer:
case GuardClassKind::GrowableSharedArrayBuffer:
case GuardClassKind::FixedLengthDataView:
case GuardClassKind::ImmutableDataView:
case GuardClassKind::ResizableDataView:
case GuardClassKind::MappedArguments:
case GuardClassKind::UnmappedArguments:
case GuardClassKind::Set:
case GuardClassKind::Map:
case GuardClassKind::BoundFunction:
case GuardClassKind::Date:
return ClassFor(kind);
case GuardClassKind::WindowProxy:
return cx->runtime()->maybeWindowProxyClass();
case GuardClassKind::JSFunction:
MOZ_CRASH("must be handled by caller");
}
MOZ_CRASH("unexpected kind");
}
bool CacheIRCompiler::emitGuardClass(ObjOperandId objId, GuardClassKind kind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
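// Functions are special-cased: test whether the object is a function with the
// dedicated check instead of comparing against a single JSClass.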
if (kind == GuardClassKind::JSFunction) {
if (objectGuardNeedsSpectreMitigations(objId)) {
masm.branchTestObjIsFunction(Assembler::NotEqual, obj, scratch, obj,
failure->label());
} else {
masm.branchTestObjIsFunctionNoSpectreMitigations(
Assembler::NotEqual, obj, scratch, failure->label());
}
return true;
}
const JSClass* clasp = ClassFor(cx_, kind);
MOZ_ASSERT(clasp);
if (objectGuardNeedsSpectreMitigations(objId)) {
masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
failure->label());
} else {
masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
scratch, failure->label());
}
return true;
}
bool CacheIRCompiler::emitGuardNullProto(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
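// A null proto is stored as a null pointer (lazy protos use the tagged value
// 1), so the guard fails for any non-zero proto word.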
masm.loadObjProto(obj, scratch);
masm.branchTestPtr(Assembler::NonZero, scratch, scratch, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardIsExtensible(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchIfObjectNotExtensible(obj, scratch, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardDynamicSlotIsSpecificObject(
ObjOperandId objId, ObjOperandId expectedId, uint32_t slotOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register expectedObject = allocator.useRegister(masm, expectedId);
// Allocate registers before the failure path to make sure they're registered
// by addFailurePath.
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Guard on the expected object.
StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
emitLoadStubField(slot, scratch2);
BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
masm.fallibleUnboxObject(expectedSlot, scratch1, failure->label());
masm.branchPtr(Assembler::NotEqual, expectedObject, scratch1,
failure->label());
return true;
}
bool CacheIRCompiler::emitGuardDynamicSlotIsNotObject(ObjOperandId objId,
uint32_t slotOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Guard that the slot isn't an object.
StubFieldOffset slot(slotOffset, StubField::Type::RawInt32);
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
emitLoadStubField(slot, scratch2);
BaseObjectSlotIndex expectedSlot(scratch1, scratch2);
masm.branchTestObject(Assembler::Equal, expectedSlot, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardFixedSlotValue(ObjOperandId objId,
uint32_t offsetOffset,
uint32_t valOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
AutoScratchValueRegister scratchVal(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
emitLoadStubField(offset, scratch);
StubFieldOffset val(valOffset, StubField::Type::Value);
emitLoadValueStubField(val, scratchVal);
BaseIndex slotVal(obj, scratch, TimesOne);
masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
failure->label());
return true;
}
bool CacheIRCompiler::emitGuardDynamicSlotValue(ObjOperandId objId,
uint32_t offsetOffset,
uint32_t valOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
AutoScratchValueRegister scratchVal(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
StubFieldOffset offset(offsetOffset, StubField::Type::RawInt32);
emitLoadStubField(offset, scratch2);
StubFieldOffset val(valOffset, StubField::Type::Value);
emitLoadValueStubField(val, scratchVal);
BaseIndex slotVal(scratch1, scratch2, TimesOne);
masm.branchTestValue(Assembler::NotEqual, slotVal, scratchVal,
failure->label());
return true;
}
bool CacheIRCompiler::emitLoadScriptedProxyHandler(ObjOperandId resultId,
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register output = allocator.defineRegister(masm, resultId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), output);
Address handlerAddr(output, js::detail::ProxyReservedSlots::offsetOfSlot(
ScriptedProxyHandler::HANDLER_EXTRA));
masm.fallibleUnboxObject(handlerAddr, output, failure->label());
return true;
}
bool CacheIRCompiler::emitIdToStringOrSymbol(ValOperandId resultId,
ValOperandId idId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
ValueOperand id = allocator.useValueRegister(masm, idId);
ValueOperand output = allocator.defineValueRegister(masm, resultId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.moveValue(id, output);
Label done, intDone, callVM;
{
ScratchTagScope tag(masm, output);
masm.splitTagForTest(output, tag);
masm.branchTestString(Assembler::Equal, tag, &done);
masm.branchTestSymbol(Assembler::Equal, tag, &done);
masm.branchTestInt32(Assembler::NotEqual, tag, failure->label());
}
Register intReg = output.scratchReg();
masm.unboxInt32(output, intReg);
// Fast path for small integers.
masm.lookupStaticIntString(intReg, intReg, scratch, cx_->staticStrings(),
&callVM);
masm.jump(&intDone);
masm.bind(&callVM);
LiveRegisterSet volatileRegs = liveVolatileRegs();
masm.PushRegsInMask(volatileRegs);
using Fn = JSLinearString* (*)(JSContext* cx, int32_t i);
masm.setupUnalignedABICall(scratch);
masm.loadJSContext(scratch);
masm.passABIArg(scratch);
masm.passABIArg(intReg);
masm.callWithABI<Fn, js::Int32ToStringPure>();
masm.storeCallPointerResult(intReg);
LiveRegisterSet ignore;
ignore.add(intReg);
masm.PopRegsInMaskIgnore(volatileRegs, ignore);
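// Int32ToStringPure returns nullptr on failure (e.g. OOM) without throwing,
// so bail to the failure path in that case.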
masm.branchPtr(Assembler::Equal, intReg, ImmPtr(nullptr), failure->label());
masm.bind(&intDone);
masm.tagValue(JSVAL_TYPE_STRING, intReg, output);
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitLoadFixedSlot(ValOperandId resultId,
ObjOperandId objId,
uint32_t offsetOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
ValueOperand output = allocator.defineValueRegister(masm, resultId);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
StubFieldOffset slotIndex(offsetOffset, StubField::Type::RawInt32);
emitLoadStubField(slotIndex, scratch);
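// The stub field holds the fixed slot's byte offset from the object, hence
// the TimesOne scale.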
masm.loadValue(BaseIndex(obj, scratch, TimesOne), output);
return true;
}
bool CacheIRCompiler::emitLoadDynamicSlot(ValOperandId resultId,
ObjOperandId objId,
uint32_t slotOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
ValueOperand output = allocator.defineValueRegister(masm, resultId);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch1(allocator, masm);
Register scratch2 = output.scratchReg();
StubFieldOffset slotIndex(slotOffset, StubField::Type::RawInt32);
emitLoadStubField(slotIndex, scratch2);
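// Dynamic slots live in the separately allocated slots array; the stub field
// is a slot index, scaled to a byte offset by BaseObjectSlotIndex.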
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
masm.loadValue(BaseObjectSlotIndex(scratch1, scratch2), output);
return true;
}
bool CacheIRCompiler::emitGuardIsNativeObject(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchIfNonNativeObj(obj, scratch, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardIsProxy(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestObjectIsProxy(false, obj, scratch, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardIsNotProxy(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestObjectIsProxy(true, obj, scratch, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardToArrayBuffer(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchIfIsNotArrayBuffer(obj, scratch, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardToSharedArrayBuffer(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchIfIsNotSharedArrayBuffer(obj, scratch, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardIsNotArrayBufferMaybeShared(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchIfIsArrayBufferMaybeShared(obj, scratch, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardIsTypedArray(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadObjClassUnsafe(obj, scratch);
masm.branchIfClassIsNotTypedArray(scratch, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardIsNonResizableTypedArray(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadObjClassUnsafe(obj, scratch);
masm.branchIfClassIsNotNonResizableTypedArray(scratch, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardIsResizableTypedArray(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadObjClassUnsafe(obj, scratch);
masm.branchIfClassIsNotResizableTypedArray(scratch, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardIsNotDOMProxy(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, scratch,
GetDOMProxyHandlerFamily(),
failure->label());
return true;
}
bool CacheIRCompiler::emitGuardNoDenseElements(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Load obj->elements.
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
// Make sure there are no dense elements.
Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
masm.branch32(Assembler::NotEqual, initLength, Imm32(0), failure->label());
return true;
}
bool CacheIRCompiler::emitGuardSpecificInt32(Int32OperandId numId,
int32_t expected) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register num = allocator.useRegister(masm, numId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branch32(Assembler::NotEqual, num, Imm32(expected), failure->label());
return true;
}
bool CacheIRCompiler::emitGuardStringToInt32(StringOperandId strId,
Int32OperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register str = allocator.useRegister(masm, strId);
Register output = allocator.defineRegister(masm, resultId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.guardStringToInt32(str, output, scratch, liveVolatileRegs(),
failure->label());
return true;
}
bool CacheIRCompiler::emitGuardStringToNumber(StringOperandId strId,
NumberOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register str = allocator.useRegister(masm, strId);
ValueOperand output = allocator.defineValueRegister(masm, resultId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Label vmCall, done;
// Use indexed value as fast path if possible.
masm.loadStringIndexValue(str, scratch, &vmCall);
masm.tagValue(JSVAL_TYPE_INT32, scratch, output);
masm.jump(&done);
{
masm.bind(&vmCall);
// Reserve stack for holding the result value of the call.
masm.reserveStack(sizeof(double));
masm.moveStackPtrTo(output.payloadOrValueReg());
// We cannot use callVM here because callVM expects to be able to clobber all
// operands. However, this op is not the last one in the generated IC, so we
// still need to be able to reference other live values afterwards.
LiveRegisterSet volatileRegs = liveVolatileRegs();
masm.PushRegsInMask(volatileRegs);
using Fn = bool (*)(JSContext* cx, JSString* str, double* result);
masm.setupUnalignedABICall(scratch);
masm.loadJSContext(scratch);
masm.passABIArg(scratch);
masm.passABIArg(str);
masm.passABIArg(output.payloadOrValueReg());
masm.callWithABI<Fn, js::StringToNumberPure>();
masm.storeCallPointerResult(scratch);
LiveRegisterSet ignore;
ignore.add(scratch);
masm.PopRegsInMaskIgnore(volatileRegs, ignore);
Label ok;
masm.branchIfTrueBool(scratch, &ok);
{
// OOM path, recovered by StringToNumberPure.
//
// Use addToStackPtr instead of freeStack as freeStack tracks stack height
// flow-insensitively, and using it twice would confuse the stack height
// tracking.
masm.addToStackPtr(Imm32(sizeof(double)));
masm.jump(failure->label());
}
masm.bind(&ok);
{
ScratchDoubleScope fpscratch(masm);
masm.loadDouble(Address(output.payloadOrValueReg(), 0), fpscratch);
masm.boxDouble(fpscratch, output, fpscratch);
}
masm.freeStack(sizeof(double));
}
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitNumberParseIntResult(StringOperandId strId,
Int32OperandId radixId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
Register str = allocator.useRegister(masm, strId);
Register radix = allocator.useRegister(masm, radixId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, callvm.output());
#ifdef DEBUG
Label ok;
masm.branch32(Assembler::Equal, radix, Imm32(0), &ok);
masm.branch32(Assembler::Equal, radix, Imm32(10), &ok);
masm.assumeUnreachable("radix must be 0 or 10 for indexed value fast path");
masm.bind(&ok);
#endif
// Discard the stack to ensure it's balanced when we skip the vm-call.
allocator.discardStack(masm);
// Use indexed value as fast path if possible.
Label vmCall, done;
masm.loadStringIndexValue(str, scratch, &vmCall);
masm.tagValue(JSVAL_TYPE_INT32, scratch, callvm.outputValueReg());
masm.jump(&done);
{
masm.bind(&vmCall);
callvm.prepare();
masm.Push(radix);
masm.Push(str);
using Fn = bool (*)(JSContext*, HandleString, int32_t, MutableHandleValue);
callvm.call<Fn, js::NumberParseInt>();
}
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitDoubleParseIntResult(NumberOperandId numId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
AutoAvailableFloatRegister floatScratch1(*this, FloatReg0);
AutoAvailableFloatRegister floatScratch2(*this, FloatReg1);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
allocator.ensureDoubleRegister(masm, numId, floatScratch1);
masm.branchDouble(Assembler::DoubleUnordered, floatScratch1, floatScratch1,
failure->label());
masm.branchTruncateDoubleToInt32(floatScratch1, scratch, failure->label());
Label ok;
masm.branch32(Assembler::NotEqual, scratch, Imm32(0), &ok);
{
// Accept both +0 and -0 and return 0.
masm.loadConstantDouble(0.0, floatScratch2);
masm.branchDouble(Assembler::DoubleEqual, floatScratch1, floatScratch2,
&ok);
// Fail if a non-zero input is in the exclusive range (-1, 1.0e-6).
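// (Negative inputs would need a -0 result, and doubles below that bound
// stringify in exponential notation, where parseInt does not return 0.)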
masm.loadConstantDouble(DOUBLE_DECIMAL_IN_SHORTEST_LOW, floatScratch2);
masm.branchDouble(Assembler::DoubleLessThan, floatScratch1, floatScratch2,
failure->label());
}
masm.bind(&ok);
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitStringToAtom(StringOperandId stringId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register str = allocator.useRegister(masm, stringId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Label done, vmCall;
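// Already-atomized strings are done immediately; otherwise try a cheap
// atomization first and only fall back to the ABI call if that fails.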
masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), &done);
masm.tryFastAtomize(str, scratch, str, &vmCall);
masm.jump(&done);
masm.bind(&vmCall);
LiveRegisterSet save = liveVolatileRegs();
masm.PushRegsInMask(save);
using Fn = JSAtom* (*)(JSContext* cx, JSString* str);
masm.setupUnalignedABICall(scratch);
masm.loadJSContext(scratch);
masm.passABIArg(scratch);
masm.passABIArg(str);
masm.callWithABI<Fn, jit::AtomizeStringNoGC>();
masm.storeCallPointerResult(scratch);
LiveRegisterSet ignore;
ignore.add(scratch);
masm.PopRegsInMaskIgnore(save, ignore);
masm.branchPtr(Assembler::Equal, scratch, Imm32(0), failure->label());
masm.movePtr(scratch.get(), str);
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitBooleanToNumber(BooleanOperandId booleanId,
NumberOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register boolean = allocator.useRegister(masm, booleanId);
ValueOperand output = allocator.defineValueRegister(masm, resultId);
masm.tagValue(JSVAL_TYPE_INT32, boolean, output);
return true;
}
bool CacheIRCompiler::emitGuardStringToIndex(StringOperandId strId,
Int32OperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register str = allocator.useRegister(masm, strId);
Register output = allocator.defineRegister(masm, resultId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Label vmCall, done;
masm.loadStringIndexValue(str, output, &vmCall);
masm.jump(&done);
{
masm.bind(&vmCall);
LiveRegisterSet save = liveVolatileRegs();
masm.PushRegsInMask(save);
using Fn = int32_t (*)(JSString* str);
masm.setupUnalignedABICall(output);
masm.passABIArg(str);
masm.callWithABI<Fn, GetIndexFromString>();
masm.storeCallInt32Result(output);
LiveRegisterSet ignore;
ignore.add(output);
masm.PopRegsInMaskIgnore(save, ignore);
// GetIndexFromString returns a negative value on failure.
masm.branchTest32(Assembler::Signed, output, output, failure->label());
}
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitLoadProto(ObjOperandId objId, ObjOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register reg = allocator.defineRegister(masm, resultId);
masm.loadObjProto(obj, reg);
#ifdef DEBUG
// We shouldn't encounter a null or lazy proto.
MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
Label done;
masm.branchPtr(Assembler::Above, reg, ImmWord(1), &done);
masm.assumeUnreachable("Unexpected null or lazy proto in CacheIR LoadProto");
masm.bind(&done);
#endif
return true;
}
bool CacheIRCompiler::emitLoadEnclosingEnvironment(ObjOperandId objId,
ObjOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register reg = allocator.defineRegister(masm, resultId);
masm.unboxObject(
Address(obj, EnvironmentObject::offsetOfEnclosingEnvironment()), reg);
return true;
}
bool CacheIRCompiler::emitLoadWrapperTarget(ObjOperandId objId,
ObjOperandId resultId,
bool fallible) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register reg = allocator.defineRegister(masm, resultId);
FailurePath* failure;
if (fallible && !addFailurePath(&failure)) {
return false;
}
masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), reg);
Address targetAddr(reg,
js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
if (fallible) {
masm.fallibleUnboxObject(targetAddr, reg, failure->label());
} else {
masm.unboxObject(targetAddr, reg);
}
return true;
}
bool CacheIRCompiler::emitLoadValueTag(ValOperandId valId,
ValueTagOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
ValueOperand val = allocator.useValueRegister(masm, valId);
Register res = allocator.defineRegister(masm, resultId);
Register tag = masm.extractTag(val, res);
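// extractTag may return one of |val|'s registers rather than writing into
// |res|; copy the tag over if so.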
if (tag != res) {
masm.mov(tag, res);
}
return true;
}
bool CacheIRCompiler::emitLoadDOMExpandoValue(ObjOperandId objId,
ValOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
ValueOperand val = allocator.defineValueRegister(masm, resultId);
masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
val.scratchReg());
masm.loadValue(Address(val.scratchReg(),
js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
val);
return true;
}
bool CacheIRCompiler::emitLoadDOMExpandoValueIgnoreGeneration(
ObjOperandId objId, ValOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
ValueOperand output = allocator.defineValueRegister(masm, resultId);
// Determine the expando's Address.
Register scratch = output.scratchReg();
masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
Address expandoAddr(scratch,
js::detail::ProxyReservedSlots::offsetOfPrivateSlot());
#ifdef DEBUG
// Private values are stored as doubles, so assert we have a double.
Label ok;
masm.branchTestDouble(Assembler::Equal, expandoAddr, &ok);
masm.assumeUnreachable("DOM expando is not a PrivateValue!");
masm.bind(&ok);
#endif
// Load the ExpandoAndGeneration* from the PrivateValue.
masm.loadPrivate(expandoAddr, scratch);
// Load expandoAndGeneration->expando into the output Value register.
masm.loadValue(Address(scratch, ExpandoAndGeneration::offsetOfExpando()),
output);
return true;
}
bool CacheIRCompiler::emitLoadUndefinedResult() {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
masm.moveValue(UndefinedValue(), output.valueReg());
return true;
}
static void EmitStoreBoolean(MacroAssembler& masm, bool b,
const AutoOutputRegister& output) {
if (output.hasValue()) {
Value val = BooleanValue(b);
masm.moveValue(val, output.valueReg());
} else {
MOZ_ASSERT(output.type() == JSVAL_TYPE_BOOLEAN);
masm.movePtr(ImmWord(b), output.typedReg().gpr());
}
}
bool CacheIRCompiler::emitLoadBooleanResult(bool val) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
EmitStoreBoolean(masm, val, output);
return true;
}
bool CacheIRCompiler::emitLoadOperandResult(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
ValueOperand input = allocator.useValueRegister(masm, inputId);
masm.moveValue(input, output.valueReg());
return true;
}
static void EmitStoreResult(MacroAssembler& masm, Register reg,
JSValueType type,
const AutoOutputRegister& output) {
if (output.hasValue()) {
masm.tagValue(type, reg, output.valueReg());
return;
}
if (type == JSVAL_TYPE_INT32 && output.typedReg().isFloat()) {
masm.convertInt32ToDouble(reg, output.typedReg().fpu());
return;
}
if (type == output.type()) {
masm.mov(reg, output.typedReg().gpr());
return;
}
masm.assumeUnreachable("Should have monitored result");
}
bool CacheIRCompiler::emitLoadInt32ArrayLengthResult(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
masm.load32(Address(scratch, ObjectElements::offsetOfLength()), scratch);
// Guard length fits in an int32.
masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitLoadInt32ArrayLength(ObjOperandId objId,
Int32OperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register res = allocator.defineRegister(masm, resultId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), res);
masm.load32(Address(res, ObjectElements::offsetOfLength()), res);
// Guard length fits in an int32.
masm.branchTest32(Assembler::Signed, res, res, failure->label());
return true;
}
bool CacheIRCompiler::emitDoubleAddResult(NumberOperandId lhsId,
NumberOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
masm.addDouble(floatScratch1, floatScratch0);
masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
return true;
}
bool CacheIRCompiler::emitDoubleSubResult(NumberOperandId lhsId,
NumberOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
masm.subDouble(floatScratch1, floatScratch0);
masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
return true;
}
bool CacheIRCompiler::emitDoubleMulResult(NumberOperandId lhsId,
NumberOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
masm.mulDouble(floatScratch1, floatScratch0);
masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
return true;
}
bool CacheIRCompiler::emitDoubleDivResult(NumberOperandId lhsId,
NumberOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
masm.divDouble(floatScratch1, floatScratch0);
masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
return true;
}
bool CacheIRCompiler::emitDoubleModResult(NumberOperandId lhsId,
NumberOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
LiveRegisterSet save = liveVolatileRegs();
masm.PushRegsInMask(save);
using Fn = double (*)(double a, double b);
masm.setupUnalignedABICall(scratch);
masm.passABIArg(floatScratch0, ABIType::Float64);
masm.passABIArg(floatScratch1, ABIType::Float64);
masm.callWithABI<Fn, js::NumberMod>(ABIType::Float64);
masm.storeCallFloatResult(floatScratch0);
LiveRegisterSet ignore;
ignore.add(floatScratch0);
masm.PopRegsInMaskIgnore(save, ignore);
masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
return true;
}
bool CacheIRCompiler::emitDoublePowResult(NumberOperandId lhsId,
NumberOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
AutoAvailableFloatRegister floatScratch0(*this, FloatReg0);
AutoAvailableFloatRegister floatScratch1(*this, FloatReg1);
allocator.ensureDoubleRegister(masm, lhsId, floatScratch0);
allocator.ensureDoubleRegister(masm, rhsId, floatScratch1);
LiveRegisterSet save = liveVolatileRegs();
masm.PushRegsInMask(save);
using Fn = double (*)(double x, double y);
masm.setupUnalignedABICall(scratch);
masm.passABIArg(floatScratch0, ABIType::Float64);
masm.passABIArg(floatScratch1, ABIType::Float64);
masm.callWithABI<Fn, js::ecmaPow>(ABIType::Float64);
masm.storeCallFloatResult(floatScratch0);
LiveRegisterSet ignore;
ignore.add(floatScratch0);
masm.PopRegsInMaskIgnore(save, ignore);
masm.boxDouble(floatScratch0, output.valueReg(), floatScratch0);
return true;
}
bool CacheIRCompiler::emitInt32AddResult(Int32OperandId lhsId,
Int32OperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.mov(rhs, scratch);
masm.branchAdd32(Assembler::Overflow, lhs, scratch, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitInt32SubResult(Int32OperandId lhsId,
Int32OperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.mov(lhs, scratch);
masm.branchSub32(Assembler::Overflow, rhs, scratch, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitInt32MulResult(Int32OperandId lhsId,
Int32OperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
AutoScratchRegister scratch(allocator, masm);
AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Label maybeNegZero, done;
masm.mov(lhs, scratch);
masm.branchMul32(Assembler::Overflow, rhs, scratch, failure->label());
masm.branchTest32(Assembler::Zero, scratch, scratch, &maybeNegZero);
masm.jump(&done);
masm.bind(&maybeNegZero);
masm.mov(lhs, scratch2);
// Result is -0 if exactly one of lhs or rhs is negative.
masm.or32(rhs, scratch2);
masm.branchTest32(Assembler::Signed, scratch2, scratch2, failure->label());
masm.bind(&done);
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitInt32DivResult(Int32OperandId lhsId,
Int32OperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
AutoScratchRegister rem(allocator, masm);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Prevent division by 0.
masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());
// Prevent -2147483648 / -1.
Label notOverflow;
masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
masm.bind(&notOverflow);
// Prevent negative 0.
Label notZero;
masm.branchTest32(Assembler::NonZero, lhs, lhs, &notZero);
masm.branchTest32(Assembler::Signed, rhs, rhs, failure->label());
masm.bind(&notZero);
masm.mov(lhs, scratch);
masm.flexibleDivMod32(rhs, scratch, rem, false, liveVolatileRegs());
// A remainder implies a double result.
masm.branchTest32(Assembler::NonZero, rem, rem, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitInt32ModResult(Int32OperandId lhsId,
Int32OperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// x % 0 results in NaN.
masm.branchTest32(Assembler::Zero, rhs, rhs, failure->label());
// Prevent -2147483648 % -1.
//
// Traps on x86 and has undefined behavior on ARM32 (when __aeabi_idivmod is
// called).
Label notOverflow;
masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
masm.branch32(Assembler::Equal, rhs, Imm32(-1), failure->label());
masm.bind(&notOverflow);
masm.mov(lhs, scratch);
masm.flexibleRemainder32(rhs, scratch, false, liveVolatileRegs());
// Modulo takes the sign of the dividend; we can't return negative zero here.
Label notZero;
masm.branchTest32(Assembler::NonZero, scratch, scratch, &notZero);
masm.branchTest32(Assembler::Signed, lhs, lhs, failure->label());
masm.bind(&notZero);
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitInt32PowResult(Int32OperandId lhsId,
Int32OperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register base = allocator.useRegister(masm, lhsId);
Register power = allocator.useRegister(masm, rhsId);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
AutoScratchRegister scratch3(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.pow32(base, power, scratch1, scratch2, scratch3, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
return true;
}
bool CacheIRCompiler::emitInt32BitOrResult(Int32OperandId lhsId,
Int32OperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
masm.mov(rhs, scratch);
masm.or32(lhs, scratch);
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitInt32BitXorResult(Int32OperandId lhsId,
Int32OperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
masm.mov(rhs, scratch);
masm.xor32(lhs, scratch);
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitInt32BitAndResult(Int32OperandId lhsId,
Int32OperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
masm.mov(rhs, scratch);
masm.and32(lhs, scratch);
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitInt32LeftShiftResult(Int32OperandId lhsId,
Int32OperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
masm.mov(lhs, scratch);
masm.flexibleLshift32(rhs, scratch);
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitInt32RightShiftResult(Int32OperandId lhsId,
Int32OperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
masm.mov(lhs, scratch);
masm.flexibleRshift32Arithmetic(rhs, scratch);
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitInt32URightShiftResult(Int32OperandId lhsId,
Int32OperandId rhsId,
bool forceDouble) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.mov(lhs, scratch);
masm.flexibleRshift32(rhs, scratch);
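// The unsigned result may not fit in an int32; either box it as a double
// (when forceDouble) or bail out if the sign bit is set.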
if (forceDouble) {
ScratchDoubleScope fpscratch(masm);
masm.convertUInt32ToDouble(scratch, fpscratch);
masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
} else {
masm.branchTest32(Assembler::Signed, scratch, scratch, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
}
return true;
}
bool CacheIRCompiler::emitInt32NegationResult(Int32OperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register val = allocator.useRegister(masm, inputId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Guard against 0 and MIN_INT by checking if low 31-bits are all zero.
// Both of these result in a double.
masm.branchTest32(Assembler::Zero, val, Imm32(0x7fffffff), failure->label());
masm.mov(val, scratch);
masm.neg32(scratch);
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitInt32IncResult(Int32OperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register input = allocator.useRegister(masm, inputId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.mov(input, scratch);
masm.branchAdd32(Assembler::Overflow, Imm32(1), scratch, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitInt32DecResult(Int32OperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register input = allocator.useRegister(masm, inputId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.mov(input, scratch);
masm.branchSub32(Assembler::Overflow, Imm32(1), scratch, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitInt32NotResult(Int32OperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register val = allocator.useRegister(masm, inputId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
masm.mov(val, scratch);
masm.not32(scratch);
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitDoubleNegationResult(NumberOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchFloatRegister floatReg(this);
allocator.ensureDoubleRegister(masm, inputId, floatReg);
masm.negateDouble(floatReg);
masm.boxDouble(floatReg, output.valueReg(), floatReg);
return true;
}
bool CacheIRCompiler::emitDoubleIncDecResult(bool isInc,
NumberOperandId inputId) {
AutoOutputRegister output(*this);
AutoScratchFloatRegister floatReg(this);
allocator.ensureDoubleRegister(masm, inputId, floatReg);
{
ScratchDoubleScope fpscratch(masm);
masm.loadConstantDouble(1.0, fpscratch);
if (isInc) {
masm.addDouble(fpscratch, floatReg);
} else {
masm.subDouble(fpscratch, floatReg);
}
}
masm.boxDouble(floatReg, output.valueReg(), floatReg);
return true;
}
bool CacheIRCompiler::emitDoubleIncResult(NumberOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
return emitDoubleIncDecResult(true, inputId);
}
bool CacheIRCompiler::emitDoubleDecResult(NumberOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
return emitDoubleIncDecResult(false, inputId);
}
template <typename Fn, Fn fn>
bool CacheIRCompiler::emitBigIntBinaryOperationShared(BigIntOperandId lhsId,
BigIntOperandId rhsId) {
AutoCallVM callvm(masm, this, allocator);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
callvm.prepare();
masm.Push(rhs);
masm.Push(lhs);
callvm.call<Fn, fn>();
return true;
}
bool CacheIRCompiler::emitBigIntAddResult(BigIntOperandId lhsId,
BigIntOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
return emitBigIntBinaryOperationShared<Fn, BigInt::add>(lhsId, rhsId);
}
bool CacheIRCompiler::emitBigIntSubResult(BigIntOperandId lhsId,
BigIntOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
return emitBigIntBinaryOperationShared<Fn, BigInt::sub>(lhsId, rhsId);
}
bool CacheIRCompiler::emitBigIntMulResult(BigIntOperandId lhsId,
BigIntOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
return emitBigIntBinaryOperationShared<Fn, BigInt::mul>(lhsId, rhsId);
}
bool CacheIRCompiler::emitBigIntDivResult(BigIntOperandId lhsId,
BigIntOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
return emitBigIntBinaryOperationShared<Fn, BigInt::div>(lhsId, rhsId);
}
bool CacheIRCompiler::emitBigIntModResult(BigIntOperandId lhsId,
BigIntOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
return emitBigIntBinaryOperationShared<Fn, BigInt::mod>(lhsId, rhsId);
}
bool CacheIRCompiler::emitBigIntPowResult(BigIntOperandId lhsId,
BigIntOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
return emitBigIntBinaryOperationShared<Fn, BigInt::pow>(lhsId, rhsId);
}
bool CacheIRCompiler::emitBigIntBitAndResult(BigIntOperandId lhsId,
BigIntOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
return emitBigIntBinaryOperationShared<Fn, BigInt::bitAnd>(lhsId, rhsId);
}
bool CacheIRCompiler::emitBigIntBitOrResult(BigIntOperandId lhsId,
BigIntOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
return emitBigIntBinaryOperationShared<Fn, BigInt::bitOr>(lhsId, rhsId);
}
bool CacheIRCompiler::emitBigIntBitXorResult(BigIntOperandId lhsId,
BigIntOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
return emitBigIntBinaryOperationShared<Fn, BigInt::bitXor>(lhsId, rhsId);
}
bool CacheIRCompiler::emitBigIntLeftShiftResult(BigIntOperandId lhsId,
BigIntOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
return emitBigIntBinaryOperationShared<Fn, BigInt::lsh>(lhsId, rhsId);
}
bool CacheIRCompiler::emitBigIntRightShiftResult(BigIntOperandId lhsId,
BigIntOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt, HandleBigInt);
return emitBigIntBinaryOperationShared<Fn, BigInt::rsh>(lhsId, rhsId);
}
template <typename Fn, Fn fn>
bool CacheIRCompiler::emitBigIntUnaryOperationShared(BigIntOperandId inputId) {
AutoCallVM callvm(masm, this, allocator);
Register val = allocator.useRegister(masm, inputId);
callvm.prepare();
masm.Push(val);
callvm.call<Fn, fn>();
return true;
}
bool CacheIRCompiler::emitBigIntNotResult(BigIntOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt);
return emitBigIntUnaryOperationShared<Fn, BigInt::bitNot>(inputId);
}
bool CacheIRCompiler::emitBigIntNegationResult(BigIntOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt);
return emitBigIntUnaryOperationShared<Fn, BigInt::neg>(inputId);
}
bool CacheIRCompiler::emitBigIntIncResult(BigIntOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt);
return emitBigIntUnaryOperationShared<Fn, BigInt::inc>(inputId);
}
bool CacheIRCompiler::emitBigIntDecResult(BigIntOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
using Fn = BigInt* (*)(JSContext*, HandleBigInt);
return emitBigIntUnaryOperationShared<Fn, BigInt::dec>(inputId);
}
bool CacheIRCompiler::emitBigIntToIntPtr(BigIntOperandId inputId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register input = allocator.useRegister(masm, inputId);
Register output = allocator.defineRegister(masm, resultId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadBigIntPtr(input, output, failure->label());
return true;
}
static gc::Heap InitialBigIntHeap(JSContext* cx) {
JS::Zone* zone = cx->zone();
return zone->allocNurseryBigInts() ? gc::Heap::Default : gc::Heap::Tenured;
}
static void EmitAllocateBigInt(MacroAssembler& masm, Register result,
Register temp, const LiveRegisterSet& liveSet,
gc::Heap initialHeap, Label* fail) {
Label fallback, done;
masm.newGCBigInt(result, temp, initialHeap, &fallback);
masm.jump(&done);
{
masm.bind(&fallback);
// Request a minor collection at a later time if nursery allocation failed.
bool requestMinorGC = initialHeap == gc::Heap::Default;
masm.PushRegsInMask(liveSet);
using Fn = void* (*)(JSContext* cx, bool requestMinorGC);
masm.setupUnalignedABICall(temp);
masm.loadJSContext(temp);
masm.passABIArg(temp);
masm.move32(Imm32(requestMinorGC), result);
masm.passABIArg(result);
masm.callWithABI<Fn, jit::AllocateBigIntNoGC>();
masm.storeCallPointerResult(result);
masm.PopRegsInMask(liveSet);
masm.branchPtr(Assembler::Equal, result, ImmWord(0), fail);
}
masm.bind(&done);
}
bool CacheIRCompiler::emitIntPtrToBigIntResult(IntPtrOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register input = allocator.useRegister(masm, inputId);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegister scratch2(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
LiveRegisterSet save = liveVolatileRegs();
save.takeUnchecked(scratch1);
save.takeUnchecked(scratch2);
save.takeUnchecked(output);
// Allocate a new BigInt. The code after this must be infallible.
gc::Heap initialHeap = InitialBigIntHeap(cx_);
EmitAllocateBigInt(masm, scratch1, scratch2, save, initialHeap,
failure->label());
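// Work on a copy of |input|, since initializeBigIntPtr may clobber its value
// register.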
masm.movePtr(input, scratch2);
masm.initializeBigIntPtr(scratch1, scratch2);
masm.tagValue(JSVAL_TYPE_BIGINT, scratch1, output.valueReg());
return true;
}
bool CacheIRCompiler::emitBigIntPtrAdd(IntPtrOperandId lhsId,
IntPtrOperandId rhsId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
Register output = allocator.defineRegister(masm, resultId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.movePtr(rhs, output);
masm.branchAddPtr(Assembler::Overflow, lhs, output, failure->label());
return true;
}
bool CacheIRCompiler::emitBigIntPtrSub(IntPtrOperandId lhsId,
IntPtrOperandId rhsId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
Register output = allocator.defineRegister(masm, resultId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.movePtr(lhs, output);
masm.branchSubPtr(Assembler::Overflow, rhs, output, failure->label());
return true;
}
bool CacheIRCompiler::emitBigIntPtrMul(IntPtrOperandId lhsId,
IntPtrOperandId rhsId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
Register output = allocator.defineRegister(masm, resultId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.movePtr(rhs, output);
masm.branchMulPtr(Assembler::Overflow, lhs, output, failure->label());
return true;
}
bool CacheIRCompiler::emitBigIntPtrDiv(IntPtrOperandId lhsId,
IntPtrOperandId rhsId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
Register output = allocator.defineRegister(masm, resultId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
static constexpr auto DigitMin = std::numeric_limits<
mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();
// Prevent division by 0.
masm.branchTestPtr(Assembler::Zero, rhs, rhs, failure->label());
// Prevent INTPTR_MIN / -1.
Label notOverflow;
masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(DigitMin), &notOverflow);
masm.branchPtr(Assembler::Equal, rhs, Imm32(-1), failure->label());
masm.bind(&notOverflow);
LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
liveVolatileFloatRegs());
masm.movePtr(lhs, output);
masm.flexibleQuotientPtr(rhs, output, false, volatileRegs);
return true;
}
bool CacheIRCompiler::emitBigIntPtrMod(IntPtrOperandId lhsId,
IntPtrOperandId rhsId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
Register output = allocator.defineRegister(masm, resultId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
static constexpr auto DigitMin = std::numeric_limits<
mozilla::SignedStdintTypeForSize<sizeof(BigInt::Digit)>::Type>::min();
// Prevent division by 0.
masm.branchTestPtr(Assembler::Zero, rhs, rhs, failure->label());
masm.movePtr(lhs, output);
// Prevent INTPTR_MIN / -1.
Label notOverflow;
masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(DigitMin), &notOverflow);
masm.branchPtr(Assembler::NotEqual, rhs, Imm32(-1), &notOverflow);
masm.movePtr(ImmWord(0), output);
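// INTPTR_MIN % -1 is 0; with the dividend register set to 0, the remainder
// computed below is 0 and the hardware division cannot trap.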
masm.bind(&notOverflow);
LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
liveVolatileFloatRegs());
masm.flexibleRemainderPtr(rhs, output, false, volatileRegs);
return true;
}
bool CacheIRCompiler::emitBigIntPtrPow(IntPtrOperandId lhsId,
IntPtrOperandId rhsId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
Register output = allocator.defineRegister(masm, resultId);
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.powPtr(lhs, rhs, output, scratch1, scratch2, failure->label());
return true;
}
bool CacheIRCompiler::emitBigIntPtrBitOr(IntPtrOperandId lhsId,
IntPtrOperandId rhsId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
Register output = allocator.defineRegister(masm, resultId);
masm.movePtr(rhs, output);
masm.orPtr(lhs, output);
return true;
}
bool CacheIRCompiler::emitBigIntPtrBitXor(IntPtrOperandId lhsId,
IntPtrOperandId rhsId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
Register output = allocator.defineRegister(masm, resultId);
masm.movePtr(rhs, output);
masm.xorPtr(lhs, output);
return true;
}
bool CacheIRCompiler::emitBigIntPtrBitAnd(IntPtrOperandId lhsId,
IntPtrOperandId rhsId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
Register output = allocator.defineRegister(masm, resultId);
masm.movePtr(rhs, output);
masm.andPtr(lhs, output);
return true;
}
bool CacheIRCompiler::emitBigIntPtrLeftShift(IntPtrOperandId lhsId,
IntPtrOperandId rhsId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
Register output = allocator.defineRegister(masm, resultId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Label done;
masm.movePtr(lhs, output);
// 0n << x == 0n
masm.branchPtr(Assembler::Equal, lhs, Imm32(0), &done);
// x << DigitBits with x != 0n always exceeds pointer-sized storage.
masm.branchPtr(Assembler::GreaterThanOrEqual, rhs, Imm32(BigInt::DigitBits),
failure->label());
// x << -DigitBits == x >> DigitBits, which is either 0n or -1n.
Label shift;
masm.branchPtr(Assembler::GreaterThan, rhs,
Imm32(-int32_t(BigInt::DigitBits)), &shift);
{
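// An arithmetic right shift by (DigitBits - 1) collapses |lhs| to 0 when it's
// non-negative and to -1 when it's negative, matching BigInt semantics.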
masm.rshiftPtrArithmetic(Imm32(BigInt::DigitBits - 1), output);
masm.jump(&done);
}
masm.bind(&shift);
// |x << -y| is computed as |x >> y|.
Label leftShift;
masm.branchPtr(Assembler::GreaterThanOrEqual, rhs, Imm32(0), &leftShift);
{
masm.movePtr(rhs, scratch);
masm.negPtr(scratch);
masm.flexibleRshiftPtrArithmetic(scratch, output);
masm.jump(&done);
}
masm.bind(&leftShift);
masm.flexibleLshiftPtr(rhs, output);
// Check for overflow: ((lhs << rhs) >> rhs) == lhs.
masm.movePtr(output, scratch);
masm.flexibleRshiftPtrArithmetic(rhs, scratch);
masm.branchPtr(Assembler::NotEqual, scratch, lhs, failure->label());
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitBigIntPtrRightShift(IntPtrOperandId lhsId,
IntPtrOperandId rhsId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
Register output = allocator.defineRegister(masm, resultId);
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Label done;
masm.movePtr(lhs, output);
// 0n >> x == 0n
masm.branchPtr(Assembler::Equal, lhs, Imm32(0), &done);
// x >> -DigitBits == x << DigitBits, which exceeds pointer-sized storage.
masm.branchPtr(Assembler::LessThanOrEqual, rhs,
Imm32(-int32_t(BigInt::DigitBits)), failure->label());
// x >> DigitBits is either 0n or -1n.
Label shift;
masm.branchPtr(Assembler::LessThan, rhs, Imm32(BigInt::DigitBits), &shift);
{
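// An arithmetic shift by (DigitBits - 1) yields 0 for non-negative |lhs| and
// -1 for negative |lhs|, which is the correct result for x >> DigitBits.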
masm.rshiftPtrArithmetic(Imm32(BigInt::DigitBits - 1), output);
masm.jump(&done);
}
masm.bind(&shift);
// |x >> -y| is computed as |x << y|.
Label rightShift;
masm.branchPtr(Assembler::GreaterThanOrEqual, rhs, Imm32(0), &rightShift);
{
masm.movePtr(rhs, scratch1);
masm.negPtr(scratch1);
masm.flexibleLshiftPtr(scratch1, output);
// Check for overflow: ((lhs << rhs) >> rhs) == lhs.
masm.movePtr(output, scratch2);
masm.flexibleRshiftPtrArithmetic(scratch1, scratch2);
masm.branchPtr(Assembler::NotEqual, scratch2, lhs, failure->label());
masm.jump(&done);
}
masm.bind(&rightShift);
masm.flexibleRshiftPtrArithmetic(rhs, output);
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitBigIntPtrNegation(IntPtrOperandId inputId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register input = allocator.useRegister(masm, inputId);
Register output = allocator.defineRegister(masm, resultId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.movePtr(input, output);
masm.branchNegPtr(Assembler::Overflow, output, failure->label());
return true;
}
bool CacheIRCompiler::emitBigIntPtrInc(IntPtrOperandId inputId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register input = allocator.useRegister(masm, inputId);
Register output = allocator.defineRegister(masm, resultId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.movePtr(input, output);
masm.branchAddPtr(Assembler::Overflow, Imm32(1), output, failure->label());
return true;
}
bool CacheIRCompiler::emitBigIntPtrDec(IntPtrOperandId inputId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register input = allocator.useRegister(masm, inputId);
Register output = allocator.defineRegister(masm, resultId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.movePtr(input, output);
masm.branchSubPtr(Assembler::Overflow, Imm32(1), output, failure->label());
return true;
}
bool CacheIRCompiler::emitBigIntPtrNot(IntPtrOperandId inputId,
IntPtrOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register input = allocator.useRegister(masm, inputId);
Register output = allocator.defineRegister(masm, resultId);
masm.movePtr(input, output);
masm.notPtr(output);
return true;
}
bool CacheIRCompiler::emitTruncateDoubleToUInt32(NumberOperandId inputId,
Int32OperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register res = allocator.defineRegister(masm, resultId);
AutoScratchFloatRegister floatReg(this);
allocator.ensureDoubleRegister(masm, inputId, floatReg);
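// ToUint32-style truncation: the double is truncated modulo 2**32, so values
// outside the int32 range wrap rather than fail.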
TruncateDoubleModUint32(masm, floatReg, res, liveVolatileRegs());
return true;
}
bool CacheIRCompiler::emitDoubleToUint8Clamped(NumberOperandId inputId,
Int32OperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register res = allocator.defineRegister(masm, resultId);
AutoScratchFloatRegister floatReg(this);
allocator.ensureDoubleRegister(masm, inputId, floatReg);
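// Uint8Clamped conversion: round to the nearest integer (ties to even) and
// clamp the result to [0, 255].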
masm.clampDoubleToUint8(floatReg, res);
return true;
}
bool CacheIRCompiler::emitLoadArgumentsObjectLengthResult(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadArgumentsObjectLength(obj, scratch, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitLoadArgumentsObjectLength(ObjOperandId objId,
Int32OperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register res = allocator.defineRegister(masm, resultId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadArgumentsObjectLength(obj, res, failure->label());
return true;
}
bool CacheIRCompiler::emitLoadArrayBufferByteLengthInt32Result(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitLoadArrayBufferByteLengthDoubleResult(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
ScratchDoubleScope fpscratch(masm);
masm.loadArrayBufferByteLengthIntPtr(obj, scratch);
masm.convertIntPtrToDouble(scratch, fpscratch);
masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
return true;
}
bool CacheIRCompiler::emitLoadArrayBufferViewLengthInt32Result(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitLoadArrayBufferViewLengthDoubleResult(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
ScratchDoubleScope fpscratch(masm);
masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
masm.convertIntPtrToDouble(scratch, fpscratch);
masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
return true;
}
bool CacheIRCompiler::emitLoadBoundFunctionNumArgs(ObjOperandId objId,
Int32OperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register output = allocator.defineRegister(masm, resultId);
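// The flags slot packs the bound-argument count into its upper bits; shifting
// by NumBoundArgsShift extracts the count.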
masm.unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()),
output);
masm.rshift32(Imm32(BoundFunctionObject::NumBoundArgsShift), output);
return true;
}
bool CacheIRCompiler::emitLoadBoundFunctionTarget(ObjOperandId objId,
ObjOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register output = allocator.defineRegister(masm, resultId);
masm.unboxObject(Address(obj, BoundFunctionObject::offsetOfTargetSlot()),
output);
return true;
}
bool CacheIRCompiler::emitLoadBoundFunctionArgument(ObjOperandId objId,
uint32_t index,
ValOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
ValueOperand output = allocator.defineValueRegister(masm, resultId);
AutoScratchRegister scratch(allocator, masm);
constexpr size_t inlineArgsOffset =
BoundFunctionObject::offsetOfFirstInlineBoundArg();
masm.unboxObject(Address(obj, inlineArgsOffset), scratch);
masm.loadPtr(Address(scratch, NativeObject::offsetOfElements()), scratch);
masm.loadValue(Address(scratch, index * sizeof(Value)), output);
return true;
}
bool CacheIRCompiler::emitGuardBoundFunctionIsConstructor(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Address flagsSlot(obj, BoundFunctionObject::offsetOfFlagsSlot());
masm.branchTest32(Assembler::Zero, flagsSlot,
Imm32(BoundFunctionObject::IsConstructorFlag),
failure->label());
return true;
}
bool CacheIRCompiler::emitGuardObjectIdentity(ObjOperandId obj1Id,
ObjOperandId obj2Id) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj1 = allocator.useRegister(masm, obj1Id);
Register obj2 = allocator.useRegister(masm, obj2Id);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchPtr(Assembler::NotEqual, obj1, obj2, failure->label());
return true;
}
bool CacheIRCompiler::emitLoadFunctionLengthResult(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Get the JSFunction flags and arg count.
masm.load32(Address(obj, JSFunction::offsetOfFlagsAndArgCount()), scratch);
// Functions with a SelfHostedLazyScript must be compiled with the slow-path
// before the function length is known. If the length was previously resolved,
// the length property may be shadowed.
masm.branchTest32(
Assembler::NonZero, scratch,
Imm32(FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH),
failure->label());
masm.loadFunctionLength(obj, scratch, scratch, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitLoadFunctionNameResult(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadFunctionName(obj, scratch, ImmGCPtr(cx_->names().empty_),
failure->label());
masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitLinearizeForCharAccess(StringOperandId strId,
Int32OperandId indexId,
StringOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register str = allocator.useRegister(masm, strId);
Register index = allocator.useRegister(masm, indexId);
Register result = allocator.defineRegister(masm, resultId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Label done;
masm.movePtr(str, result);
// We can omit the bounds check, because we only compare the index against the
// string length. In the worst case we unnecessarily linearize the string
// when the index is out-of-bounds.
masm.branchIfCanLoadStringChar(str, index, scratch, &done);
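// Slow path: call the pure VM helper to linearize the rope. A null return
// means linearization failed, in which case we bail out.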
{
LiveRegisterSet volatileRegs = liveVolatileRegs();
masm.PushRegsInMask(volatileRegs);
using Fn = JSLinearString* (*)(JSString*);
masm.setupUnalignedABICall(scratch);
masm.passABIArg(str);
masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
masm.storeCallPointerResult(result);
LiveRegisterSet ignore;
ignore.add(result);
masm.PopRegsInMaskIgnore(volatileRegs, ignore);
masm.branchTestPtr(Assembler::Zero, result, result, failure->label());
}
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitLinearizeForCodePointAccess(
StringOperandId strId, Int32OperandId indexId, StringOperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register str = allocator.useRegister(masm, strId);
Register index = allocator.useRegister(masm, indexId);
Register result = allocator.defineRegister(masm, resultId);
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Label done;
masm.movePtr(str, result);
// We can omit the bounds check, because we only compare the index against the
// string length. In the worst case we unnecessarily linearize the string
// when the index is out-of-bounds.
masm.branchIfCanLoadStringCodePoint(str, index, scratch1, scratch2, &done);
{
LiveRegisterSet volatileRegs = liveVolatileRegs();
masm.PushRegsInMask(volatileRegs);
using Fn = JSLinearString* (*)(JSString*);
masm.setupUnalignedABICall(scratch1);
masm.passABIArg(str);
masm.callWithABI<Fn, js::jit::LinearizeForCharAccessPure>();
masm.storeCallPointerResult(result);
LiveRegisterSet ignore;
ignore.add(result);
masm.PopRegsInMaskIgnore(volatileRegs, ignore);
masm.branchTestPtr(Assembler::Zero, result, result, failure->label());
}
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitToRelativeStringIndex(Int32OperandId indexId,
StringOperandId strId,
Int32OperandId resultId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register index = allocator.useRegister(masm, indexId);
Register str = allocator.useRegister(masm, strId);
Register result = allocator.defineRegister(masm, resultId);
// If |index| is non-negative, it's an index relative to the start of the
// string. Otherwise it's an index relative to the end of the string.
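// In other words: result = (index < 0 ? str->length() : 0) + index.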
masm.move32(Imm32(0), result);
masm.cmp32Load32(Assembler::LessThan, index, Imm32(0),
Address(str, JSString::offsetOfLength()), result);
masm.add32(index, result);
return true;
}
bool CacheIRCompiler::emitLoadStringLengthResult(StringOperandId strId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register str = allocator.useRegister(masm, strId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
masm.loadStringLength(str, scratch);
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitLoadStringCharCodeResult(StringOperandId strId,
Int32OperandId indexId,
bool handleOOB) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register str = allocator.useRegister(masm, strId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
AutoScratchRegister scratch3(allocator, masm);
// Bounds check, load string char.
Label done;
if (!handleOOB) {
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
scratch1, failure->label());
masm.loadStringChar(str, index, scratch1, scratch2, scratch3,
failure->label());
} else {
// Return NaN for out-of-bounds access.
masm.moveValue(JS::NaNValue(), output.valueReg());
// The bounds check mustn't use a scratch register which aliases the output.
MOZ_ASSERT(!output.valueReg().aliases(scratch3));
// This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
// guaranteed to see no nested ropes.
Label loadFailed;
masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
scratch3, &done);
masm.loadStringChar(str, index, scratch1, scratch2, scratch3, &loadFailed);
Label loadedChar;
masm.jump(&loadedChar);
masm.bind(&loadFailed);
masm.assumeUnreachable("loadStringChar can't fail for linear strings");
masm.bind(&loadedChar);
}
masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitLoadStringCodePointResult(StringOperandId strId,
Int32OperandId indexId,
bool handleOOB) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register str = allocator.useRegister(masm, strId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
AutoScratchRegister scratch3(allocator, masm);
// Bounds check, load string char.
Label done;
if (!handleOOB) {
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
scratch1, failure->label());
masm.loadStringCodePoint(str, index, scratch1, scratch2, scratch3,
failure->label());
} else {
// Return undefined for out-of-bounds access.
masm.moveValue(JS::UndefinedValue(), output.valueReg());
// The bounds check mustn't use a scratch register which aliases the output.
MOZ_ASSERT(!output.valueReg().aliases(scratch3));
// This CacheIR op is always preceded by |LinearizeForCodePointAccess|, so
// we're guaranteed to see no nested ropes or split surrogates.
Label loadFailed;
masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
scratch3, &done);
masm.loadStringCodePoint(str, index, scratch1, scratch2, scratch3,
&loadFailed);
Label loadedChar;
masm.jump(&loadedChar);
masm.bind(&loadFailed);
masm.assumeUnreachable("loadStringCodePoint can't fail for linear strings");
masm.bind(&loadedChar);
}
masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitNewMapObjectResult(uint32_t templateObjectOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
callvm.prepare();
masm.Push(ImmPtr(nullptr)); // proto
using Fn = MapObject* (*)(JSContext*, HandleObject);
callvm.call<Fn, MapObject::create>();
return true;
}
bool CacheIRCompiler::emitNewSetObjectResult(uint32_t templateObjectOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
callvm.prepare();
masm.Push(ImmPtr(nullptr)); // proto
using Fn = SetObject* (*)(JSContext*, HandleObject);
callvm.call<Fn, SetObject::create>();
return true;
}
bool CacheIRCompiler::emitNewMapObjectFromIterableResult(
uint32_t templateObjectOffset, ValOperandId iterableId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
ValueOperand iterable = allocator.useValueRegister(masm, iterableId);
callvm.prepare();
masm.Push(ImmPtr(nullptr)); // allocatedFromJit
masm.Push(iterable);
masm.Push(ImmPtr(nullptr)); // proto
using Fn = MapObject* (*)(JSContext*, Handle<JSObject*>, Handle<Value>,
Handle<MapObject*>);
callvm.call<Fn, MapObject::createFromIterable>();
return true;
}
bool CacheIRCompiler::emitNewSetObjectFromIterableResult(
uint32_t templateObjectOffset, ValOperandId iterableId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
ValueOperand iterable = allocator.useValueRegister(masm, iterableId);
callvm.prepare();
masm.Push(ImmPtr(nullptr)); // allocatedFromJit
masm.Push(iterable);
masm.Push(ImmPtr(nullptr)); // proto
using Fn = SetObject* (*)(JSContext*, Handle<JSObject*>, Handle<Value>,
Handle<SetObject*>);
callvm.call<Fn, SetObject::createFromIterable>();
return true;
}
bool CacheIRCompiler::emitNewStringObjectResult(uint32_t templateObjectOffset,
StringOperandId strId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
Register str = allocator.useRegister(masm, strId);
callvm.prepare();
masm.Push(str);
using Fn = JSObject* (*)(JSContext*, HandleString);
callvm.call<Fn, NewStringObject>();
return true;
}
bool CacheIRCompiler::emitStringIncludesResult(StringOperandId strId,
StringOperandId searchStrId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
Register str = allocator.useRegister(masm, strId);
Register searchStr = allocator.useRegister(masm, searchStrId);
callvm.prepare();
masm.Push(searchStr);
masm.Push(str);
using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
callvm.call<Fn, js::StringIncludes>();
return true;
}
bool CacheIRCompiler::emitStringIndexOfResult(StringOperandId strId,
StringOperandId searchStrId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
Register str = allocator.useRegister(masm, strId);
Register searchStr = allocator.useRegister(masm, searchStrId);
callvm.prepare();
masm.Push(searchStr);
masm.Push(str);
using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
callvm.call<Fn, js::StringIndexOf>();
return true;
}
bool CacheIRCompiler::emitStringLastIndexOfResult(StringOperandId strId,
StringOperandId searchStrId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
Register str = allocator.useRegister(masm, strId);
Register searchStr = allocator.useRegister(masm, searchStrId);
callvm.prepare();
masm.Push(searchStr);
masm.Push(str);
using Fn = bool (*)(JSContext*, HandleString, HandleString, int32_t*);
callvm.call<Fn, js::StringLastIndexOf>();
return true;
}
bool CacheIRCompiler::emitStringStartsWithResult(StringOperandId strId,
StringOperandId searchStrId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
Register str = allocator.useRegister(masm, strId);
Register searchStr = allocator.useRegister(masm, searchStrId);
callvm.prepare();
masm.Push(searchStr);
masm.Push(str);
using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
callvm.call<Fn, js::StringStartsWith>();
return true;
}
bool CacheIRCompiler::emitStringEndsWithResult(StringOperandId strId,
StringOperandId searchStrId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
Register str = allocator.useRegister(masm, strId);
Register searchStr = allocator.useRegister(masm, searchStrId);
callvm.prepare();
masm.Push(searchStr);
masm.Push(str);
using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
callvm.call<Fn, js::StringEndsWith>();
return true;
}
bool CacheIRCompiler::emitStringToLowerCaseResult(StringOperandId strId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
Register str = allocator.useRegister(masm, strId);
callvm.prepare();
masm.Push(str);
using Fn = JSLinearString* (*)(JSContext*, JSString*);
callvm.call<Fn, js::StringToLowerCase>();
return true;
}
bool CacheIRCompiler::emitStringToUpperCaseResult(StringOperandId strId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
Register str = allocator.useRegister(masm, strId);
callvm.prepare();
masm.Push(str);
using Fn = JSLinearString* (*)(JSContext*, JSString*);
callvm.call<Fn, js::StringToUpperCase>();
return true;
}
bool CacheIRCompiler::emitStringTrimResult(StringOperandId strId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
Register str = allocator.useRegister(masm, strId);
callvm.prepare();
masm.Push(str);
using Fn = JSString* (*)(JSContext*, HandleString);
callvm.call<Fn, js::StringTrim>();
return true;
}
bool CacheIRCompiler::emitStringTrimStartResult(StringOperandId strId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
Register str = allocator.useRegister(masm, strId);
callvm.prepare();
masm.Push(str);
using Fn = JSString* (*)(JSContext*, HandleString);
callvm.call<Fn, js::StringTrimStart>();
return true;
}
bool CacheIRCompiler::emitStringTrimEndResult(StringOperandId strId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoCallVM callvm(masm, this, allocator);
Register str = allocator.useRegister(masm, strId);
callvm.prepare();
masm.Push(str);
using Fn = JSString* (*)(JSContext*, HandleString);
callvm.call<Fn, js::StringTrimEnd>();
return true;
}
bool CacheIRCompiler::emitLoadArgumentsObjectArgResult(ObjOperandId objId,
Int32OperandId indexId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadArgumentsObjectElement(obj, index, output.valueReg(), scratch,
failure->label());
return true;
}
bool CacheIRCompiler::emitLoadArgumentsObjectArgHoleResult(
ObjOperandId objId, Int32OperandId indexId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadArgumentsObjectElementHole(obj, index, output.valueReg(), scratch,
failure->label());
return true;
}
bool CacheIRCompiler::emitLoadArgumentsObjectArgExistsResult(
ObjOperandId objId, Int32OperandId indexId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadArgumentsObjectElementExists(obj, index, scratch2, scratch1,
failure->label());
EmitStoreResult(masm, scratch2, JSVAL_TYPE_BOOLEAN, output);
return true;
}
bool CacheIRCompiler::emitLoadDenseElementResult(ObjOperandId objId,
Int32OperandId indexId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Load obj->elements.
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);
// Bounds check.
Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
masm.spectreBoundsCheck32(index, initLength, scratch2, failure->label());
// Hole check.
BaseObjectElementIndex element(scratch1, index);
masm.branchTestMagic(Assembler::Equal, element, failure->label());
masm.loadTypedOrValue(element, output);
return true;
}
bool CacheIRCompiler::emitGuardInt32IsNonNegative(Int32OperandId indexId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register index = allocator.useRegister(masm, indexId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
return true;
}
bool CacheIRCompiler::emitGuardIndexIsNotDenseElement(ObjOperandId objId,
Int32OperandId indexId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegister scratch(allocator, masm);
AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Load obj->elements.
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
// Ensure index >= initLength or the element is a hole.
Label notDense;
Address capacity(scratch, ObjectElements::offsetOfInitializedLength());
masm.spectreBoundsCheck32(index, capacity, spectreScratch, &notDense);
BaseValueIndex element(scratch, index);
masm.branchTestMagic(Assembler::Equal, element, &notDense);
masm.jump(failure->label());
masm.bind(&notDense);
return true;
}
bool CacheIRCompiler::emitGuardIndexIsValidUpdateOrAdd(ObjOperandId objId,
Int32OperandId indexId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegister scratch(allocator, masm);
AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Load obj->elements.
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
Label success;
// If length is writable, branch to &success. All indices are writable.
Address flags(scratch, ObjectElements::offsetOfFlags());
masm.branchTest32(Assembler::Zero, flags,
Imm32(ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH),
&success);
// Otherwise, ensure index is in bounds.
Address length(scratch, ObjectElements::offsetOfLength());
masm.spectreBoundsCheck32(index, length, spectreScratch,
/* failure = */ failure->label());
masm.bind(&success);
return true;
}
bool CacheIRCompiler::emitGuardTagNotEqual(ValueTagOperandId lhsId,
ValueTagOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register lhs = allocator.useRegister(masm, lhsId);
Register rhs = allocator.useRegister(masm, rhsId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Label done;
masm.branch32(Assembler::Equal, lhs, rhs, failure->label());
// If both lhs and rhs are numbers, we can't use tag comparison to prove the
// values unequal: Int32 and Double have different tags but may be numerically
// equal.
masm.branchTestNumber(Assembler::NotEqual, lhs, &done);
masm.branchTestNumber(Assembler::NotEqual, rhs, &done);
masm.jump(failure->label());
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitGuardXrayExpandoShapeAndDefaultProto(
ObjOperandId objId, uint32_t shapeWrapperOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
StubFieldOffset shapeWrapper(shapeWrapperOffset, StubField::Type::JSObject);
AutoScratchRegister scratch(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
AutoScratchRegister scratch3(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
Address holderAddress(scratch,
sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
GetXrayJitInfo()->holderExpandoSlot));
masm.fallibleUnboxObject(holderAddress, scratch, failure->label());
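// Note: |expandoAddress| is based on |scratch|, which now holds the holder
// object, so the next load reads the holder's fixed expando slot.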
masm.fallibleUnboxObject(expandoAddress, scratch, failure->label());
// Unwrap the expando before checking its shape.
masm.loadPtr(Address(scratch, ProxyObject::offsetOfReservedSlots()), scratch);
masm.unboxObject(
Address(scratch, js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
scratch);
emitLoadStubField(shapeWrapper, scratch2);
LoadShapeWrapperContents(masm, scratch2, scratch2, failure->label());
masm.branchTestObjShape(Assembler::NotEqual, scratch, scratch2, scratch3,
scratch, failure->label());
// The reserved slots on the expando should all be in fixed slots.
Address protoAddress(scratch, NativeObject::getFixedSlotOffset(
GetXrayJitInfo()->expandoProtoSlot));
masm.branchTestUndefined(Assembler::NotEqual, protoAddress, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardXrayNoExpando(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
Address holderAddress(scratch,
sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(
GetXrayJitInfo()->holderExpandoSlot));
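// If there is no holder object there can be no expando; otherwise fail when
// the holder's expando slot holds an object.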
Label done;
masm.fallibleUnboxObject(holderAddress, scratch, &done);
masm.branchTestObject(Assembler::Equal, expandoAddress, failure->label());
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitGuardNoAllocationMetadataBuilder(
uint32_t builderAddrOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
StubFieldOffset builderField(builderAddrOffset, StubField::Type::RawPointer);
emitLoadStubField(builderField, scratch);
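// |scratch| holds the address of the allocation-metadata-builder pointer;
// fail if a builder is currently installed.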
masm.branchPtr(Assembler::NotEqual, Address(scratch, 0), ImmWord(0),
failure->label());
return true;
}
bool CacheIRCompiler::emitGuardFunctionHasJitEntry(ObjOperandId funId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register fun = allocator.useRegister(masm, funId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchIfFunctionHasNoJitEntry(fun, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardFunctionHasNoJitEntry(ObjOperandId funId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, funId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchIfFunctionHasJitEntry(obj, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardFunctionIsNonBuiltinCtor(ObjOperandId funId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register fun = allocator.useRegister(masm, funId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchIfNotFunctionIsNonBuiltinCtor(fun, scratch, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardFunctionIsConstructor(ObjOperandId funId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register funcReg = allocator.useRegister(masm, funId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Ensure the function is a constructor.
masm.branchTestFunctionFlags(funcReg, FunctionFlags::CONSTRUCTOR,
Assembler::Zero, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardNotClassConstructor(ObjOperandId funId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register fun = allocator.useRegister(masm, funId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchFunctionKind(Assembler::Equal, FunctionFlags::ClassConstructor,
fun, scratch, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardArrayIsPacked(ObjOperandId arrayId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register array = allocator.useRegister(masm, arrayId);
AutoScratchRegister scratch(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchArrayIsNotPacked(array, scratch, scratch2, failure->label());
return true;
}
bool CacheIRCompiler::emitGuardArgumentsObjectFlags(ObjOperandId objId,
uint8_t flags) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchTestArgumentsObjectFlags(obj, scratch, flags, Assembler::NonZero,
failure->label());
return true;
}
bool CacheIRCompiler::emitGuardObjectHasSameRealm(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.guardObjectHasSameRealm(obj, scratch, failure->label());
return true;
}
bool CacheIRCompiler::emitLoadDenseElementHoleResult(ObjOperandId objId,
Int32OperandId indexId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Make sure the index is nonnegative.
masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
// Load obj->elements.
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch1);
// Guard on the initialized length.
Label hole;
Address initLength(scratch1, ObjectElements::offsetOfInitializedLength());
masm.spectreBoundsCheck32(index, initLength, scratch2, &hole);
// Load the value.
Label done;
masm.loadValue(BaseObjectElementIndex(scratch1, index), output.valueReg());
masm.branchTestMagic(Assembler::NotEqual, output.valueReg(), &done);
// Load undefined for the hole.
masm.bind(&hole);
masm.moveValue(UndefinedValue(), output.valueReg());
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitLoadTypedArrayElementExistsResult(
ObjOperandId objId, IntPtrOperandId indexId, ArrayBufferViewKind viewKind) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Maybe<AutoScratchRegister> scratch2;
if (viewKind == ArrayBufferViewKind::Resizable) {
scratch2.emplace(allocator, masm);
}
Label outOfBounds, done;
// Bounds check.
if (viewKind == ArrayBufferViewKind::FixedLength ||
viewKind == ArrayBufferViewKind::Immutable) {
masm.loadArrayBufferViewLengthIntPtr(obj, scratch);
} else {
// Bounds check doesn't require synchronization. See IsValidIntegerIndex
// abstract operation which reads the underlying buffer byte length using
// "unordered" memory order.
auto sync = Synchronization::None();
masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch, *scratch2);
}
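// The element exists iff |index < length| (unsigned comparison).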
masm.branchPtr(Assembler::BelowOrEqual, scratch, index, &outOfBounds);
EmitStoreBoolean(masm, true, output);
masm.jump(&done);
masm.bind(&outOfBounds);
EmitStoreBoolean(masm, false, output);
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitLoadDenseElementExistsResult(ObjOperandId objId,
Int32OperandId indexId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Load obj->elements.
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
// Bounds check. Unsigned compare sends negative indices to next IC.
Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());
// Hole check.
BaseObjectElementIndex element(scratch, index);
masm.branchTestMagic(Assembler::Equal, element, failure->label());
EmitStoreBoolean(masm, true, output);
return true;
}
bool CacheIRCompiler::emitLoadDenseElementHoleExistsResult(
ObjOperandId objId, Int32OperandId indexId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Make sure the index is nonnegative.
masm.branch32(Assembler::LessThan, index, Imm32(0), failure->label());
// Load obj->elements.
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
// Guard on the initialized length.
Label hole;
Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
masm.branch32(Assembler::BelowOrEqual, initLength, index, &hole);
// Load value and replace with true.
Label done;
BaseObjectElementIndex element(scratch, index);
masm.branchTestMagic(Assembler::Equal, element, &hole);
EmitStoreBoolean(masm, true, output);
masm.jump(&done);
// Load false for the hole.
masm.bind(&hole);
EmitStoreBoolean(masm, false, output);
masm.bind(&done);
return true;
}
bool CacheIRCompiler::emitPackedArrayPopResult(ObjOperandId arrayId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register array = allocator.useRegister(masm, arrayId);
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.packedArrayPop(array, output.valueReg(), scratch1, scratch2,
failure->label());
return true;
}
bool CacheIRCompiler::emitPackedArrayShiftResult(ObjOperandId arrayId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register array = allocator.useRegister(masm, arrayId);
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.packedArrayShift(array, output.valueReg(), scratch1, scratch2,
liveVolatileRegs(), failure->label());
return true;
}
bool CacheIRCompiler::emitIsObjectResult(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
ValueOperand val = allocator.useValueRegister(masm, inputId);
masm.testObjectSet(Assembler::Equal, val, scratch);
masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitIsPackedArrayResult(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
Register outputScratch = output.valueReg().scratchReg();
masm.setIsPackedArray(obj, outputScratch, scratch);
masm.tagValue(JSVAL_TYPE_BOOLEAN, outputScratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitIsCallableResult(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
ValueOperand val = allocator.useValueRegister(masm, inputId);
Label isObject, done;
masm.branchTestObject(Assembler::Equal, val, &isObject);
// Primitives are never callable.
masm.move32(Imm32(0), scratch2);
masm.jump(&done);
masm.bind(&isObject);
masm.unboxObject(val, scratch1);
Label isProxy;
masm.isCallable(scratch1, scratch2, &isProxy);
masm.jump(&done);
masm.bind(&isProxy);
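// For proxies, callability depends on the handler, so ask the VM helper.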
{
LiveRegisterSet volatileRegs = liveVolatileRegs();
masm.PushRegsInMask(volatileRegs);
using Fn = bool (*)(JSObject* obj);
masm.setupUnalignedABICall(scratch2);
masm.passABIArg(scratch1);
masm.callWithABI<Fn, ObjectIsCallable>();
masm.storeCallBoolResult(scratch2);
LiveRegisterSet ignore;
ignore.add(scratch2);
masm.PopRegsInMaskIgnore(volatileRegs, ignore);
}
masm.bind(&done);
masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch2, output.valueReg());
return true;
}
bool CacheIRCompiler::emitIsConstructorResult(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Register obj = allocator.useRegister(masm, objId);
Label isProxy, done;
masm.isConstructor(obj, scratch, &isProxy);
masm.jump(&done);
masm.bind(&isProxy);
{
LiveRegisterSet volatileRegs = liveVolatileRegs();
masm.PushRegsInMask(volatileRegs);
using Fn = bool (*)(JSObject* obj);
masm.setupUnalignedABICall(scratch);
masm.passABIArg(obj);
masm.callWithABI<Fn, ObjectIsConstructor>();
masm.storeCallBoolResult(scratch);
LiveRegisterSet ignore;
ignore.add(scratch);
masm.PopRegsInMaskIgnore(volatileRegs, ignore);
}
masm.bind(&done);
masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitIsCrossRealmArrayConstructorResult(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Register obj = allocator.useRegister(masm, objId);
masm.setIsCrossRealmArrayConstructor(obj, scratch);
masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitArrayBufferViewByteOffsetInt32Result(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Register obj = allocator.useRegister(masm, objId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitArrayBufferViewByteOffsetDoubleResult(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
ScratchDoubleScope fpscratch(masm);
masm.loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
masm.convertIntPtrToDouble(scratch, fpscratch);
masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
return true;
}
bool CacheIRCompiler::
emitResizableTypedArrayByteOffsetMaybeOutOfBoundsInt32Result(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegister scratch2(allocator, masm);
Register obj = allocator.useRegister(masm, objId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, scratch1,
scratch2);
masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
return true;
}
bool CacheIRCompiler::
emitResizableTypedArrayByteOffsetMaybeOutOfBoundsDoubleResult(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegister scratch2(allocator, masm);
ScratchDoubleScope fpscratch(masm);
masm.loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(obj, scratch1,
scratch2);
masm.convertIntPtrToDouble(scratch1, fpscratch);
masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
return true;
}
bool CacheIRCompiler::emitTypedArrayByteLengthInt32Result(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegister scratch2(allocator, masm);
Register obj = allocator.useRegister(masm, objId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
masm.typedArrayElementSize(obj, scratch2);
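// byteLength = length * elementSize; bail out if the product overflows int32.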
masm.branchMul32(Assembler::Overflow, scratch2.get(), scratch1,
failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
return true;
}
bool CacheIRCompiler::emitTypedArrayByteLengthDoubleResult(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegister scratch2(allocator, masm);
Register obj = allocator.useRegister(masm, objId);
masm.loadArrayBufferViewLengthIntPtr(obj, scratch1);
masm.typedArrayElementSize(obj, scratch2);
masm.mulPtr(scratch2, scratch1);
ScratchDoubleScope fpscratch(masm);
masm.convertIntPtrToDouble(scratch1, fpscratch);
masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
return true;
}
bool CacheIRCompiler::emitResizableTypedArrayByteLengthInt32Result(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegister scratch2(allocator, masm);
Register obj = allocator.useRegister(masm, objId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Explicit |byteLength| accesses are seq-consistent atomic loads.
auto sync = Synchronization::Load();
masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
masm.typedArrayElementSize(obj, scratch2);
masm.branchMul32(Assembler::Overflow, scratch2.get(), scratch1,
failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
return true;
}
bool CacheIRCompiler::emitResizableTypedArrayByteLengthDoubleResult(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegister scratch2(allocator, masm);
Register obj = allocator.useRegister(masm, objId);
// Explicit |byteLength| accesses are seq-consistent atomic loads.
auto sync = Synchronization::Load();
masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
masm.typedArrayElementSize(obj, scratch2);
masm.mulPtr(scratch2, scratch1);
ScratchDoubleScope fpscratch(masm);
masm.convertIntPtrToDouble(scratch1, fpscratch);
masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
return true;
}
bool CacheIRCompiler::emitResizableTypedArrayLengthInt32Result(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegister scratch2(allocator, masm);
Register obj = allocator.useRegister(masm, objId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Explicit |length| accesses are seq-consistent atomic loads.
auto sync = Synchronization::Load();
masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
return true;
}
bool CacheIRCompiler::emitResizableTypedArrayLengthDoubleResult(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegister scratch2(allocator, masm);
Register obj = allocator.useRegister(masm, objId);
// Explicit |length| accesses are seq-consistent atomic loads.
auto sync = Synchronization::Load();
masm.loadResizableTypedArrayLengthIntPtr(sync, obj, scratch1, scratch2);
ScratchDoubleScope fpscratch(masm);
masm.convertIntPtrToDouble(scratch1, fpscratch);
masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
return true;
}
bool CacheIRCompiler::emitTypedArrayElementSizeResult(ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Register obj = allocator.useRegister(masm, objId);
masm.typedArrayElementSize(obj, scratch);
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
return true;
}
bool CacheIRCompiler::emitResizableDataViewByteLengthInt32Result(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegister scratch2(allocator, masm);
Register obj = allocator.useRegister(masm, objId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Explicit |byteLength| accesses are seq-consistent atomic loads.
auto sync = Synchronization::Load();
masm.loadResizableDataViewByteLengthIntPtr(sync, obj, scratch1, scratch2);
masm.guardNonNegativeIntPtrToInt32(scratch1, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch1, output.valueReg());
return true;
}
bool CacheIRCompiler::emitResizableDataViewByteLengthDoubleResult(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegister scratch2(allocator, masm);
Register obj = allocator.useRegister(masm, objId);
// Explicit |byteLength| accesses are seq-consistent atomic loads.
auto sync = Synchronization::Load();
masm.loadResizableDataViewByteLengthIntPtr(sync, obj, scratch1, scratch2);
ScratchDoubleScope fpscratch(masm);
masm.convertIntPtrToDouble(scratch1, fpscratch);
masm.boxDouble(fpscratch, output.valueReg(), fpscratch);
return true;
}
bool CacheIRCompiler::emitGrowableSharedArrayBufferByteLengthInt32Result(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Register obj = allocator.useRegister(masm, objId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Explicit |byteLength| accesses are seq-consistent atomic loads.
auto sync = Synchronization::Load();
masm.loadGrowableSharedArrayBufferByteLengthIntPtr(sync, obj, scratch);
masm.guardNonNegativeIntPtrToInt32(scratch, failure->label());
masm.tagValue(JSVAL_TYPE_INT32, scratch, output.