/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/CacheIRCompiler.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/FunctionTypeTraits.h"
#include "mozilla/MaybeOneOf.h"
#include "mozilla/ScopeExit.h"
#include <type_traits>
#include <utility>
#include "jslibmath.h"
#include "jsmath.h"
#include "builtin/DataViewObject.h"
#include "builtin/Object.h"
#include "gc/GCEnum.h"
#include "jit/BaselineCacheIRCompiler.h"
#include "jit/CacheIRGenerator.h"
#include "jit/IonCacheIRCompiler.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/VMFunctions.h"
#include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
#include "js/friend/XrayJitInfo.h" // js::jit::GetXrayJitInfo
#include "js/ScalarType.h" // js::Scalar::Type
#include "js/SweepingAPI.h"
#include "proxy/DOMProxy.h"
#include "proxy/Proxy.h"
#include "proxy/ScriptedProxyHandler.h"
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/BigIntType.h"
#include "vm/FunctionFlags.h" // js::FunctionFlags
#include "vm/GeneratorObject.h"
#include "vm/GetterSetter.h"
#include "vm/Interpreter.h"
#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#include "vm/Uint8Clamped.h"
#include "builtin/Boolean-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/SharedICHelpers-inl.h"
#include "jit/VMFunctionList-inl.h"
using namespace js;
using namespace js::jit;
using mozilla::Maybe;
using JS::ExpandoAndGeneration;
// Materialize the operand |op| as a boxed Value in a ValueOperand register,
// emitting whatever loads/boxing are required, and reserve that register for
// the current instruction (via currentOpRegs_). Updates the recorded operand
// location when the value moves into a register.
ValueOperand CacheRegisterAllocator::useValueRegister(MacroAssembler& masm,
                                                      ValOperandId op) {
  OperandLocation& loc = operandLocations_[op.id()];

  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      // Already boxed in a register; just reserve it.
      currentOpRegs_.add(loc.valueReg());
      return loc.valueReg();

    case OperandLocation::ValueStack: {
      // Boxed on the native stack: pop (or load) into a fresh register.
      ValueOperand reg = allocateValueRegister(masm);
      popValue(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      // In a Baseline frame slot: load it and cache it in a register.
      ValueOperand reg = allocateValueRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Constant: {
      ValueOperand reg = allocateValueRegister(masm);
      masm.moveValue(loc.constant(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadReg: {
      // Temporarily add the payload register to currentOpRegs_ so
      // allocateValueRegister will stay away from it.
      currentOpRegs_.add(loc.payloadReg());
      ValueOperand reg = allocateValueRegister(masm);
      masm.tagValue(loc.payloadType(), loc.payloadReg(), reg);
      // Once boxed, the old payload register is free again.
      currentOpRegs_.take(loc.payloadReg());
      availableRegs_.add(loc.payloadReg());
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      // Unboxed payload on the stack: pop it, then box it with its type tag.
      ValueOperand reg = allocateValueRegister(masm);
      popPayload(masm, &loc, reg.scratchReg());
      masm.tagValue(loc.payloadType(), reg.scratchReg(), reg);
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::DoubleReg: {
      // Box the unboxed double into the new Value register.
      ValueOperand reg = allocateValueRegister(masm);
      {
        ScratchDoubleScope fpscratch(masm);
        masm.boxDouble(loc.doubleReg(), reg, fpscratch);
      }
      loc.setValueReg(reg);
      return reg;
    }

    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}
// Load a value operand directly into a float register. Caller must have
// guarded isNumber on the provided val. Does not change the operand's
// recorded location (note the const qualifier).
void CacheRegisterAllocator::ensureDoubleRegister(MacroAssembler& masm,
                                                  NumberOperandId op,
                                                  FloatRegister dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[op.id()];

  // |failure| is only reachable if a non-number slipped past the caller's
  // guard; it feeds the assumeUnreachable at the bottom.
  Label failure, done;
  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      // Boxed in a register: ensureDouble converts int32 to double if needed.
      masm.ensureDouble(loc.valueReg(), dest, &failure);
      break;
    }

    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.ensureDouble(addr, dest, &failure);
      break;
    }

    case OperandLocation::DoubleReg: {
      // Already an unboxed double: a register-to-register move suffices.
      masm.moveDouble(loc.doubleReg(), dest);
      return;
    }

    case OperandLocation::Constant: {
      MOZ_ASSERT(loc.constant().isNumber(),
                 "Caller must ensure the operand is a number value");
      masm.loadConstantDouble(loc.constant().toNumber(), dest);
      return;
    }

    case OperandLocation::PayloadReg: {
      // Doubles can't be stored in payload registers, so this must be an int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      masm.convertInt32ToDouble(loc.payloadReg(), dest);
      return;
    }

    case OperandLocation::PayloadStack: {
      // Doubles can't be stored in payload registers, so this must be an int32.
      MOZ_ASSERT(loc.payloadType() == JSVAL_TYPE_INT32,
                 "Caller must ensure the operand is a number value");
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.convertInt32ToDouble(addr, dest);
      return;
    }

    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand type in ensureDoubleRegister");
      return;
  }
  // Only the ensureDouble cases fall through to here; the direct-conversion
  // cases above return without emitting the failure path.
  masm.jump(&done);

  masm.bind(&failure);
  masm.assumeUnreachable(
      "Missing guard allowed non-number to hit ensureDoubleRegister");
  masm.bind(&done);
}
// Copy the unboxed payload of |typedId| into the caller-provided scratch
// register |dest|, without changing the operand's recorded location
// (note the const qualifier).
void CacheRegisterAllocator::copyToScratchRegister(MacroAssembler& masm,
                                                   TypedOperandId typedId,
                                                   Register dest) const {
  // If AutoScratchFloatRegister is active, we have to add sizeof(double) to
  // any stack slot offsets below.
  int32_t stackOffset = hasAutoScratchFloatRegisterSpill() ? sizeof(double) : 0;

  const OperandLocation& loc = operandLocations_[typedId.id()];
  switch (loc.kind()) {
    case OperandLocation::ValueReg: {
      // Boxed in a register: unbox into |dest|.
      masm.unboxNonDouble(loc.valueReg(), dest, typedId.type());
      break;
    }
    case OperandLocation::ValueStack: {
      // Boxed on the native stack: unbox directly from memory.
      Address addr = valueAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.unboxNonDouble(addr, dest, typedId.type());
      break;
    }
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      addr.offset += stackOffset;
      masm.unboxNonDouble(addr, dest, typedId.type());
      break;
    }
    case OperandLocation::PayloadReg: {
      MOZ_ASSERT(loc.payloadType() == typedId.type());
      masm.mov(loc.payloadReg(), dest);
      return;
    }
    case OperandLocation::PayloadStack: {
      // Already unboxed on the stack: a plain pointer-sized load suffices.
      MOZ_ASSERT(loc.payloadType() == typedId.type());
      MOZ_ASSERT(loc.payloadStack() <= stackPushed_);
      Address addr = payloadAddress(masm, &loc);
      addr.offset += stackOffset;
      masm.loadPtr(addr, dest);
      return;
    }
    case OperandLocation::DoubleReg:
    case OperandLocation::Constant:
    case OperandLocation::Uninitialized:
      MOZ_CRASH("Unhandled operand location");
  }
}
// Copy the boxed Value of |valId| into the caller-provided scratch register
// |dest|, boxing payloads/doubles as needed, without changing the operand's
// recorded location.
void CacheRegisterAllocator::copyToScratchValueRegister(
    MacroAssembler& masm, ValOperandId valId, ValueOperand dest) const {
  // Stack offsets below would need adjusting if a failure path or a float
  // register spill were active, so assert neither is.
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  const OperandLocation& loc = operandLocations_[valId.id()];
  switch (loc.kind()) {
    case OperandLocation::ValueReg:
      masm.moveValue(loc.valueReg(), dest);
      break;
    case OperandLocation::ValueStack: {
      Address addr = valueAddress(masm, &loc);
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::BaselineFrame: {
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.loadValue(addr, dest);
      break;
    }
    case OperandLocation::Constant:
      masm.moveValue(loc.constant(), dest);
      break;
    case OperandLocation::PayloadReg:
      // Box the payload with its type tag.
      masm.tagValue(loc.payloadType(), loc.payloadReg(), dest);
      break;
    case OperandLocation::PayloadStack: {
      // Load the payload via dest's scratch half, then box it in place.
      Address addr = payloadAddress(masm, &loc);
      masm.loadPtr(addr, dest.scratchReg());
      masm.tagValue(loc.payloadType(), dest.scratchReg(), dest);
      break;
    }
    case OperandLocation::DoubleReg: {
      ScratchDoubleScope fpscratch(masm);
      masm.boxDouble(loc.doubleReg(), dest, fpscratch);
      break;
    }
    case OperandLocation::Uninitialized:
      MOZ_CRASH();
  }
}
// Materialize the typed operand |typedId| as an unboxed payload in a general
// purpose register, unboxing/loading it as needed, reserve that register for
// the current instruction, and update the operand's recorded location.
// Fix: removed a stray empty statement (';') that followed the BaselineFrame
// case block.
Register CacheRegisterAllocator::useRegister(MacroAssembler& masm,
                                             TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[typedId.id()];
  switch (loc.kind()) {
    case OperandLocation::PayloadReg:
      // Already unboxed in a register; just reserve it.
      currentOpRegs_.add(loc.payloadReg());
      return loc.payloadReg();

    case OperandLocation::ValueReg: {
      // It's possible the value is still boxed: as an optimization, we unbox
      // the first time we use a value as object.
      ValueOperand val = loc.valueReg();
      availableRegs_.add(val);
      Register reg = val.scratchReg();
      availableRegs_.take(reg);
      masm.unboxNonDouble(val, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      currentOpRegs_.add(reg);
      return reg;
    }

    case OperandLocation::PayloadStack: {
      Register reg = allocateRegister(masm);
      popPayload(masm, &loc, reg);
      return reg;
    }

    case OperandLocation::ValueStack: {
      // The value is on the stack, but boxed. If it's on top of the stack we
      // unbox it and then remove it from the stack, else we just unbox.
      Register reg = allocateRegister(masm);
      if (loc.valueStack() == stackPushed_) {
        masm.unboxNonDouble(Address(masm.getStackPointer(), 0), reg,
                            typedId.type());
        masm.addToStackPtr(Imm32(sizeof(js::Value)));
        MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));
        stackPushed_ -= sizeof(js::Value);
      } else {
        MOZ_ASSERT(loc.valueStack() < stackPushed_);
        masm.unboxNonDouble(
            Address(masm.getStackPointer(), stackPushed_ - loc.valueStack()),
            reg, typedId.type());
      }
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::BaselineFrame: {
      // Load the boxed value from its Baseline frame slot and unbox it.
      Register reg = allocateRegister(masm);
      Address addr = addressOf(masm, loc.baselineFrameSlot());
      masm.unboxNonDouble(addr, reg, typedId.type());
      loc.setPayloadReg(reg, typedId.type());
      return reg;
    }

    case OperandLocation::Constant: {
      // Materialize GC-thing and boolean constants directly in a register.
      Value v = loc.constant();
      Register reg = allocateRegister(masm);
      if (v.isString()) {
        masm.movePtr(ImmGCPtr(v.toString()), reg);
      } else if (v.isSymbol()) {
        masm.movePtr(ImmGCPtr(v.toSymbol()), reg);
      } else if (v.isBigInt()) {
        masm.movePtr(ImmGCPtr(v.toBigInt()), reg);
      } else if (v.isBoolean()) {
        masm.movePtr(ImmWord(v.toBoolean() ? 1 : 0), reg);
      } else {
        MOZ_CRASH("Unexpected Value");
      }
      loc.setPayloadReg(reg, v.extractNonDoubleType());
      return reg;
    }

    case OperandLocation::DoubleReg:
    case OperandLocation::Uninitialized:
      break;
  }

  MOZ_CRASH();
}
// Produce a ConstantOrRegister for |val|: constants are returned directly,
// payload locations become a typed register, boxed locations become a Value
// register, and unboxed doubles become a typed float register.
ConstantOrRegister CacheRegisterAllocator::useConstantOrRegister(
    MacroAssembler& masm, ValOperandId val) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& loc = operandLocations_[val.id()];
  auto kind = loc.kind();

  // Constants need no register at all.
  if (kind == OperandLocation::Constant) {
    return loc.constant();
  }

  // Unboxed payloads: hand back a typed GPR.
  if (kind == OperandLocation::PayloadReg ||
      kind == OperandLocation::PayloadStack) {
    JSValueType payloadType = loc.payloadType();
    Register reg = useRegister(masm, TypedOperandId(val, payloadType));
    return TypedOrValueRegister(MIRTypeFromValueType(payloadType),
                                AnyRegister(reg));
  }

  // Boxed locations: hand back a Value register.
  if (kind == OperandLocation::ValueReg ||
      kind == OperandLocation::ValueStack ||
      kind == OperandLocation::BaselineFrame) {
    return TypedOrValueRegister(useValueRegister(masm, val));
  }

  // Unboxed doubles: hand back the float register.
  if (kind == OperandLocation::DoubleReg) {
    return TypedOrValueRegister(MIRType::Double, AnyRegister(loc.doubleReg()));
  }

  MOZ_CRASH();
}
// Allocate a fresh payload register for an operand that is being defined for
// the first time, and record it as that operand's location.
Register CacheRegisterAllocator::defineRegister(MacroAssembler& masm,
                                                TypedOperandId typedId) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& location = operandLocations_[typedId.id()];
  // An operand may only be defined once.
  MOZ_ASSERT(location.kind() == OperandLocation::Uninitialized);

  Register result = allocateRegister(masm);
  location.setPayloadReg(result, typedId.type());
  return result;
}
// Allocate a fresh Value register for an operand that is being defined for
// the first time, and record it as that operand's location.
ValueOperand CacheRegisterAllocator::defineValueRegister(MacroAssembler& masm,
                                                         ValOperandId val) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  OperandLocation& location = operandLocations_[val.id()];
  // An operand may only be defined once.
  MOZ_ASSERT(location.kind() == OperandLocation::Uninitialized);

  ValueOperand result = allocateValueRegister(masm);
  location.setValueReg(result);
  return result;
}
void CacheRegisterAllocator::freeDeadOperandLocations(MacroAssembler& masm) {
  // See if any operands are dead so we can reuse their registers. Note that
  // we skip the input operands, as those are also used by failure paths, and
  // we currently don't track those uses.
  for (size_t i = writer_.numInputOperands(); i < operandLocations_.length();
       i++) {
    if (!writer_.operandIsDead(i, currentInstruction_)) {
      continue;
    }

    OperandLocation& loc = operandLocations_[i];
    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        // Dead register-backed operands give their registers back.
        availableRegs_.add(loc.payloadReg());
        break;
      case OperandLocation::ValueReg:
        availableRegs_.add(loc.valueReg());
        break;
      case OperandLocation::PayloadStack:
        // Stack slots are remembered so future spills can reuse them
        // instead of growing the stack.
        masm.propagateOOM(freePayloadSlots_.append(loc.payloadStack()));
        break;
      case OperandLocation::ValueStack:
        masm.propagateOOM(freeValueSlots_.append(loc.valueStack()));
        break;
      case OperandLocation::Uninitialized:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        // Nothing to reclaim for these kinds.
        break;
    }
    loc.setUninitialized();
  }
}
// Drop everything the allocator pushed on the native stack and invalidate all
// operand locations. Callers must be done with every operand at this point.
void CacheRegisterAllocator::discardStack(MacroAssembler& masm) {
  // This should only be called when we are no longer using the operands,
  // as we're discarding everything from the native stack. Set all operand
  // locations to Uninitialized to catch bugs.
  for (OperandLocation& location : operandLocations_) {
    location.setUninitialized();
  }

  if (stackPushed_ > 0) {
    // Pop the whole allocator-owned stack area in one adjustment.
    masm.addToStackPtr(Imm32(stackPushed_));
    stackPushed_ = 0;
  }

  freePayloadSlots_.clear();
  freeValueSlots_.clear();
}
// Allocate a general purpose register, trying progressively more expensive
// strategies: (1) a currently free register, (2) a register freed by dropping
// dead operands, (3) a register freed by spilling an operand the current op
// isn't using, (4) a callee-owned register that must be saved/restored
// (availableRegsAfterSpill_). The result is reserved in currentOpRegs_.
Register CacheRegisterAllocator::allocateRegister(MacroAssembler& masm) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  if (availableRegs_.empty()) {
    freeDeadOperandLocations(masm);
  }

  if (availableRegs_.empty()) {
    // Still no registers available, try to spill unused operands to
    // the stack.
    for (size_t i = 0; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.kind() == OperandLocation::PayloadReg) {
        Register reg = loc.payloadReg();
        if (currentOpRegs_.has(reg)) {
          // In use by the current op; can't spill it.
          continue;
        }
        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // We got a register, so break out of the loop.
      }
      if (loc.kind() == OperandLocation::ValueReg) {
        ValueOperand reg = loc.valueReg();
        if (currentOpRegs_.aliases(reg)) {
          continue;
        }
        spillOperandToStack(masm, &loc);
        availableRegs_.add(reg);
        break;  // Break out of the loop.
      }
    }
  }

  if (availableRegs_.empty() && !availableRegsAfterSpill_.empty()) {
    // Last resort: push a register we haven't touched yet; it will be
    // restored from spilledRegs_ later.
    Register reg = availableRegsAfterSpill_.takeAny();
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));

    availableRegs_.add(reg);
  }

  // At this point, there must be a free register.
  MOZ_RELEASE_ASSERT(!availableRegs_.empty());

  Register reg = availableRegs_.takeAny();
  currentOpRegs_.add(reg);
  return reg;
}
// Reserve a specific register |reg| for the current op, evicting whatever
// currently occupies it (free set, spill set, or an operand's storage).
void CacheRegisterAllocator::allocateFixedRegister(MacroAssembler& masm,
                                                   Register reg) {
  MOZ_ASSERT(!addedFailurePath_);
  MOZ_ASSERT(!hasAutoScratchFloatRegisterSpill());

  // Fixed registers should be allocated first, to ensure they're
  // still available.
  MOZ_ASSERT(!currentOpRegs_.has(reg), "Register is in use");

  freeDeadOperandLocations(masm);

  if (availableRegs_.has(reg)) {
    availableRegs_.take(reg);
    currentOpRegs_.add(reg);
    return;
  }

  // Register may be available only after spilling contents.
  if (availableRegsAfterSpill_.has(reg)) {
    availableRegsAfterSpill_.take(reg);
    masm.push(reg);
    stackPushed_ += sizeof(uintptr_t);

    masm.propagateOOM(spilledRegs_.append(SpilledRegister(reg, stackPushed_)));
    currentOpRegs_.add(reg);
    return;
  }

  // The register must be used by some operand. Spill it to the stack.
  for (size_t i = 0; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.kind() == OperandLocation::PayloadReg) {
      if (loc.payloadReg() != reg) {
        continue;
      }

      spillOperandToStackOrRegister(masm, &loc);
      currentOpRegs_.add(reg);
      return;
    }
    if (loc.kind() == OperandLocation::ValueReg) {
      if (!loc.valueReg().aliases(reg)) {
        continue;
      }

      // Spilling freed the whole ValueOperand; mark both halves available,
      // then take back just |reg| (on nunbox32 the other half stays free).
      ValueOperand valueReg = loc.valueReg();
      spillOperandToStackOrRegister(masm, &loc);

      availableRegs_.add(valueReg);
      availableRegs_.take(reg);
      currentOpRegs_.add(reg);
      return;
    }
  }

  MOZ_CRASH("Invalid register");
}
// Reserve the specific register(s) making up the ValueOperand |reg|:
// two GPRs (payload + type) on nunbox32 platforms, one GPR on punbox64.
void CacheRegisterAllocator::allocateFixedValueRegister(MacroAssembler& masm,
                                                        ValueOperand reg) {
#ifdef JS_NUNBOX32
  allocateFixedRegister(masm, reg.payloadReg());
  allocateFixedRegister(masm, reg.typeReg());
#else
  allocateFixedRegister(masm, reg.valueReg());
#endif
}
#ifdef JS_NUNBOX32
// Possible miscompilation in clang-12 (bug 1689641)
MOZ_NEVER_INLINE
#endif
// Allocate a ValueOperand: two GPRs on nunbox32 platforms, one on punbox64.
ValueOperand CacheRegisterAllocator::allocateValueRegister(
    MacroAssembler& masm) {
#ifdef JS_NUNBOX32
  Register reg1 = allocateRegister(masm);
  Register reg2 = allocateRegister(masm);
  return ValueOperand(reg1, reg2);
#else
  Register reg = allocateRegister(masm);
  return ValueOperand(reg);
#endif
}
// Size the location tables: one original-location slot per input operand and
// one current-location slot per operand id. Returns false on OOM.
bool CacheRegisterAllocator::init() {
  return origInputLocations_.resize(writer_.numInputOperands()) &&
         operandLocations_.resize(writer_.numOperandIds());
}
void CacheRegisterAllocator::initAvailableRegsAfterSpill() {
// Registers not in availableRegs_ and not used by input operands are
// available after being spilled.
availableRegsAfterSpill_.set() = GeneralRegisterSet::Intersect(
GeneralRegisterSet::Not(availableRegs_.set()),
GeneralRegisterSet::Not(inputRegisterSet()));
}
void CacheRegisterAllocator::fixupAliasedInputs(MacroAssembler& masm) {
  // If IC inputs alias each other, make sure they are stored in different
  // locations so we don't have to deal with this complexity in the rest of
  // the allocator.
  //
  // Note that this can happen in IonMonkey with something like |o.foo = o|
  // or |o[i] = i|.

  size_t numInputs = writer_.numInputOperands();
  MOZ_ASSERT(origInputLocations_.length() == numInputs);

  // Compare every register-backed input against all earlier inputs.
  for (size_t i = 1; i < numInputs; i++) {
    OperandLocation& loc1 = operandLocations_[i];
    if (!loc1.isInRegister()) {
      continue;
    }

    for (size_t j = 0; j < i; j++) {
      OperandLocation& loc2 = operandLocations_[j];
      if (!loc1.aliasesReg(loc2)) {
        continue;
      }

      // loc1 and loc2 alias so we spill one of them. If one is a
      // ValueReg and the other is a PayloadReg, we have to spill the
      // PayloadReg: spilling the ValueReg instead would leave its type
      // register unallocated on 32-bit platforms.
      if (loc1.kind() == OperandLocation::ValueReg) {
        spillOperandToStack(masm, &loc2);
      } else {
        MOZ_ASSERT(loc1.kind() == OperandLocation::PayloadReg);
        spillOperandToStack(masm, &loc1);
        break;  // Spilled loc1, so nothing else will alias it.
      }
    }
  }

#ifdef DEBUG
  assertValidState();
#endif
}
// Collect the set of general purpose registers occupied by the IC's input
// operands. Non-register locations (stack, frame, constants, doubles)
// contribute nothing.
GeneralRegisterSet CacheRegisterAllocator::inputRegisterSet() const {
  MOZ_ASSERT(origInputLocations_.length() == writer_.numInputOperands());

  AllocatableGeneralRegisterSet result;
  for (size_t i = 0; i < writer_.numInputOperands(); i++) {
    const OperandLocation& loc = operandLocations_[i];
    // Inputs must still be at their original locations when this is called.
    MOZ_ASSERT(loc == origInputLocations_[i]);

    switch (loc.kind()) {
      case OperandLocation::PayloadReg:
        result.addUnchecked(loc.payloadReg());
        continue;
      case OperandLocation::ValueReg:
        result.addUnchecked(loc.valueReg());
        continue;
      case OperandLocation::PayloadStack:
      case OperandLocation::ValueStack:
      case OperandLocation::BaselineFrame:
      case OperandLocation::Constant:
      case OperandLocation::DoubleReg:
        continue;
      case OperandLocation::Uninitialized:
        break;
    }
    MOZ_CRASH("Invalid kind");
  }

  return result.set();
}
// Report the statically-known JSValueType of |val|, or JSVAL_TYPE_UNKNOWN if
// the operand is still boxed and could hold any type.
JSValueType CacheRegisterAllocator::knownType(ValOperandId val) const {
  const OperandLocation& loc = operandLocations_[val.id()];
  auto kind = loc.kind();

  // Boxed locations carry no static type information.
  if (kind == OperandLocation::ValueReg ||
      kind == OperandLocation::ValueStack ||
      kind == OperandLocation::BaselineFrame) {
    return JSVAL_TYPE_UNKNOWN;
  }

  // Unboxed payloads remember the type they were unboxed with.
  if (kind == OperandLocation::PayloadStack ||
      kind == OperandLocation::PayloadReg) {
    return loc.payloadType();
  }

  if (kind == OperandLocation::DoubleReg) {
    return JSVAL_TYPE_DOUBLE;
  }

  if (kind == OperandLocation::Constant) {
    return loc.constant().isDouble() ? JSVAL_TYPE_DOUBLE
                                     : loc.constant().extractNonDoubleType();
  }

  MOZ_CRASH("Invalid kind");
}
// Record input |i|'s location from a TypedOrValueRegister by dispatching to
// the matching overload: boxed value, float register, or typed GPR.
void CacheRegisterAllocator::initInputLocation(
    size_t i, const TypedOrValueRegister& reg) {
  if (reg.hasValue()) {
    initInputLocation(i, reg.valueReg());
    return;
  }

  if (reg.typedReg().isFloat()) {
    // Only doubles are expected in float registers here.
    MOZ_ASSERT(reg.type() == MIRType::Double);
    initInputLocation(i, reg.typedReg().fpu());
    return;
  }

  initInputLocation(i, reg.typedReg().gpr(), ValueTypeFromMIRType(reg.type()));
}
// Record input |i|'s location from a ConstantOrRegister: either a constant
// Value or a TypedOrValueRegister, forwarded to the matching overload.
void CacheRegisterAllocator::initInputLocation(
    size_t i, const ConstantOrRegister& value) {
  if (value.constant()) {
    initInputLocation(i, value.value());
    return;
  }
  initInputLocation(i, value.reg());
}
// Move a register-backed operand (ValueReg or PayloadReg) to the native
// stack, preferring to reuse a previously freed slot (freeValueSlots_ /
// freePayloadSlots_) over pushing and growing the stack. Updates the
// operand's recorded location. Note: the freed register(s) are NOT added
// back to availableRegs_ here; callers do that.
void CacheRegisterAllocator::spillOperandToStack(MacroAssembler& masm,
                                                 OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  if (loc->kind() == OperandLocation::ValueReg) {
    if (!freeValueSlots_.empty()) {
      // Reuse a free Value-sized slot. Slots are identified by the value of
      // stackPushed_ at the time they were pushed.
      uint32_t stackPos = freeValueSlots_.popCopy();
      MOZ_ASSERT(stackPos <= stackPushed_);
      masm.storeValue(loc->valueReg(),
                      Address(masm.getStackPointer(), stackPushed_ - stackPos));
      loc->setValueStack(stackPos);
      return;
    }
    stackPushed_ += sizeof(js::Value);
    masm.pushValue(loc->valueReg());
    loc->setValueStack(stackPushed_);
    return;
  }

  MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);

  if (!freePayloadSlots_.empty()) {
    // Reuse a free pointer-sized slot.
    uint32_t stackPos = freePayloadSlots_.popCopy();
    MOZ_ASSERT(stackPos <= stackPushed_);
    masm.storePtr(loc->payloadReg(),
                  Address(masm.getStackPointer(), stackPushed_ - stackPos));
    loc->setPayloadStack(stackPos, loc->payloadType());
    return;
  }
  stackPushed_ += sizeof(uintptr_t);
  masm.push(loc->payloadReg());
  loc->setPayloadStack(stackPushed_, loc->payloadType());
}
// Move a register-backed operand out of its current register(s): into other
// free registers when enough are available (cheaper), otherwise onto the
// native stack.
void CacheRegisterAllocator::spillOperandToStackOrRegister(
    MacroAssembler& masm, OperandLocation* loc) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());

  // If enough registers are available, use them.
  if (loc->kind() == OperandLocation::ValueReg) {
    // A boxed Value needs one GPR on 64-bit, two on 32-bit platforms.
    static const size_t BoxPieces = sizeof(Value) / sizeof(uintptr_t);
    if (availableRegs_.set().size() >= BoxPieces) {
      ValueOperand reg = availableRegs_.takeAnyValue();
      masm.moveValue(loc->valueReg(), reg);
      loc->setValueReg(reg);
      return;
    }
  } else {
    MOZ_ASSERT(loc->kind() == OperandLocation::PayloadReg);
    if (!availableRegs_.empty()) {
      Register reg = availableRegs_.takeAny();
      masm.movePtr(loc->payloadReg(), reg);
      loc->setPayloadReg(reg, loc->payloadType());
      return;
    }
  }

  // Not enough registers available, spill to the stack.
  spillOperandToStack(masm, loc);
}
// Load a stack-spilled payload into |dest| and record the new register
// location. If the payload isn't on top of the stack, its slot is added to
// freePayloadSlots_ for reuse instead of shrinking the stack.
void CacheRegisterAllocator::popPayload(MacroAssembler& masm,
                                        OperandLocation* loc, Register dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(uintptr_t));

  // The payload is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->payloadStack() == stackPushed_) {
    masm.pop(dest);
    stackPushed_ -= sizeof(uintptr_t);
  } else {
    MOZ_ASSERT(loc->payloadStack() < stackPushed_);
    masm.loadPtr(payloadAddress(masm, loc), dest);
    masm.propagateOOM(freePayloadSlots_.append(loc->payloadStack()));
  }

  loc->setPayloadReg(dest, loc->payloadType());
}
// Compute the stack address of a spilled Value. Slots record the value of
// stackPushed_ at push time, so the SP-relative offset is the difference.
Address CacheRegisterAllocator::valueAddress(MacroAssembler& masm,
                                             const OperandLocation* loc) const {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  uint32_t offset = stackPushed_ - loc->valueStack();
  return Address(masm.getStackPointer(), offset);
}
// Compute the stack address of a spilled payload. Slots record the value of
// stackPushed_ at push time, so the SP-relative offset is the difference.
Address CacheRegisterAllocator::payloadAddress(
    MacroAssembler& masm, const OperandLocation* loc) const {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  uint32_t offset = stackPushed_ - loc->payloadStack();
  return Address(masm.getStackPointer(), offset);
}
// Load a stack-spilled Value into |dest| and record the new register
// location. If the Value isn't on top of the stack, its slot is added to
// freeValueSlots_ for reuse instead of shrinking the stack.
void CacheRegisterAllocator::popValue(MacroAssembler& masm,
                                      OperandLocation* loc, ValueOperand dest) {
  MOZ_ASSERT(loc >= operandLocations_.begin() && loc < operandLocations_.end());
  MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));

  // The Value is on the stack. If it's on top of the stack we can just
  // pop it, else we emit a load.
  if (loc->valueStack() == stackPushed_) {
    masm.popValue(dest);
    stackPushed_ -= sizeof(js::Value);
  } else {
    MOZ_ASSERT(loc->valueStack() < stackPushed_);
    masm.loadValue(
        Address(masm.getStackPointer(), stackPushed_ - loc->valueStack()),
        dest);
    masm.propagateOOM(freeValueSlots_.append(loc->valueStack()));
  }

  loc->setValueReg(dest);
}
#ifdef DEBUG
// Debug-only invariant check: no two live operands may share register
// storage. The spilling code depends on this.
void CacheRegisterAllocator::assertValidState() const {
  if (!JitOptions.fullDebugChecks) {
    return;
  }

  size_t numOperands = operandLocations_.length();
  for (size_t outer = 0; outer < numOperands; outer++) {
    const OperandLocation& a = operandLocations_[outer];
    if (a.isUninitialized()) {
      continue;
    }
    // Compare against every earlier live operand (each pair checked once).
    for (size_t inner = 0; inner < outer; inner++) {
      const OperandLocation& b = operandLocations_[inner];
      if (!b.isUninitialized()) {
        MOZ_ASSERT(!a.aliasesReg(b));
      }
    }
  }
}
#endif
// Does this location share any register with |other|? Only register-backed
// kinds can alias; stack slots, frame slots, constants, and double registers
// never do.
bool OperandLocation::aliasesReg(const OperandLocation& other) const {
  MOZ_ASSERT(&other != this);

  auto kind = other.kind_;
  if (kind == PayloadReg) {
    return aliasesReg(other.payloadReg());
  }
  if (kind == ValueReg) {
    return aliasesReg(other.valueReg());
  }
  if (kind == PayloadStack || kind == ValueStack || kind == BaselineFrame ||
      kind == Constant || kind == DoubleReg) {
    return false;
  }

  MOZ_CRASH("Invalid kind");
}
void CacheRegisterAllocator::restoreInputState(MacroAssembler& masm,
bool shouldDiscardStack) {
size_t numInputOperands = origInputLocations_.length();
MOZ_ASSERT(writer_.numInputOperands() == numInputOperands);
for (size_t j = 0; j < numInputOperands; j++) {
const OperandLocation& dest = origInputLocations_[j];
OperandLocation& cur = operandLocations_[j];
if (dest == cur) {
continue;
}
auto autoAssign = mozilla::MakeScopeExit([&] { cur = dest; });
// We have a cycle if a destination register will be used later
// as source register. If that happens, just push the current value
// on the stack and later get it from there.
for (size_t k = j + 1; k < numInputOperands; k++) {
OperandLocation& laterSource = operandLocations_[k];
if (dest.aliasesReg(laterSource)) {
spillOperandToStack(masm, &laterSource);
}
}
if (dest.kind() == OperandLocation::ValueReg) {
// We have to restore a Value register.
switch (cur.kind()) {
case OperandLocation::ValueReg:
masm.moveValue(cur.valueReg(), dest.valueReg());
continue;
case OperandLocation::PayloadReg:
masm.tagValue(cur.payloadType(), cur.payloadReg(), dest.valueReg());
continue;
case OperandLocation::PayloadStack: {
Register scratch = dest.valueReg().scratchReg();
popPayload(masm, &cur, scratch);
masm.tagValue(cur.payloadType(), scratch, dest.valueReg());
continue;
}
case OperandLocation::ValueStack:
popValue(masm, &cur, dest.valueReg());
continue;
case OperandLocation::DoubleReg:
masm.boxDouble(cur.doubleReg(), dest.valueReg(), cur.doubleReg());
continue;
case OperandLocation::Constant:
case OperandLocation::BaselineFrame:
case OperandLocation::Uninitialized:
break;
}
} else if (dest.kind() == OperandLocation::PayloadReg) {
// We have to restore a payload register.
switch (cur.kind()) {
case OperandLocation::ValueReg:
MOZ_ASSERT(dest.payloadType() != JSVAL_TYPE_DOUBLE);
masm.unboxNonDouble(cur.valueReg(), dest.payloadReg(),
dest.payloadType());
continue;
case OperandLocation::PayloadReg:
MOZ_ASSERT(cur.payloadType() == dest.payloadType());
masm.mov(cur.payloadReg(), dest.payloadReg());
continue;
case OperandLocation::PayloadStack: {
MOZ_ASSERT(cur.payloadType() == dest.payloadType());
popPayload(masm, &cur, dest.payloadReg());
continue;
}
case OperandLocation::ValueStack:
MOZ_ASSERT(stackPushed_ >= sizeof(js::Value));