/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_LIR_h
#define jit_LIR_h
// This file declares the core data structures for LIR: storage allocations for
// inputs and outputs, as well as the interface instructions must conform to.
#include "mozilla/Array.h"
#include "mozilla/Casting.h"
#include "jit/Bailouts.h"
#include "jit/FixedList.h"
#include "jit/InlineList.h"
#include "jit/JitAllocPolicy.h"
#include "jit/LIROpsGenerated.h"
#include "jit/MIR-wasm.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "jit/Registers.h"
#include "jit/Safepoints.h"
#include "util/Memory.h"
namespace js {
namespace jit {
class LUse;
class LGeneralReg;
class LFloatReg;
class LStackSlot;
class LStackArea;
class LArgument;
class LConstantIndex;
class LInstruction;
class LNode;
class LDefinition;
class MBasicBlock;
class MIRGenerator;
static const uint32_t VREG_INCREMENT = 1;
static const uint32_t THIS_FRAME_ARGSLOT = 0;
#if defined(JS_NUNBOX32)
static const uint32_t BOX_PIECES = 2;
static const uint32_t VREG_TYPE_OFFSET = 0;
static const uint32_t VREG_DATA_OFFSET = 1;
static const uint32_t TYPE_INDEX = 0;
static const uint32_t PAYLOAD_INDEX = 1;
static const uint32_t INT64LOW_INDEX = 0;
static const uint32_t INT64HIGH_INDEX = 1;
#elif defined(JS_PUNBOX64)
static const uint32_t BOX_PIECES = 1;
#else
# error "Unknown!"
#endif
static const uint32_t INT64_PIECES = sizeof(int64_t) / sizeof(uintptr_t);
// Represents storage for an operand. For constants, the pointer is tagged
// with a single bit, and the untagged pointer is a pointer to a Value.
class LAllocation {
  // Tagged word: the low KIND_BITS bits hold the Kind; the remaining bits
  // hold either a small integer payload or a suitably aligned pointer
  // (MConstant* for CONSTANT_VALUE, LInstruction* for STACK_AREA).
  uintptr_t bits_;

  // 3 bits gives us enough for an interesting set of Kinds and also fits
  // within the alignment bits of pointers to Value, which are always
  // 8-byte aligned.
  static const uintptr_t KIND_BITS = 3;
  static const uintptr_t KIND_SHIFT = 0;
  static const uintptr_t KIND_MASK = (1 << KIND_BITS) - 1;

 protected:
#ifdef JS_64BIT
  // On 64-bit targets a full 32 bits of payload fit above the kind bits.
  static const uintptr_t DATA_BITS = sizeof(uint32_t) * 8;
#else
  // On 32-bit targets the payload shares the 32-bit word with the kind bits.
  static const uintptr_t DATA_BITS = (sizeof(uint32_t) * 8) - KIND_BITS;
#endif
  static const uintptr_t DATA_SHIFT = KIND_SHIFT + KIND_BITS;

 public:
  enum Kind {
    CONSTANT_VALUE,  // MConstant*.
    CONSTANT_INDEX,  // Constant arbitrary index.
    USE,         // Use of a virtual register, with physical allocation policy.
    GPR,         // General purpose register.
    FPU,         // Floating-point register.
    STACK_SLOT,  // Stack slot.
    STACK_AREA,  // Stack area.
    ARGUMENT_SLOT  // Argument slot.
  };

  static const uintptr_t DATA_MASK = (uintptr_t(1) << DATA_BITS) - 1;

 protected:
  // Integer-payload accessors. Only valid for kinds that store a small
  // integer, i.e. not STACK_AREA (see hasIns()).
  uint32_t data() const {
    MOZ_ASSERT(!hasIns());
    return mozilla::AssertedCast<uint32_t>(bits_ >> DATA_SHIFT);
  }
  void setData(uintptr_t data) {
    MOZ_ASSERT(!hasIns());
    MOZ_ASSERT(data <= DATA_MASK);
    bits_ &= ~(DATA_MASK << DATA_SHIFT);
    bits_ |= (data << DATA_SHIFT);
  }
  void setKindAndData(Kind kind, uintptr_t data) {
    MOZ_ASSERT(data <= DATA_MASK);
    bits_ = (uintptr_t(kind) << KIND_SHIFT) | data << DATA_SHIFT;
    MOZ_ASSERT(!hasIns());
  }

  // STACK_AREA allocations store an LInstruction* instead of an integer
  // payload; the pointer's zero alignment bits are reused for the kind tag.
  bool hasIns() const { return isStackArea(); }
  const LInstruction* ins() const {
    MOZ_ASSERT(hasIns());
    return reinterpret_cast<const LInstruction*>(bits_ &
                                                 ~(KIND_MASK << KIND_SHIFT));
  }
  LInstruction* ins() {
    MOZ_ASSERT(hasIns());
    return reinterpret_cast<LInstruction*>(bits_ & ~(KIND_MASK << KIND_SHIFT));
  }
  void setKindAndIns(Kind kind, LInstruction* ins) {
    uintptr_t data = reinterpret_cast<uintptr_t>(ins);
    // The instruction pointer must be aligned enough to leave the tag bits
    // clear.
    MOZ_ASSERT((data & (KIND_MASK << KIND_SHIFT)) == 0);
    bits_ = data | (uintptr_t(kind) << KIND_SHIFT);
    MOZ_ASSERT(hasIns());
  }

  LAllocation(Kind kind, uintptr_t data) { setKindAndData(kind, data); }
  LAllocation(Kind kind, LInstruction* ins) { setKindAndIns(kind, ins); }
  explicit LAllocation(Kind kind) { setKindAndData(kind, 0); }

 public:
  // A default-constructed allocation is "bogus": all-zero bits, a pattern no
  // valid allocation uses (CONSTANT_VALUE requires a non-null pointer).
  LAllocation() : bits_(0) { MOZ_ASSERT(isBogus()); }

  // The MConstant pointer must have its low bits cleared.
  explicit LAllocation(const MConstant* c) {
    MOZ_ASSERT(c);
    bits_ = uintptr_t(c);
    MOZ_ASSERT((bits_ & (KIND_MASK << KIND_SHIFT)) == 0);
    bits_ |= CONSTANT_VALUE << KIND_SHIFT;
  }
  inline explicit LAllocation(AnyRegister reg);

  Kind kind() const { return (Kind)((bits_ >> KIND_SHIFT) & KIND_MASK); }

  bool isBogus() const { return bits_ == 0; }
  bool isUse() const { return kind() == USE; }
  bool isConstant() const { return isConstantValue() || isConstantIndex(); }
  bool isConstantValue() const { return kind() == CONSTANT_VALUE; }
  bool isConstantIndex() const { return kind() == CONSTANT_INDEX; }
  bool isGeneralReg() const { return kind() == GPR; }
  bool isFloatReg() const { return kind() == FPU; }
  bool isStackSlot() const { return kind() == STACK_SLOT; }
  bool isStackArea() const { return kind() == STACK_AREA; }
  bool isArgument() const { return kind() == ARGUMENT_SLOT; }
  bool isAnyRegister() const { return isGeneralReg() || isFloatReg(); }
  bool isMemory() const { return isStackSlot() || isArgument(); }
  inline uint32_t memorySlot() const;

  // Checked downcasts to the concrete allocation subclasses.
  inline LUse* toUse();
  inline const LUse* toUse() const;
  inline const LGeneralReg* toGeneralReg() const;
  inline const LFloatReg* toFloatReg() const;
  inline const LStackSlot* toStackSlot() const;
  inline LStackArea* toStackArea();
  inline const LStackArea* toStackArea() const;
  inline const LArgument* toArgument() const;
  inline const LConstantIndex* toConstantIndex() const;
  inline AnyRegister toAnyRegister() const;

  const MConstant* toConstant() const {
    MOZ_ASSERT(isConstantValue());
    return reinterpret_cast<const MConstant*>(bits_ &
                                              ~(KIND_MASK << KIND_SHIFT));
  }

  // Equality is raw bit equality: same kind and same payload.
  bool operator==(const LAllocation& other) const {
    return bits_ == other.bits_;
  }
  bool operator!=(const LAllocation& other) const {
    return bits_ != other.bits_;
  }

  HashNumber hash() const { return bits_; }

  uintptr_t asRawBits() const { return bits_; }

  bool aliases(const LAllocation& other) const;

#ifdef JS_JITSPEW
  UniqueChars toString() const;
  void dump() const;
#endif
};
// An LAllocation of kind USE: a use of a virtual register plus the policy
// the register allocator must satisfy for this operand. The integer payload
// packs, from least significant: policy, fixed register code, used-at-start
// flag, and virtual register number.
class LUse : public LAllocation {
  // Allocation policy (see enum Policy below).
  static const uint32_t POLICY_BITS = 3;
  static const uint32_t POLICY_SHIFT = 0;
  static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;

  // Fixed register code; only meaningful when the policy is FIXED.
#ifdef JS_CODEGEN_ARM64
  static const uint32_t REG_BITS = 7;
#else
  static const uint32_t REG_BITS = 6;
#endif
  static const uint32_t REG_SHIFT = POLICY_SHIFT + POLICY_BITS;
  static const uint32_t REG_MASK = (1 << REG_BITS) - 1;

  // Whether the physical register for this operand may be reused for a def.
  static const uint32_t USED_AT_START_BITS = 1;
  static const uint32_t USED_AT_START_SHIFT = REG_SHIFT + REG_BITS;
  static const uint32_t USED_AT_START_MASK = (1 << USED_AT_START_BITS) - 1;

  // The REG field will hold the register code for any Register or
  // FloatRegister, though not for an AnyRegister.
  static_assert(std::max(Registers::Total, FloatRegisters::Total) <=
                    REG_MASK + 1,
                "The field must be able to represent any register code");

 public:
  // Virtual registers get the remaining bits.
  static const uint32_t VREG_BITS =
      DATA_BITS - (USED_AT_START_SHIFT + USED_AT_START_BITS);
  static const uint32_t VREG_SHIFT = USED_AT_START_SHIFT + USED_AT_START_BITS;
  static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;

  enum Policy {
    // Input should be in a read-only register or stack slot.
    ANY,

    // Input must be in a read-only register.
    REGISTER,

    // Input must be in a specific, read-only register.
    FIXED,

    // Keep the used virtual register alive, and use whatever allocation is
    // available. This is similar to ANY but hints to the register allocator
    // that it is never useful to optimize this site.
    KEEPALIVE,

    // Input must be allocated on the stack. Only used when extracting stack
    // results from stack result areas.
    STACK,

    // For snapshot inputs, indicates that the associated instruction will
    // write this input to its output register before bailing out.
    // The register allocator may thus allocate that output register, and
    // does not need to keep the virtual register alive (alternatively,
    // this may be treated as KEEPALIVE).
    RECOVERED_INPUT
  };

  // Pack policy, register code and the used-at-start flag into the payload;
  // the vreg field starts as zero (see setVirtualRegister()).
  // NOTE(review): this looks like an internal helper that could be private;
  // confirm against upstream before changing access.
  void set(Policy policy, uint32_t reg, bool usedAtStart) {
    MOZ_ASSERT(reg <= REG_MASK, "Register code must fit in field");
    setKindAndData(USE, (policy << POLICY_SHIFT) | (reg << REG_SHIFT) |
                            ((usedAtStart ? 1 : 0) << USED_AT_START_SHIFT));
  }

 public:
  LUse(uint32_t vreg, Policy policy, bool usedAtStart = false) {
    set(policy, 0, usedAtStart);
    setVirtualRegister(vreg);
  }
  explicit LUse(Policy policy, bool usedAtStart = false) {
    set(policy, 0, usedAtStart);
  }
  explicit LUse(Register reg, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
  }
  explicit LUse(FloatRegister reg, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
  }
  LUse(Register reg, uint32_t virtualRegister, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
    setVirtualRegister(virtualRegister);
  }
  LUse(FloatRegister reg, uint32_t virtualRegister, bool usedAtStart = false) {
    set(FIXED, reg.code(), usedAtStart);
    setVirtualRegister(virtualRegister);
  }

  void setVirtualRegister(uint32_t index) {
    MOZ_ASSERT(index < VREG_MASK);
    uint32_t old = data() & ~(VREG_MASK << VREG_SHIFT);
    setData(old | (index << VREG_SHIFT));
  }

  Policy policy() const {
    Policy policy = (Policy)((data() >> POLICY_SHIFT) & POLICY_MASK);
    return policy;
  }
  uint32_t virtualRegister() const {
    uint32_t index = (data() >> VREG_SHIFT) & VREG_MASK;
    // A valid use always carries a nonzero virtual register.
    MOZ_ASSERT(index != 0);
    return index;
  }
  uint32_t registerCode() const {
    MOZ_ASSERT(policy() == FIXED);
    return (data() >> REG_SHIFT) & REG_MASK;
  }
  bool isFixedRegister() const { return policy() == FIXED; }
  bool usedAtStart() const {
    return !!((data() >> USED_AT_START_SHIFT) & USED_AT_START_MASK);
  }
};
static const uint32_t MAX_VIRTUAL_REGISTERS = LUse::VREG_MASK;
// Storage for a boxed js::Value. On nunbox (32-bit) configurations a Value
// occupies two allocations (type tag and payload); on punbox (64-bit)
// configurations it occupies a single allocation.
class LBoxAllocation {
#ifdef JS_NUNBOX32
  LAllocation type_;
  LAllocation payload_;
#else
  LAllocation value_;
#endif

 public:
#ifdef JS_NUNBOX32
  LBoxAllocation(LAllocation type, LAllocation payload)
      : type_(type), payload_(payload) {}

  LAllocation type() const { return type_; }
  LAllocation payload() const { return payload_; }
#else
  explicit LBoxAllocation(LAllocation value) : value_(value) {}

  LAllocation value() const { return value_; }
#endif
};
// Storage for a 64-bit value, parameterized over the piece type (e.g.
// LAllocation or LDefinition). On 32-bit targets an int64 needs two pieces
// (high and low words); on 64-bit targets it needs one.
template <class ValT>
class LInt64Value {
#if JS_BITS_PER_WORD == 32
  ValT high_;
  ValT low_;
#else
  ValT value_;
#endif

 public:
  // Default construction default-constructs each piece.
  LInt64Value() = default;

#if JS_BITS_PER_WORD == 32
  LInt64Value(ValT high, ValT low) : high_(high), low_(low) {}

  ValT high() const { return high_; }
  ValT low() const { return low_; }

  const ValT* pointerHigh() const { return &high_; }
  const ValT* pointerLow() const { return &low_; }
#else
  explicit LInt64Value(ValT value) : value_(value) {}

  ValT value() const { return value_; }
  const ValT* pointer() const { return &value_; }
#endif
};
using LInt64Allocation = LInt64Value<LAllocation>;
// A fixed allocation naming a specific general purpose register.
class LGeneralReg : public LAllocation {
 public:
  explicit LGeneralReg(Register reg) { setKindAndData(GPR, reg.code()); }

  // Decode the stored register code back into a Register.
  Register reg() const {
    uint32_t code = data();
    return Register::FromCode(code);
  }
};
// A fixed allocation naming a specific floating point register.
class LFloatReg : public LAllocation {
 public:
  explicit LFloatReg(FloatRegister reg) { setKindAndData(FPU, reg.code()); }

  // Decode the stored register code back into a FloatRegister.
  FloatRegister reg() const {
    uint32_t code = data();
    return FloatRegister::FromCode(code);
  }
};
// An allocation carrying an arbitrary small integer index in its payload.
class LConstantIndex : public LAllocation {
  explicit LConstantIndex(uint32_t index) {
    setKindAndData(CONSTANT_INDEX, index);
  }

 public:
  // Sole way to build one; the raw constructor stays private.
  static LConstantIndex FromIndex(uint32_t index) {
    return LConstantIndex(index);
  }

  uint32_t index() const {
    uint32_t value = data();
    return value;
  }
};
// Stack slots are indices into the stack. The indices are byte indices.
class LStackSlot : public LAllocation {
  // Stack slots are aligned to 32-bit word boundaries.
  static constexpr uint32_t SLOT_ALIGNMENT = 4;

  // Stack slot width is stored in the two least significant bits.
  static constexpr uint32_t WIDTH_MASK = SLOT_ALIGNMENT - 1;

  // Remaining bits hold the stack slot offset.
  static constexpr uint32_t SLOT_MASK = ~WIDTH_MASK;

 public:
  // Width of the value stored in the slot; see ByteWidth() for byte sizes.
  enum Width {
    Word,
    DoubleWord,
    QuadWord,
  };

  // Byte offset and Width packed into one uint32_t: the offset's 4-byte
  // alignment guarantees its low two bits are free to hold the Width.
  class SlotAndWidth {
    uint32_t data_;

    explicit SlotAndWidth(uint32_t data) : data_(data) {}

   public:
    // Rewrap an already-packed value, as returned by data().
    static SlotAndWidth fromData(uint32_t data) { return SlotAndWidth(data); }

    explicit SlotAndWidth(uint32_t slot, Width width) {
      MOZ_ASSERT(slot % SLOT_ALIGNMENT == 0);
      MOZ_ASSERT(uint32_t(width) < SLOT_ALIGNMENT);
      data_ = slot | uint32_t(width);
    }

    uint32_t data() const { return data_; }
    uint32_t slot() const { return data_ & SLOT_MASK; }
    Width width() const { return Width(data_ & WIDTH_MASK); }
  };

  explicit LStackSlot(SlotAndWidth slotAndWidth)
      : LAllocation(STACK_SLOT, slotAndWidth.data()) {}
  LStackSlot(uint32_t slot, Width width)
      : LStackSlot(SlotAndWidth(slot, width)) {}

  uint32_t slot() const { return SlotAndWidth::fromData(data()).slot(); }
  Width width() const { return SlotAndWidth::fromData(data()).width(); }

  // |Type| is LDefinition::Type, but can't forward declare a nested definition.
  template <typename Type>
  static Width width(Type type);

  // Size in bytes corresponding to each Width.
  static uint32_t ByteWidth(Width width) {
    switch (width) {
      case Width::Word:
        return 4;
      case Width::DoubleWord:
        return 8;
      case Width::QuadWord:
        return 16;
    }
    MOZ_CRASH("invalid width");
  }
};
// Stack area indicates a contiguous stack allocation meant to receive call
// results that don't fit in registers.
class LStackArea : public LAllocation {
 public:
  // The payload stores the defining LInstruction* (see LAllocation::ins());
  // the base/size accessors below are defined out of line.
  explicit LStackArea(LInstruction* stackArea)
      : LAllocation(STACK_AREA, stackArea) {}

  // Byte index of base of stack area, in the same coordinate space as
  // LStackSlot::slot().
  inline uint32_t base() const;
  inline void setBase(uint32_t base);

  // Size in bytes of the stack area.
  inline uint32_t size() const;
  inline uint32_t alignment() const { return 8; }

  // Iterates over the individual result allocations within the area.
  class ResultIterator {
    const LStackArea& alloc_;
    uint32_t idx_;

   public:
    explicit ResultIterator(const LStackArea& alloc) : alloc_(alloc), idx_(0) {}

    inline bool done() const;
    inline void next();
    inline LAllocation alloc() const;
    inline bool isWasmAnyRef() const;

    // An iterator converts to true while there are results remaining.
    explicit operator bool() const { return !done(); }
  };

  ResultIterator results() const { return ResultIterator(*this); }

  inline LStackSlot resultAlloc(LInstruction* lir, LDefinition* def) const;
};
// Arguments are reverse indices into the stack. The indices are byte indices.
class LArgument : public LAllocation {
 public:
  explicit LArgument(uint32_t index) {
    setKindAndData(ARGUMENT_SLOT, index);
  }

  // Byte index of the argument slot.
  uint32_t index() const { return data(); }
};
// Byte index of a memory allocation: the slot offset for a stack slot, or
// the argument index for an argument slot.
inline uint32_t LAllocation::memorySlot() const {
  MOZ_ASSERT(isMemory());
  if (isStackSlot()) {
    return toStackSlot()->slot();
  }
  return toArgument()->index();
}
// Represents storage for a definition.
class LDefinition {
  // Bits containing policy, type, and virtual register.
  uint32_t bits_;

  // Before register allocation, this optionally contains a fixed policy.
  // Register allocation assigns this field to a physical policy if none is
  // fixed.
  //
  // Right now, pre-allocated outputs are limited to the following:
  //   * Physical argument stack slots.
  //   * Physical registers.
  LAllocation output_;

  // Layout of |bits_|, from least significant: type, policy, vreg.
  static const uint32_t TYPE_BITS = 4;
  static const uint32_t TYPE_SHIFT = 0;
  static const uint32_t TYPE_MASK = (1 << TYPE_BITS) - 1;
  static const uint32_t POLICY_BITS = 2;
  static const uint32_t POLICY_SHIFT = TYPE_SHIFT + TYPE_BITS;
  static const uint32_t POLICY_MASK = (1 << POLICY_BITS) - 1;
  static const uint32_t VREG_BITS =
      (sizeof(uint32_t) * 8) - (POLICY_BITS + TYPE_BITS);
  static const uint32_t VREG_SHIFT = POLICY_SHIFT + POLICY_BITS;
  static const uint32_t VREG_MASK = (1 << VREG_BITS) - 1;

 public:
  // Note that definitions, by default, are always allocated a register,
  // unless the policy specifies that an input can be re-used and that input
  // is a stack slot.
  enum Policy {
    // The policy is predetermined by the LAllocation attached to this
    // definition. The allocation may be:
    //   * A register, which may not appear as any fixed temporary.
    //   * A stack slot or argument.
    //
    // Register allocation will not modify a fixed allocation.
    FIXED,

    // A random register of an appropriate class will be assigned.
    REGISTER,

    // An area on the stack must be assigned. Used when defining stack results
    // and stack result areas.
    STACK,

    // One definition per instruction must re-use the first input
    // allocation, which (for now) must be a register.
    MUST_REUSE_INPUT
  };

  enum Type {
    GENERAL,  // Generic, integer or pointer-width data (GPR).
    INT32,    // int32 data (GPR).
    OBJECT,   // Pointer that may be collected as garbage (GPR).
    SLOTS,  // Slots/elements/wasm array data pointer that may be moved by minor
            // GCs (GPR).
    WASM_ANYREF,  // Tagged pointer that may be collected as garbage (GPR).
    FLOAT32,      // 32-bit floating-point value (FPU).
    DOUBLE,       // 64-bit floating-point value (FPU).
    SIMD128,      // 128-bit SIMD vector (FPU).
    STACKRESULTS,  // A variable-size stack allocation that may contain objects.
#ifdef JS_NUNBOX32
    // A type virtual register must be followed by a payload virtual
    // register, as both will be tracked as a single gcthing.
    TYPE,
    PAYLOAD
#else
    BOX  // Joined box, for punbox systems. (GPR, gcthing)
#endif
  };

  // Pack vreg |index|, |type| and |policy| into |bits_|.
  void set(uint32_t index, Type type, Policy policy) {
    static_assert(MAX_VIRTUAL_REGISTERS <= VREG_MASK);
    bits_ =
        (index << VREG_SHIFT) | (policy << POLICY_SHIFT) | (type << TYPE_SHIFT);
#ifndef ENABLE_WASM_SIMD
    // SIMD definitions must not appear in builds without Wasm SIMD support.
    MOZ_ASSERT(this->type() != SIMD128);
#endif
  }

 public:
  LDefinition(uint32_t index, Type type, Policy policy = REGISTER) {
    set(index, type, policy);
  }

  explicit LDefinition(Type type, Policy policy = REGISTER) {
    set(0, type, policy);
  }

  // Fixed-output constructors: the definition must end up in allocation |a|.
  LDefinition(Type type, const LAllocation& a) : output_(a) {
    set(0, type, FIXED);
  }

  LDefinition(uint32_t index, Type type, const LAllocation& a) : output_(a) {
    set(index, type, FIXED);
  }

  // A bogus definition: FIXED policy with a bogus (all-zero) output.
  LDefinition() : bits_(0) { MOZ_ASSERT(isBogusTemp()); }

  static LDefinition BogusTemp() { return LDefinition(); }

  Policy policy() const {
    return (Policy)((bits_ >> POLICY_SHIFT) & POLICY_MASK);
  }
  Type type() const { return (Type)((bits_ >> TYPE_SHIFT) & TYPE_MASK); }

  // Whether float register |reg| can hold a value of float |type|.
  static bool isFloatRegCompatible(Type type, FloatRegister reg) {
#ifdef JS_CODEGEN_RISCV64
    // On RISC-V 64, FLOAT32 and DOUBLE accept either single or double
    // registers.
    if (type == FLOAT32 || type == DOUBLE) {
      return reg.isSingle() || reg.isDouble();
    }
#else
    if (type == FLOAT32) {
      return reg.isSingle();
    }
    if (type == DOUBLE) {
      return reg.isDouble();
    }
#endif
    MOZ_ASSERT(type == SIMD128);
    return reg.isSimd128();
  }

  bool isCompatibleReg(const AnyRegister& r) const {
    if (isFloatReg() && r.isFloat()) {
      return isFloatRegCompatible(type(), r.fpu());
    }
    return !isFloatReg() && !r.isFloat();
  }

  bool isCompatibleDef(const LDefinition& other) const {
#if defined(JS_CODEGEN_ARM)
    // On ARM, float definitions are only interchangeable when the types
    // match exactly.
    if (isFloatReg() && other.isFloatReg()) {
      return type() == other.type();
    }
    return !isFloatReg() && !other.isFloatReg();
#else
    return isFloatReg() == other.isFloatReg();
#endif
  }

  static bool isFloatReg(Type type) {
    return type == FLOAT32 || type == DOUBLE || type == SIMD128;
  }
  bool isFloatReg() const { return isFloatReg(type()); }

  uint32_t virtualRegister() const {
    uint32_t index = (bits_ >> VREG_SHIFT) & VREG_MASK;
    // Assert kept disabled upstream: a default-constructed (bogus)
    // definition also has vreg 0.
    // MOZ_ASSERT(index != 0);
    return index;
  }

  LAllocation* output() { return &output_; }
  const LAllocation* output() const { return &output_; }

  bool isFixed() const { return policy() == FIXED; }
  bool isBogusTemp() const { return isFixed() && output()->isBogus(); }

  void setVirtualRegister(uint32_t index) {
    MOZ_ASSERT(index < VREG_MASK);
    bits_ &= ~(VREG_MASK << VREG_SHIFT);
    bits_ |= index << VREG_SHIFT;
  }

  // Assign the output allocation. Unless |a| is a use, the policy becomes
  // FIXED.
  void setOutput(const LAllocation& a) {
    output_ = a;
    if (!a.isUse()) {
      bits_ &= ~(POLICY_MASK << POLICY_SHIFT);
      bits_ |= FIXED << POLICY_SHIFT;
    }
  }

  // For MUST_REUSE_INPUT: the reused operand index is stashed in the output
  // allocation as a constant index.
  void setReusedInput(uint32_t operand) {
    output_ = LConstantIndex::FromIndex(operand);
  }
  uint32_t getReusedInput() const {
    MOZ_ASSERT(policy() == LDefinition::MUST_REUSE_INPUT);
    return output_.toConstantIndex()->index();
  }

  // Returns true if this definition should be added to safepoints for GC
  // tracing. This includes Value type tags on 32-bit and slots/elements
  // pointers.
  inline bool isSafepointGCType(LNode* ins) const;

  // Map a MIRType to the LDefinition::Type used to hold it.
  static inline Type TypeFrom(MIRType type) {
    switch (type) {
      case MIRType::Boolean:
      case MIRType::Int32:
        // The stack slot allocator doesn't currently support allocating
        // 1-byte slots, so for now we lower MIRType::Boolean into INT32.
        static_assert(sizeof(bool) <= sizeof(int32_t),
                      "bool doesn't fit in an int32 slot");
        return LDefinition::INT32;
      case MIRType::String:
      case MIRType::Symbol:
      case MIRType::BigInt:
      case MIRType::Object:
        return LDefinition::OBJECT;
      case MIRType::Double:
        return LDefinition::DOUBLE;
      case MIRType::Float32:
        return LDefinition::FLOAT32;
#if defined(JS_PUNBOX64)
      case MIRType::Value:
        return LDefinition::BOX;
#endif
      case MIRType::Slots:
      case MIRType::Elements:
      case MIRType::WasmArrayData:
        return LDefinition::SLOTS;
      case MIRType::WasmAnyRef:
        return LDefinition::WASM_ANYREF;
      case MIRType::Pointer:
      case MIRType::IntPtr:
        return LDefinition::GENERAL;
#if defined(JS_PUNBOX64)
      case MIRType::Int64:
        return LDefinition::GENERAL;
#endif
      case MIRType::StackResults:
        return LDefinition::STACKRESULTS;
      case MIRType::Simd128:
        return LDefinition::SIMD128;
      default:
        MOZ_CRASH("unexpected type");
    }
  }

  UniqueChars toString() const;

#ifdef JS_JITSPEW
  void dump() const;
#endif
};
// A 64-bit definition: one LDefinition per machine word (see LInt64Value).
class LInt64Definition : public LInt64Value<LDefinition> {
 public:
  using LInt64Value<LDefinition>::LInt64Value;

  static LInt64Definition BogusTemp() { return LInt64Definition(); }

  // True when this is a default-constructed (bogus) definition.
  bool isBogusTemp() const {
#if JS_BITS_PER_WORD == 32
    // Both halves must agree on bogus-ness.
    MOZ_ASSERT(high().isBogusTemp() == low().isBogusTemp());
    return high().isBogusTemp();
#else
    return value().isBogusTemp();
#endif
  }
};
// Map an LDefinition::Type to the stack slot width needed to hold it.
template <>
inline LStackSlot::Width LStackSlot::width(LDefinition::Type type) {
  switch (type) {
#if JS_BITS_PER_WORD == 32
    // Pointer-sized types occupy a single word on 32-bit targets.
    case LDefinition::GENERAL:
    case LDefinition::OBJECT:
    case LDefinition::SLOTS:
    case LDefinition::WASM_ANYREF:
#endif
#ifdef JS_NUNBOX32
    // Nunbox type tags and payloads are each one word.
    case LDefinition::TYPE:
    case LDefinition::PAYLOAD:
#endif
    case LDefinition::INT32:
    case LDefinition::FLOAT32:
      return LStackSlot::Word;
#if JS_BITS_PER_WORD == 64
    // Pointer-sized types occupy a double word on 64-bit targets.
    case LDefinition::GENERAL:
    case LDefinition::OBJECT:
    case LDefinition::SLOTS:
    case LDefinition::WASM_ANYREF:
#endif
#ifdef JS_PUNBOX64
    case LDefinition::BOX:
#endif
    case LDefinition::DOUBLE:
      return LStackSlot::DoubleWord;
    case LDefinition::SIMD128:
      return LStackSlot::QuadWord;
    case LDefinition::STACKRESULTS:
      MOZ_CRASH("Stack results area must be allocated manually");
  }
  MOZ_CRASH("Unknown slot type");
}
// Forward declarations of LIR types.
#define LIROP(op) class L##op;
LIR_OPCODE_LIST(LIROP)
#undef LIROP
class LSnapshot;
class LSafepoint;
class LElementVisitor;
constexpr size_t MaxNumLInstructionOperands = 63;
// The common base class for LPhi and LInstruction.
class LNode {
protected:
MDefinition* mir_;
private:
LBlock* block_;
uint32_t id_;
protected:
// Bitfields below are all uint32_t to make sure MSVC packs them correctly.
uint32_t op_ : 10;
uint32_t isCall_ : 1;
// LPhi::numOperands() may not fit in this bitfield, so we only use this
// field for LInstruction.
uint32_t nonPhiNumOperands_ : 6;
static_assert((1 << 6) - 1 == MaxNumLInstructionOperands,
"packing constraints");
// For LInstruction, the first operand is stored at offset
// sizeof(LInstruction) + nonPhiOperandsOffset_ * sizeof(uintptr_t).
uint32_t nonPhiOperandsOffset_ : 5;
uint32_t numDefs_ : 4;
uint32_t numTemps_ : 4;
public:
enum class Opcode {
#define LIROP(name) name,
LIR_OPCODE_LIST(LIROP)
#undef LIROP
Invalid
};
LNode(Opcode op, uint32_t nonPhiNumOperands, uint32_t numDefs,
uint32_t numTemps)
: mir_(nullptr),
block_(nullptr),
id_(0),
op_(uint32_t(op)),
isCall_(false),
nonPhiNumOperands_(nonPhiNumOperands),
nonPhiOperandsOffset_(0),
numDefs_(numDefs),
numTemps_(numTemps) {
MOZ_ASSERT(op < Opcode::Invalid);
MOZ_ASSERT(op_ == uint32_t(op), "opcode must fit in bitfield");
MOZ_ASSERT(nonPhiNumOperands_ == nonPhiNumOperands,
"nonPhiNumOperands must fit in bitfield");
MOZ_ASSERT(numDefs_ == numDefs, "numDefs must fit in bitfield");
MOZ_ASSERT(numTemps_ == numTemps, "numTemps must fit in bitfield");
}
const char* opName() {
switch (op()) {
#define LIR_NAME_INS(name) \
case Opcode::name: \
return #name;