/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/arm/Assembler-arm.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/Maybe.h"
#include "mozilla/Sprintf.h"
#include <type_traits>
#include "gc/Marking.h"
#include "jit/arm/disasm/Disasm-arm.h"
#include "jit/arm/MacroAssembler-arm.h"
#include "jit/AutoWritableJitCode.h"
#include "jit/ExecutableAllocator.h"
#include "jit/MacroAssembler.h"
#include "vm/Realm.h"
using namespace js;
using namespace js::jit;
using mozilla::CountLeadingZeroes32;
using mozilla::DebugOnly;
using LabelDoc = DisassemblerSpew::LabelDoc;
using LiteralDoc = DisassemblerSpew::LiteralDoc;
void dbg_break() {}
// The ABIArgGenerator is used for making system ABI calls and for inter-wasm
// calls. The system ABI can either be SoftFp or HardFp, and inter-wasm calls
// are always HardFp calls. The initialization defaults to HardFp, and the ABI
// choice is made before any system ABI calls with the method "setUseHardFp".
ABIArgGenerator::ABIArgGenerator()
: intRegIndex_(0),
floatRegIndex_(0),
stackOffset_(0),
current_(),
useHardFp_(true) {}
// See the "Parameter Passing" section of the "Procedure Call Standard for the
// ARM Architecture" documentation.
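// For example, under the soft-float ABI a signature (i32, i64, f64) is
// assigned as follows: r0 for the i32, the pair (r2, r3) for the i64 (r1
// is skipped to keep the pair even), and the f64 then goes on the stack
// at an 8-byte-aligned offset.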
ABIArg ABIArgGenerator::softNext(MIRType type) {
switch (type) {
case MIRType::Int32:
case MIRType::Pointer:
case MIRType::RefOrNull:
case MIRType::StackResults:
if (intRegIndex_ == NumIntArgRegs) {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint32_t);
break;
}
current_ = ABIArg(Register::FromCode(intRegIndex_));
intRegIndex_++;
break;
case MIRType::Int64:
// Make sure to use an even register index; increase to the next even
// number when odd.
intRegIndex_ = (intRegIndex_ + 1) & ~1;
if (intRegIndex_ == NumIntArgRegs) {
// Align the stack on 8 bytes.
static const uint32_t align = sizeof(uint64_t) - 1;
stackOffset_ = (stackOffset_ + align) & ~align;
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint64_t);
break;
}
current_ = ABIArg(Register::FromCode(intRegIndex_),
Register::FromCode(intRegIndex_ + 1));
intRegIndex_ += 2;
break;
case MIRType::Float32:
if (intRegIndex_ == NumIntArgRegs) {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint32_t);
break;
}
current_ = ABIArg(Register::FromCode(intRegIndex_));
intRegIndex_++;
break;
case MIRType::Double:
// Make sure to use an even register index; increase to the next even
// number when odd.
intRegIndex_ = (intRegIndex_ + 1) & ~1;
if (intRegIndex_ == NumIntArgRegs) {
// Align the stack on 8 bytes.
static const uint32_t align = sizeof(double) - 1;
stackOffset_ = (stackOffset_ + align) & ~align;
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(double);
break;
}
current_ = ABIArg(Register::FromCode(intRegIndex_),
Register::FromCode(intRegIndex_ + 1));
intRegIndex_ += 2;
break;
default:
MOZ_CRASH("Unexpected argument type");
}
return current_;
}
ABIArg ABIArgGenerator::hardNext(MIRType type) {
switch (type) {
case MIRType::Int32:
case MIRType::Pointer:
case MIRType::RefOrNull:
case MIRType::StackResults:
if (intRegIndex_ == NumIntArgRegs) {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint32_t);
break;
}
current_ = ABIArg(Register::FromCode(intRegIndex_));
intRegIndex_++;
break;
case MIRType::Int64:
// Make sure to use an even register index; increase to the next even
// number when odd.
intRegIndex_ = (intRegIndex_ + 1) & ~1;
if (intRegIndex_ == NumIntArgRegs) {
// Align the stack on 8 bytes.
static const uint32_t align = sizeof(uint64_t) - 1;
stackOffset_ = (stackOffset_ + align) & ~align;
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint64_t);
break;
}
current_ = ABIArg(Register::FromCode(intRegIndex_),
Register::FromCode(intRegIndex_ + 1));
intRegIndex_ += 2;
break;
case MIRType::Float32:
if (floatRegIndex_ == NumFloatArgRegs) {
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint32_t);
break;
}
current_ = ABIArg(VFPRegister(floatRegIndex_, VFPRegister::Single));
floatRegIndex_++;
break;
case MIRType::Double:
// Double registers are composed of two float registers, so we have to
// skip to an even float-register index: an odd single register cannot
// start the pair in which a double value is stored.
floatRegIndex_ = (floatRegIndex_ + 1) & ~1;
if (floatRegIndex_ == NumFloatArgRegs) {
static const uint32_t align = sizeof(double) - 1;
stackOffset_ = (stackOffset_ + align) & ~align;
current_ = ABIArg(stackOffset_);
stackOffset_ += sizeof(uint64_t);
break;
}
current_ = ABIArg(VFPRegister(floatRegIndex_ >> 1, VFPRegister::Double));
floatRegIndex_ += 2;
break;
default:
MOZ_CRASH("Unexpected argument type");
}
return current_;
}
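// E.g. under hard-float, (f32, f64, f32) yields s0, d1, s4: the double is
// aligned to an even single-register index, and (unlike full AAPCS
// back-filling) the skipped s1 is never reused.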
ABIArg ABIArgGenerator::next(MIRType type) {
if (useHardFp_) {
return hardNext(type);
}
return softNext(type);
}
bool js::jit::IsUnaligned(const wasm::MemoryAccessDesc& access) {
if (!access.align()) {
return false;
}
if (access.type() == Scalar::Float64 && access.align() >= 4) {
return false;
}
return access.align() < access.byteSize();
}
// Encode a standard register when it is being used as src1, the dest, and an
// extra register. These should never be called with an InvalidReg.
uint32_t js::jit::RT(Register r) {
MOZ_ASSERT((r.code() & ~0xf) == 0);
return r.code() << 12;
}
uint32_t js::jit::RN(Register r) {
MOZ_ASSERT((r.code() & ~0xf) == 0);
return r.code() << 16;
}
uint32_t js::jit::RD(Register r) {
MOZ_ASSERT((r.code() & ~0xf) == 0);
return r.code() << 12;
}
uint32_t js::jit::RM(Register r) {
MOZ_ASSERT((r.code() & ~0xf) == 0);
return r.code() << 8;
}
// Encode a standard register when it is being used as src1, the dest, and an
// extra register. For these, an InvalidReg is used to indicate an optional
// register that has been omitted.
uint32_t js::jit::maybeRT(Register r) {
if (r == InvalidReg) {
return 0;
}
MOZ_ASSERT((r.code() & ~0xf) == 0);
return r.code() << 12;
}
uint32_t js::jit::maybeRN(Register r) {
if (r == InvalidReg) {
return 0;
}
MOZ_ASSERT((r.code() & ~0xf) == 0);
return r.code() << 16;
}
uint32_t js::jit::maybeRD(Register r) {
if (r == InvalidReg) {
return 0;
}
MOZ_ASSERT((r.code() & ~0xf) == 0);
return r.code() << 12;
}
Register js::jit::toRD(Instruction i) {
return Register::FromCode((i.encode() >> 12) & 0xf);
}
Register js::jit::toR(Instruction i) {
return Register::FromCode(i.encode() & 0xf);
}
Register js::jit::toRM(Instruction i) {
return Register::FromCode((i.encode() >> 8) & 0xf);
}
Register js::jit::toRN(Instruction i) {
return Register::FromCode((i.encode() >> 16) & 0xf);
}
uint32_t js::jit::VD(VFPRegister vr) {
if (vr.isMissing()) {
return 0;
}
// Bits 15,14,13,12, 22.
VFPRegister::VFPRegIndexSplit s = vr.encode();
return s.bit << 22 | s.block << 12;
}
uint32_t js::jit::VN(VFPRegister vr) {
if (vr.isMissing()) {
return 0;
}
// Bits 19,18,17,16, 7.
VFPRegister::VFPRegIndexSplit s = vr.encode();
return s.bit << 7 | s.block << 16;
}
uint32_t js::jit::VM(VFPRegister vr) {
if (vr.isMissing()) {
return 0;
}
// Bits 5, 3,2,1,0.
VFPRegister::VFPRegIndexSplit s = vr.encode();
return s.bit << 5 | s.block;
}
VFPRegister::VFPRegIndexSplit jit::VFPRegister::encode() {
MOZ_ASSERT(!_isInvalid);
switch (kind) {
case Double:
return VFPRegIndexSplit(code_ & 0xf, code_ >> 4);
case Single:
return VFPRegIndexSplit(code_ >> 1, code_ & 1);
default:
// VFP register treated as an integer, NOT a gpr.
return VFPRegIndexSplit(code_ >> 1, code_ & 1);
}
}
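// E.g. single s5 (code_ = 5) splits into block = 2, bit = 1, and double
// d17 (code_ = 0x11) splits into block = 1, bit = 1; VD/VN/VM above then
// place the bit and block at the operand-specific positions.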
bool InstDTR::IsTHIS(const Instruction& i) {
return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
}
InstDTR* InstDTR::AsTHIS(const Instruction& i) {
if (IsTHIS(i)) {
return (InstDTR*)&i;
}
return nullptr;
}
bool InstLDR::IsTHIS(const Instruction& i) {
return (i.encode() & IsDTRMask) == (uint32_t)IsDTR;
}
InstLDR* InstLDR::AsTHIS(const Instruction& i) {
if (IsTHIS(i)) {
return (InstLDR*)&i;
}
return nullptr;
}
InstNOP* InstNOP::AsTHIS(Instruction& i) {
if (IsTHIS(i)) {
return (InstNOP*)&i;
}
return nullptr;
}
bool InstNOP::IsTHIS(const Instruction& i) {
return (i.encode() & 0x0fffffff) == NopInst;
}
bool InstBranchReg::IsTHIS(const Instruction& i) {
return InstBXReg::IsTHIS(i) || InstBLXReg::IsTHIS(i);
}
InstBranchReg* InstBranchReg::AsTHIS(const Instruction& i) {
if (IsTHIS(i)) {
return (InstBranchReg*)&i;
}
return nullptr;
}
void InstBranchReg::extractDest(Register* dest) { *dest = toR(*this); }
bool InstBranchReg::checkDest(Register dest) { return dest == toR(*this); }
bool InstBranchImm::IsTHIS(const Instruction& i) {
return InstBImm::IsTHIS(i) || InstBLImm::IsTHIS(i);
}
InstBranchImm* InstBranchImm::AsTHIS(const Instruction& i) {
if (IsTHIS(i)) {
return (InstBranchImm*)&i;
}
return nullptr;
}
void InstBranchImm::extractImm(BOffImm* dest) { *dest = BOffImm(*this); }
bool InstBXReg::IsTHIS(const Instruction& i) {
return (i.encode() & IsBRegMask) == IsBX;
}
InstBXReg* InstBXReg::AsTHIS(const Instruction& i) {
if (IsTHIS(i)) {
return (InstBXReg*)&i;
}
return nullptr;
}
bool InstBLXReg::IsTHIS(const Instruction& i) {
return (i.encode() & IsBRegMask) == IsBLX;
}
InstBLXReg* InstBLXReg::AsTHIS(const Instruction& i) {
if (IsTHIS(i)) {
return (InstBLXReg*)&i;
}
return nullptr;
}
bool InstBImm::IsTHIS(const Instruction& i) {
return (i.encode() & IsBImmMask) == IsB;
}
InstBImm* InstBImm::AsTHIS(const Instruction& i) {
if (IsTHIS(i)) {
return (InstBImm*)&i;
}
return nullptr;
}
bool InstBLImm::IsTHIS(const Instruction& i) {
return (i.encode() & IsBImmMask) == IsBL;
}
InstBLImm* InstBLImm::AsTHIS(const Instruction& i) {
if (IsTHIS(i)) {
return (InstBLImm*)&i;
}
return nullptr;
}
bool InstMovWT::IsTHIS(Instruction& i) {
return InstMovW::IsTHIS(i) || InstMovT::IsTHIS(i);
}
InstMovWT* InstMovWT::AsTHIS(Instruction& i) {
if (IsTHIS(i)) {
return (InstMovWT*)&i;
}
return nullptr;
}
void InstMovWT::extractImm(Imm16* imm) { *imm = Imm16(*this); }
bool InstMovWT::checkImm(Imm16 imm) {
return imm.decode() == Imm16(*this).decode();
}
void InstMovWT::extractDest(Register* dest) { *dest = toRD(*this); }
bool InstMovWT::checkDest(Register dest) { return dest == toRD(*this); }
bool InstMovW::IsTHIS(const Instruction& i) {
return (i.encode() & IsWTMask) == IsW;
}
InstMovW* InstMovW::AsTHIS(const Instruction& i) {
if (IsTHIS(i)) {
return (InstMovW*)&i;
}
return nullptr;
}
InstMovT* InstMovT::AsTHIS(const Instruction& i) {
if (IsTHIS(i)) {
return (InstMovT*)&i;
}
return nullptr;
}
bool InstMovT::IsTHIS(const Instruction& i) {
return (i.encode() & IsWTMask) == IsT;
}
InstALU* InstALU::AsTHIS(const Instruction& i) {
if (IsTHIS(i)) {
return (InstALU*)&i;
}
return nullptr;
}
bool InstALU::IsTHIS(const Instruction& i) {
return (i.encode() & ALUMask) == 0;
}
void InstALU::extractOp(ALUOp* ret) { *ret = ALUOp(encode() & (0xf << 21)); }
bool InstALU::checkOp(ALUOp op) {
ALUOp mine;
extractOp(&mine);
return mine == op;
}
void InstALU::extractDest(Register* ret) { *ret = toRD(*this); }
bool InstALU::checkDest(Register rd) { return rd == toRD(*this); }
void InstALU::extractOp1(Register* ret) { *ret = toRN(*this); }
bool InstALU::checkOp1(Register rn) { return rn == toRN(*this); }
Operand2 InstALU::extractOp2() { return Operand2(encode()); }
InstCMP* InstCMP::AsTHIS(const Instruction& i) {
if (IsTHIS(i)) {
return (InstCMP*)&i;
}
return nullptr;
}
bool InstCMP::IsTHIS(const Instruction& i) {
return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkDest(r0) &&
InstALU::AsTHIS(i)->checkOp(OpCmp);
}
InstMOV* InstMOV::AsTHIS(const Instruction& i) {
if (IsTHIS(i)) {
return (InstMOV*)&i;
}
return nullptr;
}
bool InstMOV::IsTHIS(const Instruction& i) {
return InstALU::IsTHIS(i) && InstALU::AsTHIS(i)->checkOp1(r0) &&
InstALU::AsTHIS(i)->checkOp(OpMov);
}
Op2Reg Operand2::toOp2Reg() const { return *(Op2Reg*)this; }
Imm16::Imm16(Instruction& inst)
: lower_(inst.encode() & 0xfff),
upper_(inst.encode() >> 16),
invalid_(0xfff) {}
Imm16::Imm16(uint32_t imm)
: lower_(imm & 0xfff), pad_(0), upper_((imm >> 12) & 0xf), invalid_(0) {
MOZ_ASSERT(decode() == imm);
}
Imm16::Imm16() : invalid_(0xfff) {}
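// This mirrors the movw/movt immediate encoding, which splits a 16-bit
// value into imm4:imm12 fields (instruction bits 19-16 and 11-0); pad_
// covers the four bits between them, assuming LSB-first bitfield layout.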
void Assembler::finish() {
flush();
MOZ_ASSERT(!isFinished);
isFinished = true;
}
bool Assembler::appendRawCode(const uint8_t* code, size_t numBytes) {
flush();
return m_buffer.appendRawCode(code, numBytes);
}
bool Assembler::reserve(size_t size) {
// This buffer uses fixed-size chunks so there's no point in reserving
// now vs. on-demand.
return !oom();
}
bool Assembler::swapBuffer(wasm::Bytes& bytes) {
// For now, specialize to the one use case. As long as wasm::Bytes is a
// Vector, not a linked-list of chunks, there's not much we can do other
// than copy.
MOZ_ASSERT(bytes.empty());
if (!bytes.resize(bytesNeeded())) {
return false;
}
m_buffer.executableCopy(bytes.begin());
return true;
}
void Assembler::executableCopy(uint8_t* buffer) {
MOZ_ASSERT(isFinished);
m_buffer.executableCopy(buffer);
}
class RelocationIterator {
CompactBufferReader reader_;
// Offset in bytes.
uint32_t offset_;
public:
explicit RelocationIterator(CompactBufferReader& reader) : reader_(reader) {}
bool read() {
if (!reader_.more()) {
return false;
}
offset_ = reader_.readUnsigned();
return true;
}
uint32_t offset() const { return offset_; }
};
template <class Iter>
const uint32_t* Assembler::GetCF32Target(Iter* iter) {
Instruction* inst1 = iter->cur();
if (inst1->is<InstBranchImm>()) {
// See if we have a simple case, b #offset.
BOffImm imm;
InstBranchImm* jumpB = inst1->as<InstBranchImm>();
jumpB->extractImm(&imm);
return imm.getDest(inst1)->raw();
}
if (inst1->is<InstMovW>()) {
// See if we have the complex case:
// movw r_temp, #imm1
// movt r_temp, #imm2
// bx r_temp
// OR
// movw r_temp, #imm1
// movt r_temp, #imm2
// str pc, [sp]
// bx r_temp
Imm16 targ_bot;
Imm16 targ_top;
Register temp;
// Extract both the temp register and the bottom immediate.
InstMovW* bottom = inst1->as<InstMovW>();
bottom->extractImm(&targ_bot);
bottom->extractDest(&temp);
// Extract the top part of the immediate.
Instruction* inst2 = iter->next();
MOZ_ASSERT(inst2->is<InstMovT>());
InstMovT* top = inst2->as<InstMovT>();
top->extractImm(&targ_top);
// Make sure they are being loaded into the same register.
MOZ_ASSERT(top->checkDest(temp));
// Make sure we're branching to the same register.
#ifdef DEBUG
// A toggled call sometimes has a NOP instead of a branch for the third
// instruction. No way to assert that it's valid in that situation.
Instruction* inst3 = iter->next();
if (!inst3->is<InstNOP>()) {
InstBranchReg* realBranch = nullptr;
if (inst3->is<InstBranchReg>()) {
realBranch = inst3->as<InstBranchReg>();
} else {
Instruction* inst4 = iter->next();
realBranch = inst4->as<InstBranchReg>();
}
MOZ_ASSERT(realBranch->checkDest(temp));
}
#endif
uint32_t* dest = (uint32_t*)(targ_bot.decode() | (targ_top.decode() << 16));
return dest;
}
if (inst1->is<InstLDR>()) {
return *(uint32_t**)inst1->as<InstLDR>()->dest();
}
MOZ_CRASH("unsupported branch relocation");
}
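// E.g. for the pair movw r_temp, #0x5678; movt r_temp, #0x1234, the code
// above reconstructs the destination 0x12345678 (low halfword | high
// halfword << 16).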
uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
InstructionIterator iter((Instruction*)instPtr);
uintptr_t ret = (uintptr_t)GetPtr32Target(iter, nullptr, nullptr);
return ret;
}
const uint32_t* Assembler::GetPtr32Target(InstructionIterator start,
Register* dest, RelocStyle* style) {
Instruction* load1 = start.cur();
Instruction* load2 = start.next();
if (load1->is<InstMovW>() && load2->is<InstMovT>()) {
if (style) {
*style = L_MOVWT;
}
// See if we have the complex case:
// movw r_temp, #imm1
// movt r_temp, #imm2
Imm16 targ_bot;
Imm16 targ_top;
Register temp;
// Extract both the temp register and the bottom immediate.
InstMovW* bottom = load1->as<InstMovW>();
bottom->extractImm(&targ_bot);
bottom->extractDest(&temp);
// Extract the top part of the immediate.
InstMovT* top = load2->as<InstMovT>();
top->extractImm(&targ_top);
// Make sure they are being loaded into the same register.
MOZ_ASSERT(top->checkDest(temp));
if (dest) {
*dest = temp;
}
uint32_t* value =
(uint32_t*)(targ_bot.decode() | (targ_top.decode() << 16));
return value;
}
if (load1->is<InstLDR>()) {
if (style) {
*style = L_LDR;
}
if (dest) {
*dest = toRD(*load1);
}
return *(uint32_t**)load1->as<InstLDR>()->dest();
}
MOZ_CRASH("unsupported relocation");
}
static JitCode* CodeFromJump(InstructionIterator* jump) {
uint8_t* target = (uint8_t*)Assembler::GetCF32Target(jump);
return JitCode::FromExecutable(target);
}
void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
CompactBufferReader& reader) {
RelocationIterator iter(reader);
while (iter.read()) {
InstructionIterator institer((Instruction*)(code->raw() + iter.offset()));
JitCode* child = CodeFromJump(&institer);
TraceManuallyBarrieredEdge(trc, &child, "rel32");
}
}
static void TraceOneDataRelocation(JSTracer* trc,
mozilla::Maybe<AutoWritableJitCode>& awjc,
JitCode* code, InstructionIterator iter) {
Register dest;
Assembler::RelocStyle rs;
const void* prior = Assembler::GetPtr32Target(iter, &dest, &rs);
void* ptr = const_cast<void*>(prior);
// No barrier needed since these are constants.
TraceManuallyBarrieredGenericPointerEdge(
trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
if (ptr != prior) {
if (awjc.isNothing()) {
awjc.emplace(code);
}
MacroAssemblerARM::ma_mov_patch(Imm32(int32_t(ptr)), dest,
Assembler::Always, rs, iter);
}
}
/* static */
void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
CompactBufferReader& reader) {
mozilla::Maybe<AutoWritableJitCode> awjc;
while (reader.more()) {
size_t offset = reader.readUnsigned();
InstructionIterator iter((Instruction*)(code->raw() + offset));
TraceOneDataRelocation(trc, awjc, code, iter);
}
}
void Assembler::copyJumpRelocationTable(uint8_t* dest) {
if (jumpRelocations_.length()) {
memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
}
}
void Assembler::copyDataRelocationTable(uint8_t* dest) {
if (dataRelocations_.length()) {
memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
}
}
void Assembler::processCodeLabels(uint8_t* rawCode) {
for (const CodeLabel& label : codeLabels_) {
Bind(rawCode, label);
}
}
void Assembler::writeCodePointer(CodeLabel* label) {
m_buffer.assertNoPoolAndNoNops();
BufferOffset off = writeInst(-1);
label->patchAt()->bind(off.getOffset());
}
void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
size_t offset = label.patchAt().offset();
size_t target = label.target().offset();
*reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
}
Assembler::Condition Assembler::InvertCondition(Condition cond) {
const uint32_t ConditionInversionBit = 0x10000000;
return Condition(ConditionInversionBit ^ cond);
}
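// E.g. Equal (0x0 << 28) <-> NotEqual (0x1 << 28): flipping bit 28 pairs
// each ARM condition with its logical complement (CS/CC, MI/PL, GE/LT,
// GT/LE, ...).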
Assembler::Condition Assembler::UnsignedCondition(Condition cond) {
switch (cond) {
case Zero:
case NonZero:
return cond;
case LessThan:
case Below:
return Below;
case LessThanOrEqual:
case BelowOrEqual:
return BelowOrEqual;
case GreaterThan:
case Above:
return Above;
case AboveOrEqual:
case GreaterThanOrEqual:
return AboveOrEqual;
default:
MOZ_CRASH("unexpected condition");
}
}
Assembler::Condition Assembler::ConditionWithoutEqual(Condition cond) {
switch (cond) {
case LessThan:
case LessThanOrEqual:
return LessThan;
case Below:
case BelowOrEqual:
return Below;
case GreaterThan:
case GreaterThanOrEqual:
return GreaterThan;
case Above:
case AboveOrEqual:
return Above;
default:
MOZ_CRASH("unexpected condition");
}
}
Assembler::DoubleCondition Assembler::InvertCondition(DoubleCondition cond) {
const uint32_t ConditionInversionBit = 0x10000000;
return DoubleCondition(ConditionInversionBit ^ cond);
}
Imm8::TwoImm8mData Imm8::EncodeTwoImms(uint32_t imm) {
// In the ideal case, we are looking for a number that (in binary) looks
// like:
//   0b((00)*)n_1((00)*)n_2((00)*)
//      left   n1  mid   n2
// where both n_1 and n_2 fit into 8 bits.
// Since this is being done with rotates, we also need to handle the case
// that one of these numbers is in fact split between the left and right
// sides, in which case the constant will look like:
//   0bn_1a((00)*)n_2((00)*)n_1b
//     n1a mid    n2 rgh    n1b
// Also remember, values are rotated by multiples of two, and left, mid or
// right can have length zero.
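// For example, 0x00ff00ff cannot be encoded as a single imm8m, but splits
// into 0x00ff0000 (0xff rotated right by 16) and 0x000000ff (0xff with no
// rotation).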
uint32_t imm1, imm2;
int left = CountLeadingZeroes32(imm) & 0x1E;
uint32_t no_n1 = imm & ~(0xff << (24 - left));
// Not technically needed: this case only happens if we can encode as a
// single imm8m. There is a perfectly reasonable encoding in this case, but
// we shouldn't encourage people to do things like this.
if (no_n1 == 0) {
return TwoImm8mData();
}
int mid = CountLeadingZeroes32(no_n1) & 0x1E;
uint32_t no_n2 =
no_n1 & ~((0xff << ((24 - mid) & 0x1f)) | 0xff >> ((8 + mid) & 0x1f));
if (no_n2 == 0) {
// We hit the easy case, no wraparound.
// Note: a single constant *may* look like this.
int imm1shift = left + 8;
int imm2shift = mid + 8;
imm1 = (imm >> (32 - imm1shift)) & 0xff;
if (imm2shift >= 32) {
imm2shift = 0;
// This assert does not always hold; in fact, asserting it would lead to
// some incredibly subtle bugs.
// assert((imm & 0xff) == no_n1);
imm2 = no_n1;
} else {
imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
MOZ_ASSERT(((no_n1 >> (32 - imm2shift)) | (no_n1 << imm2shift)) == imm2);
}
MOZ_ASSERT((imm1shift & 0x1) == 0);
MOZ_ASSERT((imm2shift & 0x1) == 0);
return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
datastore::Imm8mData(imm2, imm2shift >> 1));
}
// Either it wraps, or it does not fit. If we initially chopped off more
// than 8 bits, then it won't fit.
if (left >= 8) {
return TwoImm8mData();
}
int right = 32 - (CountLeadingZeroes32(no_n2) & 30);
// All remaining set bits *must* fit into the lower 8 bits.
// The right == 8 case should be handled by the previous case.
if (right > 8) {
return TwoImm8mData();
}
// Make sure the initial bits that we removed for no_n1 fit into the
// 8-(32-right) leftmost bits.
if (((imm & (0xff << (24 - left))) << (8 - right)) != 0) {
// BUT we may have removed more bits than we needed to for no_n1. E.g. for
// 0x04104001 we can encode 0x104 with a single op, then 0x04000001 with a
// second; but if we try to encode 0x0410000 we find that we need a second
// op for 0x4000, and 0x1 cannot be included in the encoding of
// 0x04100000.
no_n1 = imm & ~((0xff >> (8 - right)) | (0xff << (24 + right)));
mid = CountLeadingZeroes32(no_n1) & 30;
no_n2 = no_n1 & ~((0xff << ((24 - mid) & 31)) | 0xff >> ((8 + mid) & 31));
if (no_n2 != 0) {
return TwoImm8mData();
}
}
// Now assemble all of this information into two coherent constants. imm1
// is a rotate right from the lower 8 bits.
int imm1shift = 8 - right;
imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift)));
MOZ_ASSERT((imm1shift & ~0x1e) == 0);
// left + 8 + mid is the position of the leftmost bit of n_2.
// We needed to rotate 0x000000ab right by 8 in order to get 0xab000000,
// then shift again by the leftmost bit in order to get the constant that we
// care about.
int imm2shift = mid + 8;
imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
MOZ_ASSERT((imm1shift & 0x1) == 0);
MOZ_ASSERT((imm2shift & 0x1) == 0);
return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
datastore::Imm8mData(imm2, imm2shift >> 1));
}
ALUOp jit::ALUNeg(ALUOp op, Register dest, Register scratch, Imm32* imm,
Register* negDest) {
// Find an alternate ALUOp to get the job done, and use a different imm.
*negDest = dest;
switch (op) {
case OpMov:
*imm = Imm32(~imm->value);
return OpMvn;
case OpMvn:
*imm = Imm32(~imm->value);
return OpMov;
case OpAnd:
*imm = Imm32(~imm->value);
return OpBic;
case OpBic:
*imm = Imm32(~imm->value);
return OpAnd;
case OpAdd:
*imm = Imm32(-imm->value);
return OpSub;
case OpSub:
*imm = Imm32(-imm->value);
return OpAdd;
case OpCmp:
*imm = Imm32(-imm->value);
return OpCmn;
case OpCmn:
*imm = Imm32(-imm->value);
return OpCmp;
case OpTst:
MOZ_ASSERT(dest == InvalidReg);
*imm = Imm32(~imm->value);
*negDest = scratch;
return OpBic;
// orr has orn on thumb2 only.
default:
return OpInvalid;
}
}
bool jit::can_dbl(ALUOp op) {
// Some instructions can't be processed as two separate instructions, such
// as and, and possibly add (when we're setting condition codes). There is
// also some hilarity with *reading* condition codes. For example, adc dest,
// src1, 0xfff (add with carry) can be split up into adc dest, src1, 0xf00;
// add dest, dest, 0xff. Since "reading" the condition code conditionally
// increments the result by one, that only needs to be done on one of the
// two instructions.
switch (op) {
case OpBic:
case OpAdd:
case OpSub:
case OpEor:
case OpOrr:
return true;
default:
return false;
}
}
bool jit::condsAreSafe(ALUOp op) {
// Even when we are setting condition codes, sometimes we can get away with
// splitting an operation into two. For example, if our immediate is
// 0x00ff00ff, and the operation is eors we can split this in half, since x
// ^ 0x00ff0000 ^ 0x000000ff should set all of its condition codes exactly
// the same as x ^ 0x00ff00ff. However, if the operation were adds, we
// cannot split this in half. If the source on the add is 0xfff00ff0, the
// result should be 0xef10ef, but do we set the overflow bit or not?
// Depending on which half is performed first (0x00ff0000 or 0x000000ff) the
// V bit will be set differently, and *not* updating the V bit would be
// wrong. Theoretically, the following should work:
// adds r0, r1, 0x00ff0000;
// addsvs r0, r1, 0x000000ff;
// addvc r0, r1, 0x000000ff;
// But this is 3 instructions, and at that point, we might as well use
// something else.
switch (op) {
case OpBic:
case OpOrr:
case OpEor:
return true;
default:
return false;
}
}
ALUOp jit::getDestVariant(ALUOp op) {
// All of the compare operations are dest-less variants of a standard
// operation. Given the dest-less variant, return the dest-ful variant.
switch (op) {
case OpCmp:
return OpSub;
case OpCmn:
return OpAdd;
case OpTst:
return OpAnd;
case OpTeq:
return OpEor;
default:
return op;
}
}
O2RegImmShift jit::O2Reg(Register r) { return O2RegImmShift(r, LSL, 0); }
O2RegImmShift jit::lsl(Register r, int amt) {
MOZ_ASSERT(0 <= amt && amt <= 31);
return O2RegImmShift(r, LSL, amt);
}
O2RegImmShift jit::lsr(Register r, int amt) {
MOZ_ASSERT(1 <= amt && amt <= 32);
return O2RegImmShift(r, LSR, amt);
}
O2RegImmShift jit::ror(Register r, int amt) {
MOZ_ASSERT(1 <= amt && amt <= 31);
return O2RegImmShift(r, ROR, amt);
}
O2RegImmShift jit::rol(Register r, int amt) {
MOZ_ASSERT(1 <= amt && amt <= 31);
return O2RegImmShift(r, ROR, 32 - amt);
}
O2RegImmShift jit::asr(Register r, int amt) {
MOZ_ASSERT(1 <= amt && amt <= 32);
return O2RegImmShift(r, ASR, amt);
}
O2RegRegShift jit::lsl(Register r, Register amt) {
return O2RegRegShift(r, LSL, amt);
}
O2RegRegShift jit::lsr(Register r, Register amt) {
return O2RegRegShift(r, LSR, amt);
}
O2RegRegShift jit::ror(Register r, Register amt) {
return O2RegRegShift(r, ROR, amt);
}
O2RegRegShift jit::asr(Register r, Register amt) {
return O2RegRegShift(r, ASR, amt);
}
static js::jit::DoubleEncoder doubleEncoder;
/* static */
const js::jit::VFPImm js::jit::VFPImm::One(0x3FF00000);
js::jit::VFPImm::VFPImm(uint32_t top) {
data_ = -1;
datastore::Imm8VFPImmData tmp;
if (doubleEncoder.lookup(top, &tmp)) {
data_ = tmp.encode();
}
}
BOffImm::BOffImm(const Instruction& inst) : data_(inst.encode() & 0x00ffffff) {}
Instruction* BOffImm::getDest(Instruction* src) const {
// TODO: It is probably worthwhile to verify that src is actually a branch.
// NOTE: This does not explicitly shift the offset of the destination left
// by 2, since it is indexing into an array of instruction-sized objects.
return &src[((int32_t(data_) << 8) >> 8) + 2];
}
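// The +2 accounts for the ARM pipeline: a branch offset is relative to the
// branch's own address plus 8 bytes, i.e. two instructions ahead.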
const js::jit::DoubleEncoder::DoubleEntry js::jit::DoubleEncoder::table[256] = {
#include "jit/arm/DoubleEntryTable.tbl"
};
// VFPRegister implementation
VFPRegister VFPRegister::doubleOverlay(unsigned int which) const {
MOZ_ASSERT(!_isInvalid);
MOZ_ASSERT(which == 0);
if (kind != Double) {
return VFPRegister(code_ >> 1, Double);
}
return *this;
}
VFPRegister VFPRegister::singleOverlay(unsigned int which) const {
MOZ_ASSERT(!_isInvalid);
if (kind == Double) {
// There are no corresponding float registers for d16-d31.
MOZ_ASSERT(code_ < 16);
MOZ_ASSERT(which < 2);
return VFPRegister((code_ << 1) + which, Single);
}
MOZ_ASSERT(which == 0);
return VFPRegister(code_, Single);
}
static_assert(
FloatRegisters::TotalDouble <= 16,
"We assume that every Double register also has an Integer personality");
VFPRegister VFPRegister::sintOverlay(unsigned int which) const {
MOZ_ASSERT(!_isInvalid);
if (kind == Double) {
// There are no corresponding float registers for d16-d31.
MOZ_ASSERT(code_ < 16);
MOZ_ASSERT(which < 2);
return VFPRegister((code_ << 1) + which, Int);
}
MOZ_ASSERT(which == 0);
return VFPRegister(code_, Int);
}
VFPRegister VFPRegister::uintOverlay(unsigned int which) const {
MOZ_ASSERT(!_isInvalid);
if (kind == Double) {
// There are no corresponding float registers for d16-d31.
MOZ_ASSERT(code_ < 16);
MOZ_ASSERT(which < 2);
return VFPRegister((code_ << 1) + which, UInt);
}
MOZ_ASSERT(which == 0);
return VFPRegister(code_, UInt);
}
bool Assembler::oom() const {
return AssemblerShared::oom() || m_buffer.oom() || jumpRelocations_.oom() ||
dataRelocations_.oom();
}
// Size of the instruction stream, in bytes, including pools. This function
// expects that all pools that need to be placed have been placed. If they
// haven't, then we need to go and flush the pools :(
size_t Assembler::size() const { return m_buffer.size(); }
// Size of the relocation table, in bytes.
size_t Assembler::jumpRelocationTableBytes() const {
return jumpRelocations_.length();
}
size_t Assembler::dataRelocationTableBytes() const {
return dataRelocations_.length();
}
// Total size of the code and relocation tables, in bytes.
size_t Assembler::bytesNeeded() const {
return size() + jumpRelocationTableBytes() + dataRelocationTableBytes();
}
// Allocate memory for a branch instruction; it will be overwritten
// subsequently and should not be disassembled.
BufferOffset Assembler::allocBranchInst() {
return m_buffer.putInt(Always | InstNOP::NopInst);
}
void Assembler::WriteInstStatic(uint32_t x, uint32_t* dest) {
MOZ_ASSERT(dest != nullptr);
*dest = x;
}
void Assembler::haltingAlign(int alignment) {
// HLT with payload 0xBAAD
m_buffer.align(alignment, 0xE1000070 | (0xBAA << 8) | 0xD);
}
void Assembler::nopAlign(int alignment) { m_buffer.align(alignment); }
BufferOffset Assembler::as_nop() { return writeInst(0xe320f000); }
static uint32_t EncodeAlu(Register dest, Register src1, Operand2 op2, ALUOp op,
SBit s, Assembler::Condition c) {
return (int)op | (int)s | (int)c | op2.encode() |
((dest == InvalidReg) ? 0 : RD(dest)) |
((src1 == InvalidReg) ? 0 : RN(src1));
}
BufferOffset Assembler::as_alu(Register dest, Register src1, Operand2 op2,
ALUOp op, SBit s, Condition c) {
return writeInst(EncodeAlu(dest, src1, op2, op, s, c));
}
BufferOffset Assembler::as_mov(Register dest, Operand2 op2, SBit s,
Condition c) {
return as_alu(dest, InvalidReg, op2, OpMov, s, c);
}
/* static */
void Assembler::as_alu_patch(Register dest, Register src1, Operand2 op2,
ALUOp op, SBit s, Condition c, uint32_t* pos) {
WriteInstStatic(EncodeAlu(dest, src1, op2, op, s, c), pos);
}
/* static */
void Assembler::as_mov_patch(Register dest, Operand2 op2, SBit s, Condition c,
uint32_t* pos) {
as_alu_patch(dest, InvalidReg, op2, OpMov, s, c, pos);
}
BufferOffset Assembler::as_mvn(Register dest, Operand2 op2, SBit s,
Condition c) {
return as_alu(dest, InvalidReg, op2, OpMvn, s, c);
}
// Logical operations.
BufferOffset Assembler::as_and(Register dest, Register src1, Operand2 op2,
SBit s, Condition c) {
return as_alu(dest, src1, op2, OpAnd, s, c);
}
BufferOffset Assembler::as_bic(Register dest, Register src1, Operand2 op2,
SBit s, Condition c) {
return as_alu(dest, src1, op2, OpBic, s, c);
}
BufferOffset Assembler::as_eor(Register dest, Register src1, Operand2 op2,
SBit s, Condition c) {
return as_alu(dest, src1, op2, OpEor, s, c);
}
BufferOffset Assembler::as_orr(Register dest, Register src1, Operand2 op2,
SBit s, Condition c) {
return as_alu(dest, src1, op2, OpOrr, s, c);
}
// Reverse byte operations.
BufferOffset Assembler::as_rev(Register dest, Register src, Condition c) {
return writeInst((int)c | 0b0000'0110'1011'1111'0000'1111'0011'0000 |
RD(dest) | src.code());
}
BufferOffset Assembler::as_rev16(Register dest, Register src, Condition c) {
return writeInst((int)c | 0b0000'0110'1011'1111'0000'1111'1011'0000 |
RD(dest) | src.code());
}
BufferOffset Assembler::as_revsh(Register dest, Register src, Condition c) {
return writeInst((int)c | 0b0000'0110'1111'1111'0000'1111'1011'0000 |
RD(dest) | src.code());
}
// Mathematical operations.
BufferOffset Assembler::as_adc(Register dest, Register src1, Operand2 op2,
SBit s, Condition c) {
return as_alu(dest, src1, op2, OpAdc, s, c);
}
BufferOffset Assembler::as_add(Register dest, Register src1, Operand2 op2,
SBit s, Condition c) {
return as_alu(dest, src1, op2, OpAdd, s, c);
}
BufferOffset Assembler::as_sbc(Register dest, Register src1, Operand2 op2,
SBit s, Condition c) {
return as_alu(dest, src1, op2, OpSbc, s, c);
}
BufferOffset Assembler::as_sub(Register dest, Register src1, Operand2 op2,
SBit s, Condition c) {
return as_alu(dest, src1, op2, OpSub, s, c);
}
BufferOffset Assembler::as_rsb(Register dest, Register src1, Operand2 op2,
SBit s, Condition c) {
return as_alu(dest, src1, op2, OpRsb, s, c);
}
BufferOffset Assembler::as_rsc(Register dest, Register src1, Operand2 op2,
SBit s, Condition c) {
return as_alu(dest, src1, op2, OpRsc, s, c);
}
// Test operations.
BufferOffset Assembler::as_cmn(Register src1, Operand2 op2, Condition c) {
return as_alu(InvalidReg, src1, op2, OpCmn, SetCC, c);
}
BufferOffset Assembler::as_cmp(Register src1, Operand2 op2, Condition c) {
return as_alu(InvalidReg, src1, op2, OpCmp, SetCC, c);
}
BufferOffset Assembler::as_teq(Register src1, Operand2 op2, Condition c) {
return as_alu(InvalidReg, src1, op2, OpTeq, SetCC, c);
}
BufferOffset Assembler::as_tst(Register src1, Operand2 op2, Condition c) {
return as_alu(InvalidReg, src1, op2, OpTst, SetCC, c);
}
static constexpr Register NoAddend{Registers::pc};
static const int SignExtend = 0x06000070;
enum SignExtend {
SxSxtb = 10 << 20,
SxSxth = 11 << 20,
SxUxtb = 14 << 20,
SxUxth = 15 << 20
};
// Sign extension operations.
BufferOffset Assembler::as_sxtb(Register dest, Register src, int rotate,
Condition c) {
return writeInst((int)c | SignExtend | SxSxtb | RN(NoAddend) | RD(dest) |
((rotate & 3) << 10) | src.code());
}
BufferOffset Assembler::as_sxth(Register dest, Register src, int rotate,
Condition c) {
return writeInst((int)c | SignExtend | SxSxth | RN(NoAddend) | RD(dest) |
((rotate & 3) << 10) | src.code());
}
BufferOffset Assembler::as_uxtb(Register dest, Register src, int rotate,
Condition c) {
return writeInst((int)c | SignExtend | SxUxtb | RN(NoAddend) | RD(dest) |
((rotate & 3) << 10) | src.code());
}
BufferOffset Assembler::as_uxth(Register dest, Register src, int rotate,
Condition c) {
return writeInst((int)c | SignExtend | SxUxth | RN(NoAddend) | RD(dest) |
((rotate & 3) << 10) | src.code());
}
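// E.g. as_uxtb(dest, src, 1) zero-extends byte 1 of src: the rotate
// argument selects, in 8-bit steps, which byte (or halfword) is extended.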
static uint32_t EncodeMovW(Register dest, Imm16 imm, Assembler::Condition c) {
MOZ_ASSERT(HasMOVWT());
return 0x03000000 | c | imm.encode() | RD(dest);
}
static uint32_t EncodeMovT(Register dest, Imm16 imm, Assembler::Condition c) {
MOZ_ASSERT(HasMOVWT());
return 0x03400000 | c | imm.encode() | RD(dest);
}
// Not quite ALU worthy, but these are useful nonetheless. These also have
// the issue of being formatted completely differently from the standard
// ALU operations.
BufferOffset Assembler::as_movw(Register dest, Imm16 imm, Condition c) {
return writeInst(EncodeMovW(dest, imm, c));
}
/* static */
void Assembler::as_movw_patch(Register dest, Imm16 imm, Condition c,
Instruction* pos) {
WriteInstStatic(EncodeMovW(dest, imm, c), (uint32_t*)pos);
}
BufferOffset Assembler::as_movt(Register dest, Imm16 imm, Condition c) {
return writeInst(EncodeMovT(dest, imm, c));
}
/* static */
void Assembler::as_movt_patch(Register dest, Imm16 imm, Condition c,
Instruction* pos) {
WriteInstStatic(EncodeMovT(dest, imm, c), (uint32_t*)pos);
}
static const int mull_tag = 0x90;
BufferOffset Assembler::as_genmul(Register dhi, Register dlo, Register rm,
Register rn, MULOp op, SBit s, Condition c) {
return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | s | c |
mull_tag);
}
BufferOffset Assembler::as_mul(Register dest, Register src1, Register src2,
SBit s, Condition c) {
return as_genmul(dest, InvalidReg, src1, src2, OpmMul, s, c);
}
BufferOffset Assembler::as_mla(Register dest, Register acc, Register src1,
Register src2, SBit s, Condition c) {
return as_genmul(dest, acc, src1, src2, OpmMla, s, c);
}
BufferOffset Assembler::as_umaal(Register destHI, Register destLO,
Register src1, Register src2, Condition c) {
return as_genmul(destHI, destLO, src1, src2, OpmUmaal, LeaveCC, c);
}
BufferOffset Assembler::as_mls(Register dest, Register acc, Register src1,
Register src2, Condition c) {
return as_genmul(dest, acc, src1, src2, OpmMls, LeaveCC, c);
}
BufferOffset Assembler::as_umull(Register destHI, Register destLO,
Register src1, Register src2, SBit s,
Condition c) {
return as_genmul(destHI, destLO, src1, src2, OpmUmull, s, c);
}
BufferOffset Assembler::as_umlal(Register destHI, Register destLO,
Register src1, Register src2, SBit s,
Condition c) {
return as_genmul(destHI, destLO, src1, src2, OpmUmlal, s, c);
}
BufferOffset Assembler::as_smull(Register destHI, Register destLO,
Register src1, Register src2, SBit s,
Condition c) {
return as_genmul(destHI, destLO, src1, src2, OpmSmull, s, c);
}
BufferOffset Assembler::as_smlal(Register destHI, Register destLO,
Register src1, Register src2, SBit s,
Condition c) {
return as_genmul(destHI, destLO, src1, src2, OpmSmlal, s, c);
}
BufferOffset Assembler::as_sdiv(Register rd, Register rn, Register rm,
Condition c) {
return writeInst(0x0710f010 | c | RN(rd) | RM(rm) | rn.code());
}
BufferOffset Assembler::as_udiv(Register rd, Register rn, Register rm,
Condition c) {
return writeInst(0x0730f010 | c | RN(rd) | RM(rm) | rn.code());
}
BufferOffset Assembler::as_clz(Register dest, Register src, Condition c) {
MOZ_ASSERT(src != pc && dest != pc);
return writeInst(RD(dest) | src.code() | c | 0x016f0f10);
}
// Data transfer instructions: ldr, str, ldrb, strb. Using an int to
// differentiate between 8 bits and 32 bits is overkill, but meh.
static uint32_t EncodeDtr(LoadStore ls, int size, Index mode, Register rt,
DTRAddr addr, Assembler::Condition c) {
MOZ_ASSERT(mode == Offset || (rt != addr.getBase() && pc != addr.getBase()));
MOZ_ASSERT(size == 32 || size == 8);
return 0x04000000 | ls | (size == 8 ? 0x00400000 : 0) | mode | c | RT(rt) |
addr.encode();
}
BufferOffset Assembler::as_dtr(LoadStore ls, int size, Index mode, Register rt,
DTRAddr addr, Condition c) {
return writeInst(EncodeDtr(ls, size, mode, rt, addr, c));
}
/* static */
void Assembler::as_dtr_patch(LoadStore ls, int size, Index mode, Register rt,
DTRAddr addr, Condition c, uint32_t* dest) {
WriteInstStatic(EncodeDtr(ls, size, mode, rt, addr, c), dest);
}
class PoolHintData {
public:
enum LoadType {
// Make 0 the bogus value, since that is the value most likely to be
// accidentally left somewhere.
PoolBOGUS = 0,
PoolDTR = 1,
PoolBranch = 2,
PoolVDTR = 3
};
private:
uint32_t index_ : 16;
uint32_t cond_ : 4;
uint32_t loadType_ : 2;
uint32_t destReg_ : 5;
uint32_t destType_ : 1;
uint32_t ONES : 4;
static const uint32_t ExpectedOnes = 0xfu;
public:
void init(uint32_t index, Assembler::Condition cond, LoadType lt,
Register destReg) {
index_ = index;
MOZ_ASSERT(index_ == index);
cond_ = cond >> 28;
MOZ_ASSERT(cond_ == cond >> 28);
loadType_ = lt;
ONES = ExpectedOnes;
destReg_ = destReg.code();
destType_ = 0;
}
void init(uint32_t index, Assembler::Condition cond, LoadType lt,
const VFPRegister& destReg) {
MOZ_ASSERT(destReg.isFloat());
index_ = index;
MOZ_ASSERT(index_ == index);
cond_ = cond >> 28;
MOZ_ASSERT(cond_ == cond >> 28);
loadType_ = lt;
ONES = ExpectedOnes;
destReg_ = destReg.id();
destType_ = destReg.isDouble();
}
Assembler::Condition getCond() const {
return Assembler::Condition(cond_ << 28);
}
Register getReg() const { return Register::FromCode(destReg_); }
VFPRegister getVFPReg() const {
VFPRegister r = VFPRegister(
destReg_, destType_ ? VFPRegister::Double : VFPRegister::Single);
return r;
}
int32_t getIndex() const { return index_; }
void setIndex(uint32_t index) {
MOZ_ASSERT(ONES == ExpectedOnes && loadType_ != PoolBOGUS);
index_ = index;
MOZ_ASSERT(index_ == index);
}
LoadType getLoadType() const {
// If this *was* a PoolBranch, but the branch has already been bound,
// then this isn't going to look like a real PoolHintData; but we still
// want to lie about it so everyone knows it *used* to be a branch.
if (ONES != ExpectedOnes) {
return PoolHintData::PoolBranch;
}
return static_cast<LoadType>(loadType_);
}
bool isValidPoolHint() const {
// Most instructions cannot have a condition that is 0xf. Notable
// exceptions are blx and the entire NEON instruction set. For the
// purposes of pool loads, and possibly patched branches, the possible
// instructions are ldr and b, neither of which can have a condition
// code of 0xf.
return ONES == ExpectedOnes;
}
};
union PoolHintPun {
PoolHintData phd;
uint32_t raw;
};
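// Assuming the usual LSB-first bitfield layout, the hint packs into one
// 32-bit instruction slot as index (16) | cond (4) | loadType (2) |
// destReg (5) | destType (1) | ONES (4); ONES == 0xf fakes a condition
// code that no real ldr or b instruction can have, which is exactly what
// isValidPoolHint() tests.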
// Handles all of the other integral data transferring functions: ldrsb, ldrsh,
// ldrd, etc. The size is given in bits.
BufferOffset Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned,
Index mode, Register rt, EDtrAddr addr,
Condition c) {
int extra_bits2 = 0;
int extra_bits1 = 0;
switch (size) {
case 8:
MOZ_ASSERT(IsSigned);
MOZ_ASSERT(ls != IsStore);
extra_bits1 = 0x1;
extra_bits2 = 0x2;
break;
case 16:
// 'case 32' doesn't need to be handled; it is handled by the default
// ldr/str.
extra_bits2 = 0x01;
extra_bits1 = (ls == IsStore) ? 0 : 1;
if (IsSigned) {
MOZ_ASSERT(ls != IsStore);
extra_bits2 |= 0x2;
}
break;
case 64:
extra_bits2 = (ls == IsStore) ? 0x3 : 0x2;
extra_bits1 = 0;
break;
default:
MOZ_CRASH("unexpected size in as_extdtr");
}
return writeInst(extra_bits2 << 5 | extra_bits1 << 20 | 0x90 | addr.encode() |
RT(rt) | mode | c);
}
BufferOffset Assembler::as_dtm(LoadStore ls, Register rn, uint32_t mask,
DTMMode mode, DTMWriteBack wb, Condition c) {
return writeInst(0x08000000 | RN(rn) | ls | mode | mask | c | wb);
}
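// E.g. a push of {r4, lr} is a decrement-before store-multiple with
// write-back, roughly: as_dtm(IsStore, sp, (1 << 4) | (1 << 14), DB,
// WriteBack).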
BufferOffset Assembler::allocLiteralLoadEntry(
size_t numInst, unsigned numPoolEntries, PoolHintPun& php, uint8_t* data,
const LiteralDoc& doc, ARMBuffer::PoolEntry* pe, bool loadToPC) {
uint8_t* inst = (uint8_t*)&php.raw;
MOZ_ASSERT(inst);
MOZ_ASSERT(numInst == 1); // Or fix the disassembly
BufferOffset offs =
m_buffer.allocEntry(numInst, numPoolEntries, inst, data, pe);
propagateOOM(offs.assigned());
#ifdef JS_DISASM_ARM
Instruction* instruction = m_buffer.getInstOrNull(offs);
if (instruction) {
spewLiteralLoad(php, loadToPC, instruction, doc);
}
#endif
return offs;
}
// This is also used for instructions that might be resolved into branches,
// or might not. If dest==pc then it is effectively a branch.
BufferOffset Assembler::as_Imm32Pool(Register dest, uint32_t value,
Condition c) {
PoolHintPun php;
php.phd.init(0, c, PoolHintData::PoolDTR, dest);
BufferOffset offs = allocLiteralLoadEntry(
1, 1, php, (uint8_t*)&value, LiteralDoc(value), nullptr, dest == pc);
return offs;
}
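// E.g. as_Imm32Pool(r0, 0xdeadbeef) emits a pc-relative ldr whose pool
// slot is filled in when the constant pool is dumped.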
/* static */
void Assembler::WritePoolEntry(Instruction* addr, Condition c, uint32_t data) {
MOZ_ASSERT(addr->is<InstLDR>());
*addr->as<InstLDR>()->dest() = data;
MOZ_ASSERT(addr->extractCond() == c);
}
BufferOffset Assembler::as_FImm64Pool(VFPRegister dest, double d, Condition c) {
MOZ_ASSERT(dest.isDouble());
PoolHintPun php;
php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
return allocLiteralLoadEntry(1, 2, php, (uint8_t*)&d, LiteralDoc(d));
}
BufferOffset Assembler::as_FImm32Pool(VFPRegister dest, float f, Condition c) {
// Insert floats into the double pool as they have the same limitations on
// immediate offset. This wastes 4 bytes padding per float. An alternative
// would be to have a separate pool for floats.
MOZ_ASSERT(dest.isSingle());
PoolHintPun php;
php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
return allocLiteralLoadEntry(1, 1, php, (uint8_t*)&f, LiteralDoc(f));
}
// Pool callback stuff:
void Assembler::InsertIndexIntoTag(uint8_t* load_, uint32_t index) {
uint32_t* load = (uint32_t*)load_;
PoolHintPun php;
php.raw = *load;
php.phd.setIndex(index);
*load = php.raw;
}
// PatchConstantPoolLoad takes the address of the instruction that wants to
// be patched, and the address of the start of the constant pool, and
// figures things out from there.
void Assembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
PoolHintData data = *(PoolHintData*)loadAddr;
uint32_t* instAddr = (uint32_t*)loadAddr;
int offset = (char*)constPoolAddr - (char*)loadAddr;
switch (data.getLoadType()) {
case PoolHintData::PoolBOGUS:
MOZ_CRASH("bogus load type!");
case PoolHintData::PoolDTR:
Assembler::as_dtr_patch(
IsLoad, 32, Offset, data.getReg(),
DTRAddr(pc,