// NOTE(review): the three lines below are page-navigation residue from an
// HTML capture of this file; commented out so the translation unit compiles.
// Source code
// Revision control
// Other Tools

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/arm/MacroAssembler-arm.h"
#include "mozilla/Attributes.h"
#include "mozilla/Casting.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/Maybe.h"
#include "jit/arm/Simulator-arm.h"
#include "jit/AtomicOp.h"
#include "jit/AtomicOperations.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/JitFrames.h"
#include "jit/MacroAssembler.h"
#include "jit/MoveEmitter.h"
#include "util/Memory.h"
#include "vm/JitActivation.h" // js::jit::JitActivation
#include "jit/MacroAssembler-inl.h"
using namespace js;
using namespace jit;
using mozilla::Abs;
using mozilla::BitwiseCast;
using mozilla::DebugOnly;
using mozilla::IsPositiveZero;
using mozilla::Maybe;
// A ValueOperand can be used with the dual-register (DTRD, i.e. ldrd/strd)
// memory instructions only when its two registers form an aligned pair: the
// payload register must have an even code and the type (tag) register must be
// the register immediately above it.
bool isValueDTRDCandidate(ValueOperand& val) {
  const uint32_t payload = val.payloadReg().code();
  const uint32_t tag = val.typeReg().code();
  // a) Adjacent, with the tag larger than the payload, and
  // b) aligned to a multiple of two.
  return tag == payload + 1 && (payload % 2) == 0;
}
// Convert a C++ bool value (meaningful only in its low byte) to an Int32.
void MacroAssemblerARM::convertBoolToInt32(Register source, Register dest) {
  // Note that C++ bool is only 1 byte, so zero extend it to clear the
  // higher-order bits.
  as_and(dest, source, Imm8(0xff));
}
// Convert a signed 32-bit integer in |src| to a double in |dest_|.
void MacroAssemblerARM::convertInt32ToDouble(Register src,
                                             FloatRegister dest_) {
  // Direct conversions aren't possible: transfer the core register into the
  // VFP register first, then convert in place.
  VFPRegister dest = VFPRegister(dest_);
  as_vxfer(src, InvalidReg, dest.sintOverlay(), CoreToFloat);
  as_vcvt(dest, dest.sintOverlay());
}
// Load a signed 32-bit integer from |src| and convert it to a double.
void MacroAssemblerARM::convertInt32ToDouble(const Address& src,
                                             FloatRegister dest) {
  ScratchDoubleScope scratch(asMasm());
  SecondScratchRegisterScope scratch2(asMasm());
  // Load the raw integer bits into the scratch VFP register, then convert.
  ma_vldr(src, scratch, scratch2);
  as_vcvt(dest, VFPRegister(scratch).sintOverlay());
}
// Load a signed 32-bit integer from a base+scaled-index address and convert
// it to a double.
void MacroAssemblerARM::convertInt32ToDouble(const BaseIndex& src,
                                             FloatRegister dest) {
  Register base = src.base;
  uint32_t scale = Imm32::ShiftOf(src.scale).value;
  ScratchRegisterScope scratch(asMasm());
  SecondScratchRegisterScope scratch2(asMasm());
  // Fold a non-zero constant offset into the base first: the register-offset
  // addressing mode used below cannot also encode an immediate displacement.
  if (src.offset != 0) {
    ma_add(base, Imm32(src.offset), scratch, scratch2);
    base = scratch;
  }
  ma_ldr(DTRAddr(base, DtrRegImmShift(src.index, LSL, scale)), scratch);
  convertInt32ToDouble(scratch, dest);
}
// Convert an unsigned 32-bit integer in |src| to a double in |dest_|.
void MacroAssemblerARM::convertUInt32ToDouble(Register src,
                                              FloatRegister dest_) {
  // Direct conversions aren't possible: transfer to the VFP register, then
  // convert treating the bits as unsigned.
  VFPRegister dest = VFPRegister(dest_);
  as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
  as_vcvt(dest, dest.uintOverlay());
}
// 2^32 as a double: scale factor applied to the high 32-bit word when
// assembling a 64-bit value from two 32-bit halves.
static const double TO_DOUBLE_HIGH_SCALE = 0x100000000;
// Convert an unsigned 32-bit integer in |src| to a float32 in |dest_|.
void MacroAssemblerARM::convertUInt32ToFloat32(Register src,
                                               FloatRegister dest_) {
  // Direct conversions aren't possible.
  VFPRegister dest = VFPRegister(dest_);
  as_vxfer(src, InvalidReg, dest.uintOverlay(), CoreToFloat);
  as_vcvt(VFPRegister(dest).singleOverlay(), dest.uintOverlay());
}
// Narrow a double to a float32 (VCVT rounding), optionally predicated on |c|.
void MacroAssemblerARM::convertDoubleToFloat32(FloatRegister src,
                                               FloatRegister dest,
                                               Condition c) {
  as_vcvt(VFPRegister(dest).singleOverlay(), VFPRegister(src), false, c);
}
// Checks whether a double is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void MacroAssemblerARM::convertDoubleToInt32(FloatRegister src, Register dest,
                                             Label* fail,
                                             bool negativeZeroCheck) {
  // Convert the floating point value to an integer, if it did not fit, then
  // when we convert it *back* to a float, it will have a different value,
  // which we can test.
  ScratchDoubleScope scratchDouble(asMasm());
  ScratchRegisterScope scratch(asMasm());
  FloatRegister scratchSIntReg = scratchDouble.sintOverlay();
  ma_vcvt_F64_I32(src, scratchSIntReg);
  // Move the value into the dest register.
  ma_vxfer(scratchSIntReg, dest);
  // Round-trip back to double and compare with the original; a mismatch or an
  // unordered result (NaN) means the double was not exactly an int32 -> bail.
  ma_vcvt_I32_F64(scratchSIntReg, scratchDouble);
  ma_vcmp(src, scratchDouble);
  as_vmrs(pc);
  ma_b(fail, Assembler::VFP_NotEqualOrUnordered);
  if (negativeZeroCheck) {
    as_cmp(dest, Imm8(0));
    // Test and bail for -0.0, when integer result is 0. Move the top word
    // of the double into the output reg, if it is non-zero, then the
    // original value was -0.0.
    as_vxfer(dest, InvalidReg, src, FloatToCore, Assembler::Equal, 1);
    ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::Equal);
    ma_b(fail, Assembler::Equal);
  }
}
// Checks whether a float32 is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void MacroAssemblerARM::convertFloat32ToInt32(FloatRegister src, Register dest,
                                              Label* fail,
                                              bool negativeZeroCheck) {
  // Converting the floating point value to an integer and then converting it
  // back to a float32 would not work, as float to int32 conversions are
  // clamping (e.g. float(INT32_MAX + 1) would get converted into INT32_MAX
  // and then back to float(INT32_MAX + 1)). If this ever happens, we just
  // bail out.
  ScratchFloat32Scope scratchFloat(asMasm());
  ScratchRegisterScope scratch(asMasm());
  FloatRegister ScratchSIntReg = scratchFloat.sintOverlay();
  ma_vcvt_F32_I32(src, ScratchSIntReg);
  // Store the result
  ma_vxfer(ScratchSIntReg, dest);
  // Round-trip back to float32 and compare; a mismatch or unordered (NaN)
  // result means the value was not exactly an int32 -> bail.
  ma_vcvt_I32_F32(ScratchSIntReg, scratchFloat);
  ma_vcmp(src, scratchFloat);
  as_vmrs(pc);
  ma_b(fail, Assembler::VFP_NotEqualOrUnordered);
  // Bail out in the clamped cases: INT32_MAX / INT32_MIN round-trip exactly
  // even when the original float was out of range.
  ma_cmp(dest, Imm32(0x7fffffff), scratch);
  ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::NotEqual);
  ma_b(fail, Assembler::Equal);
  if (negativeZeroCheck) {
    as_cmp(dest, Imm8(0));
    // Test and bail for -0.0, when integer result is 0. Move the float into
    // the output reg, and if it is non-zero then the original value was
    // -0.0
    as_vxfer(dest, InvalidReg, VFPRegister(src).singleOverlay(), FloatToCore,
             Assembler::Equal, 0);
    ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::Equal);
    ma_b(fail, Assembler::Equal);
  }
}
// Widen a float32 to a double (lossless).
void MacroAssemblerARM::convertFloat32ToDouble(FloatRegister src,
                                               FloatRegister dest) {
  MOZ_ASSERT(dest.isDouble());
  MOZ_ASSERT(src.isSingle());
  as_vcvt(VFPRegister(dest), VFPRegister(src).singleOverlay());
}
// Convert a signed 32-bit integer in |src| to a float32 in |dest|.
void MacroAssemblerARM::convertInt32ToFloat32(Register src,
                                              FloatRegister dest) {
  // Direct conversions aren't possible.
  as_vxfer(src, InvalidReg, dest.sintOverlay(), CoreToFloat);
  as_vcvt(dest.singleOverlay(), dest.sintOverlay());
}
// Load a signed 32-bit integer from |src| and convert it to a float32.
void MacroAssemblerARM::convertInt32ToFloat32(const Address& src,
                                              FloatRegister dest) {
  ScratchFloat32Scope scratch(asMasm());
  SecondScratchRegisterScope scratch2(asMasm());
  ma_vldr(src, scratch, scratch2);
  as_vcvt(dest, VFPRegister(scratch).sintOverlay());
}
// Try to emit |dest = src1 <op> imm| as exactly two ALU instructions by
// splitting |imm| into two imm8m-encodable parts. Returns false (emitting
// nothing) when the split is impossible, or when setting condition codes
// across two instructions would not match the single-instruction semantics.
bool MacroAssemblerARM::alu_dbl(Register src1, Imm32 imm, Register dest,
                                ALUOp op, SBit s, Condition c) {
  if ((s == SetCC && !condsAreSafe(op)) || !can_dbl(op)) {
    return false;
  }
  // The first instruction must write into a destination register, even for
  // ops (like cmp) that normally have none.
  ALUOp interop = getDestVariant(op);
  Imm8::TwoImm8mData both = Imm8::EncodeTwoImms(imm.value);
  if (both.fst().invalid()) {
    return false;
  }
  // For the most part, there is no good reason to set the condition codes for
  // the first instruction. We can do better things if the second instruction
  // doesn't have a dest, such as check for overflow by doing first operation
  // don't do second operation if first operation overflowed. This preserves
  // the overflow condition code. Unfortunately, it is horribly brittle.
  as_alu(dest, src1, Operand2(both.fst()), interop, LeaveCC, c);
  as_alu(dest, dest, Operand2(both.snd()), op, s, c);
  return true;
}
// Emit |dest = src1 <op> imm| using the shortest available sequence:
// one instruction, a negated one-instruction form, two instructions, or a
// scratch-register load followed by a register-register ALU op.
void MacroAssemblerARM::ma_alu(Register src1, Imm32 imm, Register dest,
                               AutoRegisterScope& scratch, ALUOp op, SBit s,
                               Condition c) {
  // ma_mov should be used for moves.
  MOZ_ASSERT(op != OpMov);
  MOZ_ASSERT(op != OpMvn);
  MOZ_ASSERT(src1 != scratch);
  // As it turns out, if you ask for a compare-like instruction you *probably*
  // want it to set condition codes.
  MOZ_ASSERT_IF(dest == InvalidReg, s == SetCC);
  // The operator gives us the ability to determine how this can be used.
  Imm8 imm8 = Imm8(imm.value);
  // One instruction: If we can encode it using an imm8m, then do so.
  if (!imm8.invalid()) {
    as_alu(dest, src1, imm8, op, s, c);
    return;
  }
  // One instruction, negated:
  Imm32 negImm = imm;
  Register negDest;
  ALUOp negOp = ALUNeg(op, dest, scratch, &negImm, &negDest);
  Imm8 negImm8 = Imm8(negImm.value);
  // 'add r1, r2, -15' can be replaced with 'sub r1, r2, 15'.
  // The dest can be replaced (InvalidReg => scratch).
  // This is useful if we wish to negate tst. tst has an invalid (aka not
  // used) dest, but its negation bic requires a dest.
  if (negOp != OpInvalid && !negImm8.invalid()) {
    as_alu(negDest, src1, negImm8, negOp, s, c);
    return;
  }
  // Start by attempting to generate a two instruction form. Some things
  // cannot be made into two-inst forms correctly. Namely, adds dest, src,
  // 0xffff. Since we want the condition codes (and don't know which ones
  // will be checked), we need to assume that the overflow flag will be
  // checked and add{,s} dest, src, 0xff00; add{,s} dest, dest, 0xff is not
  // guaranteed to set the overflow flag the same as the (theoretical) one
  // instruction variant.
  if (alu_dbl(src1, imm, dest, op, s, c)) {
    return;
  }
  // And try with its negative.
  if (negOp != OpInvalid && alu_dbl(src1, negImm, negDest, negOp, s, c)) {
    return;
  }
  // Worst case: materialize the immediate into the scratch register.
  ma_mov(imm, scratch, c);
  as_alu(dest, src1, O2Reg(scratch), op, s, c);
}
// ALU operation where |op2| is a general Operand; it must wrap an Operand2.
void MacroAssemblerARM::ma_alu(Register src1, Operand op2, Register dest,
                               ALUOp op, SBit s, Assembler::Condition c) {
  MOZ_ASSERT(op2.tag() == Operand::Tag::OP2);
  as_alu(dest, src1, op2.toOp2(), op, s, c);
}
// ALU operation with an already-encoded Operand2.
void MacroAssemblerARM::ma_alu(Register src1, Operand2 op2, Register dest,
                               ALUOp op, SBit s, Condition c) {
  as_alu(dest, src1, op2, op, s, c);
}
// Emit a no-op.
void MacroAssemblerARM::ma_nop() { as_nop(); }
// Emit a move of |imm_| into |dest| whose immediate can be patched later.
// Returns the offset of the first (patchable) instruction.
BufferOffset MacroAssemblerARM::ma_movPatchable(Imm32 imm_, Register dest,
                                                Assembler::Condition c) {
  int32_t imm = imm_.value;
  if (HasMOVWT()) {
    // ARMv7: always emit the full movw/movt pair so the patch site has a
    // fixed shape.
    BufferOffset offset = as_movw(dest, Imm16(imm & 0xffff), c);
    as_movt(dest, Imm16(imm >> 16 & 0xffff), c);
    return offset;
  } else {
    // Pre-ARMv7: load the value from a constant pool entry.
    return as_Imm32Pool(dest, imm, c);
  }
}
// Pointer-typed convenience wrapper.
BufferOffset MacroAssemblerARM::ma_movPatchable(ImmPtr imm, Register dest,
                                                Assembler::Condition c) {
  return ma_movPatchable(Imm32(int32_t(imm.value)), dest, c);
}
/* static */
// Patch a previously-emitted patchable move (see ma_movPatchable) at the
// position of |iter| to load |imm32| instead. |rs| selects the shape of the
// original code: a movw/movt pair (L_MOVWT) or a constant-pool load (L_LDR).
template <class Iter>
void MacroAssemblerARM::ma_mov_patch(Imm32 imm32, Register dest,
                                     Assembler::Condition c, RelocStyle rs,
                                     Iter iter) {
  // The current instruction must be an actual instruction,
  // not automatically-inserted boilerplate.
  MOZ_ASSERT(iter.cur());
  MOZ_ASSERT(iter.cur() == iter.maybeSkipAutomaticInstructions());
  int32_t imm = imm32.value;
  switch (rs) {
    case L_MOVWT:
      Assembler::as_movw_patch(dest, Imm16(imm & 0xffff), c, iter.cur());
      Assembler::as_movt_patch(dest, Imm16(imm >> 16 & 0xffff), c, iter.next());
      break;
    case L_LDR:
      Assembler::WritePoolEntry(iter.cur(), c, imm);
      break;
  }
}
// Explicit instantiations for the two iterator flavors used by callers.
template void MacroAssemblerARM::ma_mov_patch(Imm32 imm32, Register dest,
                                              Assembler::Condition c,
                                              RelocStyle rs,
                                              InstructionIterator iter);
template void MacroAssemblerARM::ma_mov_patch(Imm32 imm32, Register dest,
                                              Assembler::Condition c,
                                              RelocStyle rs,
                                              BufferInstructionIterator iter);
// Register-to-register move; elided when it would be a no-op and no flags
// are requested.
void MacroAssemblerARM::ma_mov(Register src, Register dest, SBit s,
                               Assembler::Condition c) {
  if (s == SetCC || dest != src) {
    as_mov(dest, O2Reg(src), s, c);
  }
}
// Materialize a 32-bit immediate using the cheapest available encoding:
// mov imm8m, mvn ~imm8m, movw/movt, or a constant-pool load.
void MacroAssemblerARM::ma_mov(Imm32 imm, Register dest,
                               Assembler::Condition c) {
  // Try mov with Imm8 operand.
  Imm8 imm8 = Imm8(imm.value);
  if (!imm8.invalid()) {
    as_alu(dest, InvalidReg, imm8, OpMov, LeaveCC, c);
    return;
  }
  // Try mvn with Imm8 operand.
  Imm8 negImm8 = Imm8(~imm.value);
  if (!negImm8.invalid()) {
    as_alu(dest, InvalidReg, negImm8, OpMvn, LeaveCC, c);
    return;
  }
  // Try movw/movt.
  if (HasMOVWT()) {
    // ARMv7 supports movw/movt. movw zero-extends its 16 bit argument,
    // so we can set the register this way. movt leaves the bottom 16
    // bits intact, so we always need a movw.
    as_movw(dest, Imm16(imm.value & 0xffff), c);
    if (uint32_t(imm.value) >> 16) {
      as_movt(dest, Imm16(uint32_t(imm.value) >> 16), c);
    }
    return;
  }
  // If we don't have movw/movt, we need a load.
  as_Imm32Pool(dest, imm.value, c);
}
// ImmWord convenience wrapper.
void MacroAssemblerARM::ma_mov(ImmWord imm, Register dest,
                               Assembler::Condition c) {
  ma_mov(Imm32(imm.value), dest, c);
}
// GC-pointer move: must stay patchable and be recorded for the GC so the
// embedded pointer can be traced/updated.
void MacroAssemblerARM::ma_mov(ImmGCPtr ptr, Register dest) {
  BufferOffset offset =
      ma_movPatchable(Imm32(uintptr_t(ptr.value)), dest, Always);
  writeDataRelocation(offset, ptr);
}
// Shifts (just a move with a shifting op2): immediate shift amounts.
void MacroAssemblerARM::ma_lsl(Imm32 shift, Register src, Register dst) {
  as_mov(dst, lsl(src, shift.value));
}
void MacroAssemblerARM::ma_lsr(Imm32 shift, Register src, Register dst) {
  as_mov(dst, lsr(src, shift.value));
}
void MacroAssemblerARM::ma_asr(Imm32 shift, Register src, Register dst) {
  as_mov(dst, asr(src, shift.value));
}
void MacroAssemblerARM::ma_ror(Imm32 shift, Register src, Register dst) {
  as_mov(dst, ror(src, shift.value));
}
void MacroAssemblerARM::ma_rol(Imm32 shift, Register src, Register dst) {
  as_mov(dst, rol(src, shift.value));
}
// Shifts (just a move with a shifting op2): register shift amounts.
void MacroAssemblerARM::ma_lsl(Register shift, Register src, Register dst) {
  as_mov(dst, lsl(src, shift));
}
void MacroAssemblerARM::ma_lsr(Register shift, Register src, Register dst) {
  as_mov(dst, lsr(src, shift));
}
void MacroAssemblerARM::ma_asr(Register shift, Register src, Register dst) {
  as_mov(dst, asr(src, shift));
}
void MacroAssemblerARM::ma_ror(Register shift, Register src, Register dst) {
  as_mov(dst, ror(src, shift));
}
// ARM has no rotate-left by register; synthesize it as ror by (32 - shift).
void MacroAssemblerARM::ma_rol(Register shift, Register src, Register dst,
                               AutoRegisterScope& scratch) {
  as_rsb(scratch, shift, Imm8(32));
  as_mov(dst, ror(src, scratch));
}
// Move not (dest <- ~src)
void MacroAssemblerARM::ma_mvn(Register src1, Register dest, SBit s,
                               Assembler::Condition c) {
  as_alu(dest, InvalidReg, O2Reg(src1), OpMvn, s, c);
}
// Negate (dest <- -src), src is a register, rather than a general op2.
// Implemented as a reverse subtract from zero.
void MacroAssemblerARM::ma_neg(Register src1, Register dest, SBit s,
                               Assembler::Condition c) {
  as_rsb(dest, src1, Imm8(0), s, c);
}
// And.
// Two-operand form: dest = dest & src.
// Bug fix: the SBit and condition arguments were previously dropped when
// delegating to the three-operand form, so requests like SetCC were silently
// ignored. Forward them, matching the sibling ma_eor/ma_orr wrappers.
void MacroAssemblerARM::ma_and(Register src, Register dest, SBit s,
                               Assembler::Condition c) {
  ma_and(dest, src, dest, s, c);
}
// Three-operand form: dest = src1 & src2.
void MacroAssemblerARM::ma_and(Register src1, Register src2, Register dest,
                               SBit s, Assembler::Condition c) {
  as_and(dest, src1, O2Reg(src2), s, c);
}
// Immediate form: dest = dest & imm.
void MacroAssemblerARM::ma_and(Imm32 imm, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Assembler::Condition c) {
  ma_alu(dest, imm, dest, scratch, OpAnd, s, c);
}
// Immediate form: dest = src1 & imm.
void MacroAssemblerARM::ma_and(Imm32 imm, Register src1, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Assembler::Condition c) {
  ma_alu(src1, imm, dest, scratch, OpAnd, s, c);
}
// Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2).
void MacroAssemblerARM::ma_bic(Imm32 imm, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Assembler::Condition c) {
  ma_alu(dest, imm, dest, scratch, OpBic, s, c);
}
// Exclusive or.
// Two-operand form: dest = dest ^ src.
void MacroAssemblerARM::ma_eor(Register src, Register dest, SBit s,
                               Assembler::Condition c) {
  ma_eor(dest, src, dest, s, c);
}
// Three-operand form: dest = src1 ^ src2.
void MacroAssemblerARM::ma_eor(Register src1, Register src2, Register dest,
                               SBit s, Assembler::Condition c) {
  as_eor(dest, src1, O2Reg(src2), s, c);
}
// Immediate form: dest = dest ^ imm.
void MacroAssemblerARM::ma_eor(Imm32 imm, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Assembler::Condition c) {
  ma_alu(dest, imm, dest, scratch, OpEor, s, c);
}
// Immediate form: dest = src1 ^ imm.
void MacroAssemblerARM::ma_eor(Imm32 imm, Register src1, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Assembler::Condition c) {
  ma_alu(src1, imm, dest, scratch, OpEor, s, c);
}
// Or.
// Two-operand form: dest = dest | src.
void MacroAssemblerARM::ma_orr(Register src, Register dest, SBit s,
                               Assembler::Condition c) {
  ma_orr(dest, src, dest, s, c);
}
// Three-operand form: dest = src1 | src2.
void MacroAssemblerARM::ma_orr(Register src1, Register src2, Register dest,
                               SBit s, Assembler::Condition c) {
  as_orr(dest, src1, O2Reg(src2), s, c);
}
// Immediate form: dest = dest | imm.
void MacroAssemblerARM::ma_orr(Imm32 imm, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Assembler::Condition c) {
  ma_alu(dest, imm, dest, scratch, OpOrr, s, c);
}
// Immediate form: dest = src1 | imm.
void MacroAssemblerARM::ma_orr(Imm32 imm, Register src1, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Assembler::Condition c) {
  ma_alu(src1, imm, dest, scratch, OpOrr, s, c);
}
// Arithmetic-based ops.
// Add with carry.
// Immediate form: dest = dest + imm + C.
void MacroAssemblerARM::ma_adc(Imm32 imm, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Condition c) {
  ma_alu(dest, imm, dest, scratch, OpAdc, s, c);
}
// Two-operand form: dest = dest + src + C.
void MacroAssemblerARM::ma_adc(Register src, Register dest, SBit s,
                               Condition c) {
  as_alu(dest, dest, O2Reg(src), OpAdc, s, c);
}
// Three-operand form: dest = src1 + src2 + C.
void MacroAssemblerARM::ma_adc(Register src1, Register src2, Register dest,
                               SBit s, Condition c) {
  as_alu(dest, src1, O2Reg(src2), OpAdc, s, c);
}
// Add.
// Immediate form: dest = dest + imm.
void MacroAssemblerARM::ma_add(Imm32 imm, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Condition c) {
  ma_alu(dest, imm, dest, scratch, OpAdd, s, c);
}
// Two-operand form: dest = dest + src1.
void MacroAssemblerARM::ma_add(Register src1, Register dest, SBit s,
                               Condition c) {
  ma_alu(dest, O2Reg(src1), dest, OpAdd, s, c);
}
// Three-operand form: dest = src1 + src2.
void MacroAssemblerARM::ma_add(Register src1, Register src2, Register dest,
                               SBit s, Condition c) {
  as_alu(dest, src1, O2Reg(src2), OpAdd, s, c);
}
// Operand form: dest = src1 + op.
void MacroAssemblerARM::ma_add(Register src1, Operand op, Register dest, SBit s,
                               Condition c) {
  ma_alu(src1, op, dest, OpAdd, s, c);
}
// Immediate form with explicit source: dest = src1 + op.
void MacroAssemblerARM::ma_add(Register src1, Imm32 op, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Condition c) {
  ma_alu(src1, op, dest, scratch, OpAdd, s, c);
}
// Subtract with carry.
// Immediate form: dest = dest - imm - !C.
void MacroAssemblerARM::ma_sbc(Imm32 imm, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Condition c) {
  ma_alu(dest, imm, dest, scratch, OpSbc, s, c);
}
// Two-operand form: dest = dest - src1 - !C.
void MacroAssemblerARM::ma_sbc(Register src1, Register dest, SBit s,
                               Condition c) {
  as_alu(dest, dest, O2Reg(src1), OpSbc, s, c);
}
// Three-operand form: dest = src1 - src2 - !C.
void MacroAssemblerARM::ma_sbc(Register src1, Register src2, Register dest,
                               SBit s, Condition c) {
  as_alu(dest, src1, O2Reg(src2), OpSbc, s, c);
}
// Subtract.
// Immediate form: dest = dest - imm.
void MacroAssemblerARM::ma_sub(Imm32 imm, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Condition c) {
  ma_alu(dest, imm, dest, scratch, OpSub, s, c);
}
// Two-operand form: dest = dest - src1.
void MacroAssemblerARM::ma_sub(Register src1, Register dest, SBit s,
                               Condition c) {
  ma_alu(dest, Operand(src1), dest, OpSub, s, c);
}
// Three-operand form: dest = src1 - src2.
void MacroAssemblerARM::ma_sub(Register src1, Register src2, Register dest,
                               SBit s, Condition c) {
  ma_alu(src1, Operand(src2), dest, OpSub, s, c);
}
// Operand form: dest = src1 - op.
void MacroAssemblerARM::ma_sub(Register src1, Operand op, Register dest, SBit s,
                               Condition c) {
  ma_alu(src1, op, dest, OpSub, s, c);
}
// Immediate form with explicit source: dest = src1 - op.
void MacroAssemblerARM::ma_sub(Register src1, Imm32 op, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Condition c) {
  ma_alu(src1, op, dest, scratch, OpSub, s, c);
}
// Reverse subtract.
// Immediate form: dest = imm - dest.
void MacroAssemblerARM::ma_rsb(Imm32 imm, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Condition c) {
  ma_alu(dest, imm, dest, scratch, OpRsb, s, c);
}
// Two-operand reverse subtract: dest = src1 - dest.
// Bug fix: the register operands were swapped (as_alu(dest, src1,
// O2Reg(dest), OpRsb) computes dest - src1, i.e. a plain subtract that
// duplicates ma_sub). Use the same operand pattern as the sibling
// two-operand ma_adc/ma_sbc/ma_rsc wrappers.
void MacroAssemblerARM::ma_rsb(Register src1, Register dest, SBit s,
                               Condition c) {
  as_alu(dest, dest, O2Reg(src1), OpRsb, s, c);
}
// Three-operand form: dest = src2 - src1.
void MacroAssemblerARM::ma_rsb(Register src1, Register src2, Register dest,
                               SBit s, Condition c) {
  as_alu(dest, src1, O2Reg(src2), OpRsb, s, c);
}
// Immediate form: dest = op2 - src1.
void MacroAssemblerARM::ma_rsb(Register src1, Imm32 op2, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Condition c) {
  ma_alu(src1, op2, dest, scratch, OpRsb, s, c);
}
// Reverse subtract with carry.
// Immediate form: dest = imm - dest - !C.
void MacroAssemblerARM::ma_rsc(Imm32 imm, Register dest,
                               AutoRegisterScope& scratch, SBit s,
                               Condition c) {
  ma_alu(dest, imm, dest, scratch, OpRsc, s, c);
}
// Two-operand form: dest = src1 - dest - !C.
void MacroAssemblerARM::ma_rsc(Register src1, Register dest, SBit s,
                               Condition c) {
  as_alu(dest, dest, O2Reg(src1), OpRsc, s, c);
}
// Three-operand form: dest = src2 - src1 - !C.
void MacroAssemblerARM::ma_rsc(Register src1, Register src2, Register dest,
                               SBit s, Condition c) {
  as_alu(dest, src1, O2Reg(src2), OpRsc, s, c);
}
// Compares/tests.
// Compare negative (sets condition codes as src1 + src2 would).
void MacroAssemblerARM::ma_cmn(Register src1, Imm32 imm,
                               AutoRegisterScope& scratch, Condition c) {
  ma_alu(src1, imm, InvalidReg, scratch, OpCmn, SetCC, c);
}
// Register form; CMN is commutative, so the operand order does not matter.
void MacroAssemblerARM::ma_cmn(Register src1, Register src2, Condition c) {
  as_alu(InvalidReg, src2, O2Reg(src1), OpCmn, SetCC, c);
}
// Operand form: unimplemented.
void MacroAssemblerARM::ma_cmn(Register src1, Operand op, Condition c) {
  MOZ_CRASH("Feature NYI");
}
// Compare (src - src2).
void MacroAssemblerARM::ma_cmp(Register src1, Imm32 imm,
                               AutoRegisterScope& scratch, Condition c) {
  ma_alu(src1, imm, InvalidReg, scratch, OpCmp, SetCC, c);
}
void MacroAssemblerARM::ma_cmp(Register src1, ImmTag tag, Condition c) {
  // ImmTag comparisons can always be done without use of a scratch register.
  // Compare via cmn with the negated tag, which is always imm8m-encodable.
  Imm8 negtag = Imm8(-tag.value);
  MOZ_ASSERT(!negtag.invalid());
  as_cmn(src1, negtag, c);
}
void MacroAssemblerARM::ma_cmp(Register src1, ImmWord ptr,
                               AutoRegisterScope& scratch, Condition c) {
  ma_cmp(src1, Imm32(ptr.value), scratch, c);
}
// GC-pointer compare: the pointer must be materialized patchably (see
// ma_mov(ImmGCPtr)) so the GC can relocate it.
void MacroAssemblerARM::ma_cmp(Register src1, ImmGCPtr ptr,
                               AutoRegisterScope& scratch, Condition c) {
  ma_mov(ptr, scratch);
  ma_cmp(src1, scratch, c);
}
// General Operand compare: direct for Operand2, via a load for memory.
void MacroAssemblerARM::ma_cmp(Register src1, Operand op,
                               AutoRegisterScope& scratch,
                               AutoRegisterScope& scratch2, Condition c) {
  switch (op.tag()) {
    case Operand::Tag::OP2:
      as_cmp(src1, op.toOp2(), c);
      break;
    case Operand::Tag::MEM:
      ma_ldr(op.toAddress(), scratch, scratch2);
      as_cmp(src1, O2Reg(scratch), c);
      break;
    default:
      MOZ_CRASH("trying to compare FP and integer registers");
  }
}
void MacroAssemblerARM::ma_cmp(Register src1, Register src2, Condition c) {
  as_cmp(src1, O2Reg(src2), c);
}
// Test for equality, (src1 ^ src2). Sets flags only; no destination.
void MacroAssemblerARM::ma_teq(Register src1, Imm32 imm,
                               AutoRegisterScope& scratch, Condition c) {
  ma_alu(src1, imm, InvalidReg, scratch, OpTeq, SetCC, c);
}
// Test for equality (src1 ^ src2), setting flags only.
// Bug fix: this previously emitted TST (an AND-based test) instead of TEQ
// (the XOR-based test), which sets different flags whenever the operands
// share no set bits but still differ. Use as_teq, matching both the name
// and the Operand overload below.
void MacroAssemblerARM::ma_teq(Register src1, Register src2, Condition c) {
  as_teq(src1, O2Reg(src2), c);
}
// Operand2 form of the equality test.
void MacroAssemblerARM::ma_teq(Register src1, Operand op, Condition c) {
  as_teq(src1, op.toOp2(), c);
}
// Test (src1 & src2). Sets flags only; no destination.
void MacroAssemblerARM::ma_tst(Register src1, Imm32 imm,
                               AutoRegisterScope& scratch, Condition c) {
  ma_alu(src1, imm, InvalidReg, scratch, OpTst, SetCC, c);
}
void MacroAssemblerARM::ma_tst(Register src1, Register src2, Condition c) {
  as_tst(src1, O2Reg(src2), c);
}
void MacroAssemblerARM::ma_tst(Register src1, Operand op, Condition c) {
  as_tst(src1, op.toOp2(), c);
}
// 32-bit multiply: dest = src1 * src2 (low 32 bits).
void MacroAssemblerARM::ma_mul(Register src1, Register src2, Register dest) {
  as_mul(dest, src1, src2);
}
// Immediate multiply: the immediate must be materialized into a register
// first, since MUL has no immediate form.
void MacroAssemblerARM::ma_mul(Register src1, Imm32 imm, Register dest,
                               AutoRegisterScope& scratch) {
  ma_mov(imm, scratch);
  as_mul(dest, src1, scratch);
}
// Multiply dest = src1 * src2 and prepare an overflow check. Returns the
// condition the caller should branch on to detect the requested |cond|:
// the high word of the 64-bit product (left in scratch) must equal the sign
// extension of the low word for the result to fit in 32 bits.
Assembler::Condition MacroAssemblerARM::ma_check_mul(Register src1,
                                                     Register src2,
                                                     Register dest,
                                                     AutoRegisterScope& scratch,
                                                     Condition cond) {
  // TODO: this operation is illegal on armv6 and earlier
  // if src2 == scratch or src2 == dest.
  if (cond == Equal || cond == NotEqual) {
    as_smull(scratch, dest, src1, src2, SetCC);
    return cond;
  }
  if (cond == Overflow) {
    as_smull(scratch, dest, src1, src2);
    // Overflow iff high word != (low word >> 31), i.e. sign extension.
    as_cmp(scratch, asr(dest, 31));
    return NotEqual;
  }
  MOZ_CRASH("Condition NYI");
}
// Immediate variant of ma_check_mul: materialize |imm| into scratch first,
// then multiply and set up the same overflow check.
Assembler::Condition MacroAssemblerARM::ma_check_mul(Register src1, Imm32 imm,
                                                     Register dest,
                                                     AutoRegisterScope& scratch,
                                                     Condition cond) {
  ma_mov(imm, scratch);
  if (cond == Equal || cond == NotEqual) {
    as_smull(scratch, dest, scratch, src1, SetCC);
    return cond;
  }
  if (cond == Overflow) {
    as_smull(scratch, dest, scratch, src1);
    // Overflow iff high word != sign extension of the low word.
    as_cmp(scratch, asr(dest, 31));
    return NotEqual;
  }
  MOZ_CRASH("Condition NYI");
}
// Unsigned 32x32 -> 64-bit multiply with an immediate multiplicand.
void MacroAssemblerARM::ma_umull(Register src1, Imm32 imm, Register destHigh,
                                 Register destLow, AutoRegisterScope& scratch) {
  ma_mov(imm, scratch);
  as_umull(destHigh, destLow, src1, scratch);
}
// Unsigned 32x32 -> 64-bit multiply, register form.
void MacroAssemblerARM::ma_umull(Register src1, Register src2,
                                 Register destHigh, Register destLow) {
  as_umull(destHigh, destLow, src1, src2);
}
// Compute dest = src % ((1 << shift) - 1) without division, preserving the
// sign of src in the JS sense (result is negated iff src was negative; a
// zero result from a negative src leaves the Zero flag set, signalling -0).
void MacroAssemblerARM::ma_mod_mask(Register src, Register dest, Register hold,
                                    Register tmp, AutoRegisterScope& scratch,
                                    AutoRegisterScope& scratch2,
                                    int32_t shift) {
  // We wish to compute x % (1<<y) - 1 for a known constant, y.
  //
  // 1. Let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit dividend as
  // a number in base b, namely c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
  //
  // 2. Since both addition and multiplication commute with modulus:
  // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
  // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
  //
  // 3. Since b == C + 1, b % C == 1, and b^n % C == 1 the whole thing
  // simplifies to: c_0 + c_1 + c_2 ... c_n % C
  //
  // Each c_n can easily be computed by a shift/bitextract, and the modulus
  // can be maintained by simply subtracting by C whenever the number gets
  // over C.
  int32_t mask = (1 << shift) - 1;
  Label head;
  // Register 'hold' holds -1 if the value was negative, 1 otherwise. The
  // scratch reg holds the remaining bits that have not been processed lr
  // serves as a temporary location to store extracted bits into as well as
  // holding the trial subtraction as a temp value dest is the accumulator
  // (and holds the final result)
  //
  // Move the whole value into tmp, setting the condition codes so we can muck
  // with them later.
  as_mov(tmp, O2Reg(src), SetCC);
  // Zero out the dest.
  ma_mov(Imm32(0), dest);
  // Set the hold appropriately.
  ma_mov(Imm32(1), hold);
  ma_mov(Imm32(-1), hold, Signed);
  // Work on the absolute value: negate tmp if src was negative.
  as_rsb(tmp, tmp, Imm8(0), SetCC, Signed);
  // Begin the main loop.
  bind(&head);
  {
    // Extract the bottom bits.
    ma_and(Imm32(mask), tmp, scratch, scratch2);
    // Add those bits to the accumulator.
    ma_add(scratch, dest, dest);
    // Do a trial subtraction, this is the same operation as cmp, but we store
    // the dest.
    ma_sub(dest, Imm32(mask), scratch, scratch2, SetCC);
    // If (sum - C) > 0, store sum - C back into sum, thus performing a modulus.
    ma_mov(scratch, dest, LeaveCC, NotSigned);
    // Get rid of the bits that we extracted before, and set the condition
    // codes.
    as_mov(tmp, lsr(tmp, shift), SetCC);
    // If the shift produced zero, finish, otherwise, continue in the loop.
    ma_b(&head, NonZero);
  }
  // Check the hold to see if we need to negate the result. Hold can only be
  // 1 or -1, so this will never set the 0 flag.
  as_cmp(hold, Imm8(0));
  // If the hold was non-zero, negate the result to be in line with what JS
  // wants this will set the condition codes if we try to negate.
  as_rsb(dest, dest, Imm8(0), SetCC, Signed);
  // Since the Zero flag is not set by the compare, we can *only* set the Zero
  // flag in the rsb, so Zero is set iff we negated zero (e.g. the result of
  // the computation was -0.0).
}
// Signed modulus: dest = num % div, via sdiv then multiply-and-subtract
// (dest = num - (num / div) * div). Requires hardware integer division.
void MacroAssemblerARM::ma_smod(Register num, Register div, Register dest,
                                AutoRegisterScope& scratch) {
  as_sdiv(scratch, num, div);
  as_mls(dest, num, scratch, div);
}
// Unsigned modulus, same scheme as ma_smod.
void MacroAssemblerARM::ma_umod(Register num, Register div, Register dest,
                                AutoRegisterScope& scratch) {
  as_udiv(scratch, num, div);
  as_mls(dest, num, scratch, div);
}
// Division
void MacroAssemblerARM::ma_sdiv(Register num, Register div, Register dest,
                                Condition cond) {
  as_sdiv(dest, num, div, cond);
}
void MacroAssemblerARM::ma_udiv(Register num, Register div, Register dest,
                                Condition cond) {
  as_udiv(dest, num, div, cond);
}
// Miscellaneous instructions.
// Count leading zeroes.
void MacroAssemblerARM::ma_clz(Register src, Register dest, Condition cond) {
  as_clz(dest, src, cond);
}
// Count trailing zeroes, synthesized from clz:
void MacroAssemblerARM::ma_ctz(Register src, Register dest,
                               AutoRegisterScope& scratch) {
  // int c = __clz(a & -a);
  // return a ? 31 - c : c;
  as_rsb(scratch, src, Imm8(0), SetCC);        // scratch = -src (Z set if src == 0)
  as_and(dest, src, O2Reg(scratch), LeaveCC);  // isolate lowest set bit
  as_clz(dest, dest);
  // For non-zero input: dest = 31 - clz(lowest bit). For zero input the rsb
  // is skipped (NotEqual), leaving clz(0) == 32.
  as_rsb(dest, dest, Imm8(0x1F), LeaveCC, Assembler::NotEqual);
}
// Memory.
// Shortcut for when we know we're transferring 32 bits of data.
void MacroAssemblerARM::ma_dtr(LoadStore ls, Register rn, Imm32 offset,
                               Register rt, AutoRegisterScope& scratch,
                               Index mode, Assembler::Condition cc) {
  ma_dataTransferN(ls, 32, true, rn, offset, rt, scratch, mode, cc);
}
void MacroAssemblerARM::ma_dtr(LoadStore ls, Register rt, const Address& addr,
                               AutoRegisterScope& scratch, Index mode,
                               Condition cc) {
  ma_dataTransferN(ls, 32, true, addr.base, Imm32(addr.offset), rt, scratch,
                   mode, cc);
}
// 32-bit store with a pre-encoded address.
void MacroAssemblerARM::ma_str(Register rt, DTRAddr addr, Index mode,
                               Condition cc) {
  as_dtr(IsStore, 32, mode, rt, addr, cc);
}
// 32-bit store to an Address (may need scratch for large offsets).
void MacroAssemblerARM::ma_str(Register rt, const Address& addr,
                               AutoRegisterScope& scratch, Index mode,
                               Condition cc) {
  ma_dtr(IsStore, rt, addr, scratch, mode, cc);
}
// 64-bit store of an even/odd register pair (strd). rt2 is DebugOnly because
// the encoding implies it: it must be rt + 1, with rt even.
void MacroAssemblerARM::ma_strd(Register rt, DebugOnly<Register> rt2,
                                EDtrAddr addr, Index mode, Condition cc) {
  MOZ_ASSERT((rt.code() & 1) == 0);
  MOZ_ASSERT(rt2.value.code() == rt.code() + 1);
  as_extdtr(IsStore, 64, true, mode, rt, addr, cc);
}
// 32-bit load with a pre-encoded address.
void MacroAssemblerARM::ma_ldr(DTRAddr addr, Register rt, Index mode,
                               Condition cc) {
  as_dtr(IsLoad, 32, mode, rt, addr, cc);
}
// 32-bit load from an Address (may need scratch for large offsets).
void MacroAssemblerARM::ma_ldr(const Address& addr, Register rt,
                               AutoRegisterScope& scratch, Index mode,
                               Condition cc) {
  ma_dtr(IsLoad, rt, addr, scratch, mode, cc);
}
// Unsigned byte load (ldrb).
void MacroAssemblerARM::ma_ldrb(DTRAddr addr, Register rt, Index mode,
                                Condition cc) {
  as_dtr(IsLoad, 8, mode, rt, addr, cc);
}
// Signed halfword load (ldrsh).
void MacroAssemblerARM::ma_ldrsh(EDtrAddr addr, Register rt, Index mode,
                                 Condition cc) {
  as_extdtr(IsLoad, 16, true, mode, rt, addr, cc);
}
// Unsigned halfword load (ldrh).
void MacroAssemblerARM::ma_ldrh(EDtrAddr addr, Register rt, Index mode,
                                Condition cc) {
  as_extdtr(IsLoad, 16, false, mode, rt, addr, cc);
}
// Signed byte load (ldrsb).
void MacroAssemblerARM::ma_ldrsb(EDtrAddr addr, Register rt, Index mode,
                                 Condition cc) {
  as_extdtr(IsLoad, 8, true, mode, rt, addr, cc);
}
// 64-bit load into an even/odd register pair (ldrd). rt2 is DebugOnly
// because the encoding implies it: it must be rt + 1, with rt even.
void MacroAssemblerARM::ma_ldrd(EDtrAddr addr, Register rt,
                                DebugOnly<Register> rt2, Index mode,
                                Condition cc) {
  MOZ_ASSERT((rt.code() & 1) == 0);
  MOZ_ASSERT(rt2.value.code() == rt.code() + 1);
  MOZ_ASSERT(addr.maybeOffsetRegister() !=
             rt);  // Undefined behavior if rm == rt/rt2.
  MOZ_ASSERT(addr.maybeOffsetRegister() != rt2);
  as_extdtr(IsLoad, 64, true, mode, rt, addr, cc);
}
// Halfword store (strh).
void MacroAssemblerARM::ma_strh(Register rt, EDtrAddr addr, Index mode,
                                Condition cc) {
  as_extdtr(IsStore, 16, false, mode, rt, addr, cc);
}
// Byte store (strb).
void MacroAssemblerARM::ma_strb(Register rt, DTRAddr addr, Index mode,
                                Condition cc) {
  as_dtr(IsStore, 8, mode, rt, addr, cc);
}
// Specialty for moving N bits of data, where n == 8,16,32,64.
// Register-offset form with an optional scale on the index register.
BufferOffset MacroAssemblerARM::ma_dataTransferN(
    LoadStore ls, int size, bool IsSigned, Register rn, Register rm,
    Register rt, AutoRegisterScope& scratch, Index mode,
    Assembler::Condition cc, Scale scale) {
  MOZ_ASSERT(size == 8 || size == 16 || size == 32 || size == 64);
  // 32-bit and unsigned 8-bit transfers use the plain ldr/str encoding,
  // which can apply the scale as a shifted register offset directly.
  if (size == 32 || (size == 8 && !IsSigned)) {
    return as_dtr(ls, size, mode, rt,
                  DTRAddr(rn, DtrRegImmShift(rm, LSL, scale)), cc);
  }
  // The extended encodings (ldrh/ldrsb/ldrd...) only take an unshifted
  // register offset, so apply the scale manually via the scratch register.
  if (scale != TimesOne) {
    ma_lsl(Imm32(scale), rm, scratch);
    rm = scratch;
  }
  return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(rm)),
                   cc);
}
// No scratch register is required if scale is TimesOne.
BufferOffset MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size,
                                                 bool IsSigned, Register rn,
                                                 Register rm, Register rt,
                                                 Index mode,
                                                 Assembler::Condition cc) {
  MOZ_ASSERT(size == 8 || size == 16 || size == 32 || size == 64);
  // Plain ldr/str encoding for 32-bit and unsigned 8-bit transfers.
  if (size == 32 || (size == 8 && !IsSigned)) {
    return as_dtr(ls, size, mode, rt,
                  DTRAddr(rn, DtrRegImmShift(rm, LSL, TimesOne)), cc);
  }
  // Extended encoding for everything else (signed bytes, halfwords, pairs).
  return as_extdtr(ls, size, IsSigned, mode, rt, EDtrAddr(rn, EDtrOffReg(rm)),
                   cc);
}
// Emit a load or store of |size| bits (8/16/32/64) between |rt| and
// [rn + offset], where the immediate offset may not fit a single
// instruction. |scratch| is used to materialize out-of-range offsets.
// Returns the buffer offset of the final memory instruction.
BufferOffset MacroAssemblerARM::ma_dataTransferN(LoadStore ls, int size,
                                                 bool IsSigned, Register rn,
                                                 Imm32 offset, Register rt,
                                                 AutoRegisterScope& scratch,
                                                 Index mode,
                                                 Assembler::Condition cc) {
  MOZ_ASSERT(!(ls == IsLoad && mode == PostIndex && rt == pc),
             "Large-offset PostIndex loading into PC requires special logic: "
             "see ma_popn_pc().");

  int off = offset.value;

  // We can encode this as a standard ldr.
  if (size == 32 || (size == 8 && !IsSigned)) {
    // The plain dtr immediate is a 12-bit magnitude plus sign: |off| <= 4095.
    if (off < 4096 && off > -4096) {
      // This encodes as a single instruction, Emulating mode's behavior
      // in a multi-instruction sequence is not necessary.
      return as_dtr(ls, size, mode, rt, DTRAddr(rn, DtrOffImm(off)), cc);
    }

    // We cannot encode this offset in a single ldr. For mode == index,
    // try to encode it as |add scratch, base, imm; ldr dest, [scratch,
    // +offset]|. This does not work for mode == PreIndex or mode == PostIndex.
    // PreIndex is simple, just do the add into the base register first,
    // then do a PreIndex'ed load. PostIndexed loads can be tricky.
    // Normally, doing the load with an index of 0, then doing an add would
    // work, but if the destination is the PC, you don't get to execute the
    // instruction after the branch, which will lead to the base register
    // not being updated correctly. Explicitly handle this case, without
    // doing anything fancy, then handle all of the other cases.

    // mode == Offset
    //  add   scratch, base, offset_hi
    //  ldr   dest, [scratch, +offset_lo]
    //
    // mode == PreIndex
    //  add   base, base, offset_hi
    //  ldr   dest, [base, +offset_lo]!

    // Split the offset into a low 12-bit chunk (encodable in the dtr
    // immediate) and a remainder we try to fold into an add/sub.
    int bottom = off & 0xfff;
    int neg_bottom = 0x1000 - bottom;

    MOZ_ASSERT(rn != scratch);
    MOZ_ASSERT(mode != PostIndex);

    // At this point, both off - bottom and off + neg_bottom will be
    // reasonable-ish quantities.
    //
    // Note a neg_bottom of 0x1000 can not be encoded as an immediate
    // negative offset in the instruction and this occurs when bottom is
    // zero, so this case is guarded against below.
    if (off < 0) {
      Operand2 sub_off = Imm8(-(off - bottom));  // sub_off = bottom - off
      if (!sub_off.invalid()) {
        // - sub_off = off - bottom
        as_sub(scratch, rn, sub_off, LeaveCC, cc);
        return as_dtr(ls, size, Offset, rt,
                      DTRAddr(scratch, DtrOffImm(bottom)), cc);
      }

      // sub_off = -neg_bottom - off
      sub_off = Imm8(-(off + neg_bottom));
      if (!sub_off.invalid() && bottom != 0) {
        // Guarded against by: bottom != 0
        MOZ_ASSERT(neg_bottom < 0x1000);
        // - sub_off = neg_bottom + off
        as_sub(scratch, rn, sub_off, LeaveCC, cc);
        return as_dtr(ls, size, Offset, rt,
                      DTRAddr(scratch, DtrOffImm(-neg_bottom)), cc);
      }
    } else {
      // sub_off = off - bottom
      Operand2 sub_off = Imm8(off - bottom);
      if (!sub_off.invalid()) {
        // sub_off = off - bottom
        as_add(scratch, rn, sub_off, LeaveCC, cc);
        return as_dtr(ls, size, Offset, rt,
                      DTRAddr(scratch, DtrOffImm(bottom)), cc);
      }

      // sub_off = neg_bottom + off
      sub_off = Imm8(off + neg_bottom);
      if (!sub_off.invalid() && bottom != 0) {
        // Guarded against by: bottom != 0
        MOZ_ASSERT(neg_bottom < 0x1000);
        // sub_off = neg_bottom + off
        as_add(scratch, rn, sub_off, LeaveCC, cc);
        return as_dtr(ls, size, Offset, rt,
                      DTRAddr(scratch, DtrOffImm(-neg_bottom)), cc);
      }
    }

    // The offset cannot be folded into an add/sub: load it whole into the
    // scratch register and use a register-offset transfer. NOTE(review):
    // the previous code dropped |cc| here, unlike the parallel extdtr
    // fallback below; pass it so the transfer stays conditional.
    ma_mov(offset, scratch);
    return as_dtr(ls, size, mode, rt,
                  DTRAddr(rn, DtrRegImmShift(scratch, LSL, 0)), cc);
  } else {
    // Should attempt to use the extended load/store instructions.
    // Their immediate is only 8 bits: |off| <= 255.
    if (off < 256 && off > -256) {
      return as_extdtr(ls, size, IsSigned, mode, rt,
                       EDtrAddr(rn, EDtrOffImm(off)), cc);
    }

    // We cannot encode this offset in a single extldr. Try to encode it as
    // an add scratch, base, imm; extldr dest, [scratch, +offset].
    int bottom = off & 0xff;
    int neg_bottom = 0x100 - bottom;

    // At this point, both off - bottom and off + neg_bottom will be
    // reasonable-ish quantities.
    //
    // Note a neg_bottom of 0x100 can not be encoded as an immediate
    // negative offset in the instruction and this occurs when bottom is
    // zero, so this case is guarded against below.
    if (off < 0) {
      // sub_off = bottom - off
      Operand2 sub_off = Imm8(-(off - bottom));
      if (!sub_off.invalid()) {
        // - sub_off = off - bottom
        as_sub(scratch, rn, sub_off, LeaveCC, cc);
        return as_extdtr(ls, size, IsSigned, Offset, rt,
                         EDtrAddr(scratch, EDtrOffImm(bottom)), cc);
      }

      // sub_off = -neg_bottom - off
      sub_off = Imm8(-(off + neg_bottom));
      if (!sub_off.invalid() && bottom != 0) {
        // Guarded against by: bottom != 0
        MOZ_ASSERT(neg_bottom < 0x100);
        // - sub_off = neg_bottom + off
        as_sub(scratch, rn, sub_off, LeaveCC, cc);
        return as_extdtr(ls, size, IsSigned, Offset, rt,
                         EDtrAddr(scratch, EDtrOffImm(-neg_bottom)), cc);
      }
    } else {
      // sub_off = off - bottom
      Operand2 sub_off = Imm8(off - bottom);
      if (!sub_off.invalid()) {
        // sub_off = off - bottom
        as_add(scratch, rn, sub_off, LeaveCC, cc);
        return as_extdtr(ls, size, IsSigned, Offset, rt,
                         EDtrAddr(scratch, EDtrOffImm(bottom)), cc);
      }

      // sub_off = neg_bottom + off
      sub_off = Imm8(off + neg_bottom);
      if (!sub_off.invalid() && bottom != 0) {
        // Guarded against by: bottom != 0
        MOZ_ASSERT(neg_bottom < 0x100);
        // sub_off = neg_bottom + off
        as_add(scratch, rn, sub_off, LeaveCC, cc);
        return as_extdtr(ls, size, IsSigned, Offset, rt,
                         EDtrAddr(scratch, EDtrOffImm(-neg_bottom)), cc);
      }
    }

    // Entirely out of range: load the full offset and use a register offset.
    ma_mov(offset, scratch);
    return as_extdtr(ls, size, IsSigned, mode, rt,
                     EDtrAddr(rn, EDtrOffReg(scratch)), cc);
  }
}
void MacroAssemblerARM::ma_pop(Register r) {
  // ldr r, [sp], #+4 -- load one word and advance the stack pointer.
  as_dtr(IsLoad, 32, PostIndex, r, DTRAddr(sp, DtrOffImm(4)));
}
void MacroAssemblerARM::ma_popn_pc(Imm32 n, AutoRegisterScope& scratch,
                                   AutoRegisterScope& scratch2) {
  // pc <- [sp]; sp += n
  //
  // A PostIndex load into pc branches immediately, so when |n| does not fit
  // the instruction's immediate the sp adjustment must happen before the
  // branching load.
  int32_t nv = n.value;
  // The dtr immediate is a 12-bit magnitude plus sign, so only offsets with
  // |nv| <= 4095 are encodable. Use strict bounds on both sides (the old
  // ">= -4096" admitted -4096, which exceeds the encodable magnitude);
  // this matches the range check in ma_dataTransferN.
  if (nv < 4096 && nv > -4096) {
    as_dtr(IsLoad, 32, PostIndex, pc, DTRAddr(sp, DtrOffImm(nv)));
  } else {
    // Save the current sp, bump sp by |n| first, then branch by loading pc
    // from the saved location.
    ma_mov(sp, scratch);
    ma_add(Imm32(n), sp, scratch2);
    as_dtr(IsLoad, 32, Offset, pc, DTRAddr(scratch, DtrOffImm(0)));
  }
}
void MacroAssemblerARM::ma_push(Register r) {
  MOZ_ASSERT(r != sp, "Use ma_push_sp().");
  // str r, [sp, #-4]! -- store one word, writing the new sp back.
  as_dtr(IsStore, 32, PreIndex, r, DTRAddr(sp, DtrOffImm(-4)));
}
void MacroAssemblerARM::ma_push_sp(Register r, AutoRegisterScope& scratch) {
  // Pushing sp is not well-defined: use two instructions.
  MOZ_ASSERT(r == sp);
  // Snapshot sp into the scratch register, then push the snapshot.
  ma_mov(sp, scratch);
  as_dtr(IsStore, 32, PreIndex, scratch, DTRAddr(sp, DtrOffImm(-4)));
}
void MacroAssemblerARM::ma_vpop(VFPRegister r) {
  // Pop one VFP register: load from [sp], increment-after, write sp back.
  startFloatTransferM(IsLoad, sp, IA, WriteBack);
  transferFloatReg(r);
  finishFloatTransfer();
}
void MacroAssemblerARM::ma_vpush(VFPRegister r) {
  // Push one VFP register: store below sp, decrement-before, write sp back.
  startFloatTransferM(IsStore, sp, DB, WriteBack);
  transferFloatReg(r);
  finishFloatTransfer();
}
// Barriers
void MacroAssemblerARM::ma_dmb(BarrierOption option) {
  // Older cores lack the DMB/DSB/ISB instructions; emit the trap-based
  // substitute instead.
  if (!HasDMBDSBISB()) {
    as_dmb_trap();
    return;
  }
  as_dmb(option);
}
void MacroAssemblerARM::ma_dsb(BarrierOption option) {
  // Older cores lack the DMB/DSB/ISB instructions; emit the trap-based
  // substitute instead.
  if (!HasDMBDSBISB()) {
    as_dsb_trap();
    return;
  }
  as_dsb(option);
}
// Branches when done from within arm-specific code.
BufferOffset MacroAssemblerARM::ma_b(Label* dest, Assembler::Condition c) {
  // Conditional branch to a label; returns the branch's buffer offset.
  return as_b(dest, c);
}
void MacroAssemblerARM::ma_bx(Register dest, Assembler::Condition c) {
  // Indirect branch through a register.
  as_bx(dest, c);
}
void MacroAssemblerARM::ma_b(void* target, Assembler::Condition c) {
  // Branch by loading the destination address into pc from a constant pool;
  // the pooled word is simple to patch later.
  as_Imm32Pool(pc, uint32_t(target), c);
}
// This is almost NEVER necessary: we'll basically never be calling a label,
// except possibly in the crazy bailout-table case.
void MacroAssemblerARM::ma_bl(Label* dest, Assembler::Condition c) {
  // Branch-and-link (call) to a label.
  as_bl(dest, c);
}
void MacroAssemblerARM::ma_blx(Register reg, Assembler::Condition c) {
  // Indirect branch-and-link (call) through a register.
  as_blx(reg, c);
}
// VFP/ALU
void MacroAssemblerARM::ma_vadd(FloatRegister src1, FloatRegister src2,
                                FloatRegister dst) {
  // Double-precision add of |src1| and |src2| into |dst|.
  VFPRegister vd(dst);
  VFPRegister vn(src1);
  VFPRegister vm(src2);
  as_vadd(vd, vn, vm);
}
void MacroAssemblerARM::ma_vadd_f32(FloatRegister src1, FloatRegister src2,
                                    FloatRegister dst) {
  // Single-precision add of |src1| and |src2| into |dst|.
  VFPRegister vd = VFPRegister(dst).singleOverlay();
  VFPRegister vn = VFPRegister(src1).singleOverlay();
  VFPRegister vm = VFPRegister(src2).singleOverlay();
  as_vadd(vd, vn, vm);
}
void MacroAssemblerARM::ma_vsub(FloatRegister src1, FloatRegister src2,
                                FloatRegister dst) {
  // Double-precision subtract into |dst|.
  VFPRegister vd(dst);
  VFPRegister vn(src1);
  VFPRegister vm(src2);
  as_vsub(vd, vn, vm);
}
void MacroAssemblerARM::ma_vsub_f32(FloatRegister src1, FloatRegister src2,
                                    FloatRegister dst) {
  // Single-precision subtract into |dst|.
  VFPRegister vd = VFPRegister(dst).singleOverlay();
  VFPRegister vn = VFPRegister(src1).singleOverlay();
  VFPRegister vm = VFPRegister(src2).singleOverlay();
  as_vsub(vd, vn, vm);
}
void MacroAssemblerARM::ma_vmul(FloatRegister src1, FloatRegister src2,
                                FloatRegister dst) {
  // Double-precision multiply of |src1| and |src2| into |dst|.
  VFPRegister vd(dst);
  VFPRegister vn(src1);
  VFPRegister vm(src2);
  as_vmul(vd, vn, vm);
}
void MacroAssemblerARM::ma_vmul_f32(FloatRegister src1, FloatRegister src2,
                                    FloatRegister dst) {
  // Single-precision multiply of |src1| and |src2| into |dst|.
  VFPRegister vd = VFPRegister(dst).singleOverlay();
  VFPRegister vn = VFPRegister(src1).singleOverlay();
  VFPRegister vm = VFPRegister(src2).singleOverlay();
  as_vmul(vd, vn, vm);
}
void MacroAssemblerARM::ma_vdiv(FloatRegister src1, FloatRegister src2,
                                FloatRegister dst) {
  // Double-precision divide into |dst|.
  VFPRegister vd(dst);
  VFPRegister vn(src1);
  VFPRegister vm(src2);
  as_vdiv(vd, vn, vm);
}
void MacroAssemblerARM::ma_vdiv_f32(FloatRegister src1, FloatRegister src2,
                                    FloatRegister dst) {
  // Single-precision divide into |dst|.
  VFPRegister vd = VFPRegister(dst).singleOverlay();
  VFPRegister vn = VFPRegister(src1).singleOverlay();
  VFPRegister vm = VFPRegister(src2).singleOverlay();
  as_vdiv(vd, vn, vm);
}
void MacroAssemblerARM::ma_vmov(FloatRegister src, FloatRegister dest,
                                Condition cc) {
  // Register-to-register VFP move (double view).
  as_vmov(VFPRegister(dest), VFPRegister(src), cc);
}
void MacroAssemblerARM::ma_vmov_f32(FloatRegister src, FloatRegister dest,
                                    Condition cc) {
  // Register-to-register VFP move (single-precision view).
  VFPRegister vd = VFPRegister(dest).singleOverlay();
  VFPRegister vs = VFPRegister(src).singleOverlay();
  as_vmov(vd, vs, cc);
}
void MacroAssemblerARM::ma_vneg(FloatRegister src, FloatRegister dest,
                                Condition cc) {
  // Double-precision negate: dest <- -src.
  as_vneg(VFPRegister(dest), VFPRegister(src), cc);
}
void MacroAssemblerARM::ma_vneg_f32(FloatRegister src, FloatRegister dest,
                                    Condition cc) {
  // Single-precision negate: dest <- -src.
  VFPRegister vd = VFPRegister(dest).singleOverlay();
  VFPRegister vs = VFPRegister(src).singleOverlay();
  as_vneg(vd, vs, cc);
}
void MacroAssemblerARM::ma_vabs(FloatRegister src, FloatRegister dest,
                                Condition cc) {
  // Double-precision absolute value: dest <- |src|.
  as_vabs(VFPRegister(dest), VFPRegister(src), cc);
}
void MacroAssemblerARM::ma_vabs_f32(FloatRegister src, FloatRegister dest,
                                    Condition cc) {
  // Single-precision absolute value: dest <- |src|.
  VFPRegister vd = VFPRegister(dest).singleOverlay();
  VFPRegister vs = VFPRegister(src).singleOverlay();
  as_vabs(vd, vs, cc);
}
void MacroAssemblerARM::ma_vsqrt(FloatRegister src, FloatRegister dest,
                                 Condition cc) {
  // Double-precision square root.
  as_vsqrt(VFPRegister(dest), VFPRegister(src), cc);
}
void MacroAssemblerARM::ma_vsqrt_f32(FloatRegister src, FloatRegister dest,
                                     Condition cc) {
  // Single-precision square root.
  VFPRegister vd = VFPRegister(dest).singleOverlay();
  VFPRegister vs = VFPRegister(src).singleOverlay();
  as_vsqrt(vd, vs, cc);
}
static inline uint32_t DoubleHighWord(double d) {
  // The upper 32 bits of the IEEE-754 encoding (sign, exponent, and the
  // high part of the mantissa).
  uint64_t bits = BitwiseCast<uint64_t>(d);
  return static_cast<uint32_t>(bits >> 32);
}
static inline uint32_t DoubleLowWord(double d) {
  // The lower 32 bits of the IEEE-754 encoding. The static_cast to uint32_t
  // already truncates to the low word, so the previous explicit
  // |& 0xffffffff| mask was redundant and has been dropped.
  return static_cast<uint32_t>(BitwiseCast<uint64_t>(d));
}
void MacroAssemblerARM::ma_vimm(double value, FloatRegister dest,
Condition cc) {
if (HasVFPv3()) {
if (DoubleLowWord(value) == 0) {
if (DoubleHighWord(value) == 0) {
// To zero a register, load 1.0, then execute dN <- dN - dN
as_vimm(dest, VFPImm::One, cc);
as_vsub(dest, dest, dest, cc);
return;
}
VFPImm enc(DoubleHighWord(value));
if (enc.isValid()) {
as_vimm(dest, enc, cc);
return;
}
}
}
// Fall back to putting the value in a pool.
as_FImm64Pool(dest, value, cc);
}
void MacroAssemblerARM::ma_vimm_f32(float value, FloatRegister dest,
                                    Condition cc) {
  VFPRegister vd = VFPRegister(dest).singleOverlay();

  if (!HasVFPv3()) {
    // No immediate form at all before VFPv3: use the constant pool.
    as_FImm32Pool(vd, value, cc);
    return;
  }

  if (IsPositiveZero(value)) {
    // To zero a register, load 1.0, then execute sN <- sN - sN.
    as_vimm(vd, VFPImm::One, cc);
    as_vsub(vd, vd, vd, cc);
    return;
  }

  // Note that the vimm immediate float32 instruction encoding differs
  // from the vimm immediate double encoding, but this difference matches
  // the difference in the floating point formats, so it is possible to
  // convert the float32 to a double and then use the double encoding
  // paths. It is still necessary to firstly check that the double low
  // word is zero because some float32 numbers set these bits and this can
  // not be ignored.
  double doubleValue(value);
  if (DoubleLowWord(doubleValue) == 0) {
    VFPImm enc(DoubleHighWord(doubleValue));
    if (enc.isValid()) {
      as_vimm(vd, enc, cc);
      return;
    }
  }

  // Fall back to putting the value in a pool.
  as_FImm32Pool(vd, value, cc);
}
void MacroAssemblerARM::ma_vcmp(FloatRegister src1, FloatRegister src2,
                                Condition cc) {
  // Double-precision compare of |src1| against |src2|.
  VFPRegister vn(src1);
  VFPRegister vm(src2);
  as_vcmp(vn, vm, cc);
}
void MacroAssemblerARM::ma_vcmp_f32(FloatRegister src1, FloatRegister src2,
                                    Condition cc) {
  // Single-precision compare of |src1| against |src2|.
  VFPRegister vn = VFPRegister(src1).singleOverlay();
  VFPRegister vm = VFPRegister(src2).singleOverlay();
  as_vcmp(vn, vm, cc);
}
void MacroAssemblerARM::ma_vcmpz(FloatRegister src1, Condition cc) {
  // Double-precision compare of |src1| against zero.
  VFPRegister vn(src1);
  as_vcmpz(vn, cc);
}
void MacroAssemblerARM::ma_vcmpz_f32(FloatRegister src1, Condition cc) {
  // Single-precision compare of |src1| against zero.
  VFPRegister vn = VFPRegister(src1).singleOverlay();
  as_vcmpz(vn, cc);
}
void MacroAssemblerARM::ma_vcvt_F64_I32(FloatRegister src, FloatRegister dest,
                                        Condition cc) {
  // double -> signed int32; the result stays in a VFP register.
  MOZ_ASSERT(src.isDouble());
  MOZ_ASSERT(dest.isSInt());
  as_vcvt(VFPRegister(dest), VFPRegister(src), false, cc);
}
void MacroAssemblerARM::ma_vcvt_F64_U32(FloatRegister src, FloatRegister dest,
                                        Condition cc) {
  // double -> unsigned int32; the result stays in a VFP register.
  MOZ_ASSERT(src.isDouble());
  MOZ_ASSERT(dest.isUInt());
  as_vcvt(VFPRegister(dest), VFPRegister(src), false, cc);
}
void MacroAssemblerARM::ma_vcvt_I32_F64(FloatRegister src, FloatRegister dest,
                                        Condition cc) {
  // signed int32 (in a VFP register) -> double.
  MOZ_ASSERT(src.isSInt());
  MOZ_ASSERT(dest.isDouble());
  as_vcvt(VFPRegister(dest), VFPRegister(src), false, cc);
}
void MacroAssemblerARM::ma_vcvt_U32_F64(FloatRegister src, FloatRegister dest,
                                        Condition cc) {
  // unsigned int32 (in a VFP register) -> double.
  MOZ_ASSERT(src.isUInt());
  MOZ_ASSERT(dest.isDouble());
  as_vcvt(VFPRegister(dest), VFPRegister(src), false, cc);
}
void MacroAssemblerARM::ma_vcvt_F32_I32(FloatRegister src, FloatRegister dest,
                                        Condition cc) {
  // float32 -> signed int32; the result stays in a VFP register.
  MOZ_ASSERT(src.isSingle());
  MOZ_ASSERT(dest.isSInt());
  VFPRegister vd = VFPRegister(dest).sintOverlay();
  VFPRegister vs = VFPRegister(src).singleOverlay();
  as_vcvt(vd, vs, false, cc);
}
void MacroAssemblerARM::ma_vcvt_F32_U32(FloatRegister src, FloatRegister dest,
                                        Condition cc) {
  // float32 -> unsigned int32; the result stays in a VFP register.
  MOZ_ASSERT(src.isSingle());
  MOZ_ASSERT(dest.isUInt());
  VFPRegister vd = VFPRegister(dest).uintOverlay();
  VFPRegister vs = VFPRegister(src).singleOverlay();
  as_vcvt(vd, vs, false, cc);
}
void MacroAssemblerARM::ma_vcvt_I32_F32(FloatRegister src, FloatRegister dest,
                                        Condition cc) {
  // signed int32 (in a VFP register) -> float32.
  MOZ_ASSERT(src.isSInt());
  MOZ_ASSERT(dest.isSingle());
  VFPRegister vd = VFPRegister(dest).singleOverlay();
  VFPRegister vs = VFPRegister(src).sintOverlay();
  as_vcvt(vd, vs, false, cc);
}
void MacroAssemblerARM::ma_vcvt_U32_F32(FloatRegister src, FloatRegister dest,
                                        Condition cc) {
  // unsigned int32 (in a VFP register) -> float32.
  MOZ_ASSERT(src.isUInt());
  MOZ_ASSERT(dest.isSingle());
  VFPRegister vd = VFPRegister(dest).singleOverlay();
  VFPRegister vs = VFPRegister(src).uintOverlay();
  as_vcvt(vd, vs, false, cc);
}
void MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest,
                                 Condition cc) {
  // Move 32 bits out of a VFP single register into a core register.
  VFPRegister vs = VFPRegister(src).singleOverlay();
  as_vxfer(dest, InvalidReg, vs, FloatToCore, cc);
}
void MacroAssemblerARM::ma_vxfer(FloatRegister src, Register dest1,
                                 Register dest2, Condition cc) {
  // Move a VFP register out into a pair of core registers (64 bits total).
  as_vxfer(dest1, dest2, VFPRegister(src), FloatToCore, cc);
}
void MacroAssemblerARM::ma_vxfer(Register src, FloatRegister dest,
                                 Condition cc) {
  // Move 32 bits from a core register into a VFP single register.
  VFPRegister vd = VFPRegister(dest).singleOverlay();
  as_vxfer(src, InvalidReg, vd, CoreToFloat, cc);
}
void MacroAssemblerARM::ma_vxfer(Register src1, Register src2,
                                 FloatRegister dest, Condition cc) {
  // Move a pair of core registers (64 bits total) into a VFP register.
  as_vxfer(src1, src2, VFPRegister(dest), CoreToFloat, cc);
}
BufferOffset MacroAssemblerARM::ma_vdtr(LoadStore ls, const Address& addr,
VFPRegister rt,
AutoRegisterScope& scratch,
Condition cc) {
int off = addr.offset;
MOZ_ASSERT((off & 3) == 0);
Register base = addr.base;
if (off > -1024 && off < 1024) {
return as_vdtr(ls, rt, Operand(addr).toVFPAddr(), cc);
}
// We cannot encode this offset in a a single ldr. Try to encode it as an