/* Extraction artifact — code-browser page chrome, not part of the source:
   Source code
   Revision control
   Copy as Markdown
   Other Tools */
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_arm64_MacroAssembler_arm64_inl_h
#define jit_arm64_MacroAssembler_arm64_inl_h
#include "jit/arm64/MacroAssembler-arm64.h"
namespace js {
namespace jit {
//{{{ check_macroassembler_style
// Move / extension helpers.  Each wrapper emits a single vixl
// macro-assembler instruction; ARMRegister(r, N) selects the N-bit
// (32 = W, 64 = X) view of the general-purpose register.

// 64-bit register-to-register move.
void MacroAssembler::move64(Register64 src, Register64 dest) {
  Mov(ARMRegister(dest.reg, 64), ARMRegister(src.reg, 64));
}
// Materialize a 64-bit immediate into dest.
void MacroAssembler::move64(Imm64 imm, Register64 dest) {
  Mov(ARMRegister(dest.reg, 64), imm.value);
}
// Bitwise copy of a 32-bit float's representation into a GPR (FMOV).
void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
  Fmov(ARMRegister(dest, 32), ARMFPRegister(src, 32));
}
// Bitwise copy of a GPR's low 32 bits into a float register (FMOV).
void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
  Fmov(ARMFPRegister(dest, 32), ARMRegister(src, 32));
}
// Zero-extend the low byte of src into the 32-bit dest (UXTB).
void MacroAssembler::move8ZeroExtend(Register src, Register dest) {
  Uxtb(ARMRegister(dest, 32), ARMRegister(src, 32));
}
// Sign-extend the low byte of src into the 32-bit dest (SXTB).
void MacroAssembler::move8SignExtend(Register src, Register dest) {
  Sxtb(ARMRegister(dest, 32), ARMRegister(src, 32));
}
// Sign-extend the low halfword of src into the 32-bit dest (SXTH).
void MacroAssembler::move16SignExtend(Register src, Register dest) {
  Sxth(ARMRegister(dest, 32), ARMRegister(src, 32));
}
// Bitwise copy of a double's representation into a 64-bit GPR (FMOV).
void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
  Fmov(ARMRegister(dest.reg, 64), ARMFPRegister(src, 64));
}
// Bitwise copy of a 64-bit GPR into a double register (FMOV).
void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
  Fmov(ARMFPRegister(dest, 64), ARMRegister(src.reg, 64));
}
// Take the low 32 bits of src; the W-form MOV clears dest's upper bits.
void MacroAssembler::move64To32(Register64 src, Register dest) {
  Mov(ARMRegister(dest, 32), ARMRegister(src.reg, 32));
}
// Zero-extend the low 32 bits of src into the 64-bit dest (UXTW; vixl
// takes same-size register views for this alias).
void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
  Uxtw(ARMRegister(dest.reg, 64), ARMRegister(src, 64));
}
// Sign-extend the low byte of src into the full 64-bit dest.
void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
  Sxtb(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
}
// Sign-extend the low halfword of src into the full 64-bit dest.
void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
  Sxth(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
}
// Sign-extend the low 32 bits of src into the full 64-bit dest (SXTW).
void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
  Sxtw(ARMRegister(dest.reg, 64), ARMRegister(src, 32));
}
// Pointer-width (64-bit on arm64) variant of the 32->64 sign extension.
void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
  Sxtw(ARMRegister(dest, 64), ARMRegister(src, 32));
}
// Pointer-width variant of the 32->64 zero extension (UXTW).
void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
  Uxtw(ARMRegister(dest, 64), ARMRegister(src, 64));
}
// ===============================================================
// Load instructions
// Load a 32-bit value from memory and sign-extend it to pointer width.
// Implemented as a plain 32-bit load followed by an in-place SXTW.
void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
  load32(src, dest);
  move32To64SignExtend(dest, Register64(dest));
}
// The ABI return address on arm64 lives in the link register (lr).
void MacroAssembler::loadAbiReturnAddress(Register dest) { movePtr(lr, dest); }
// ===============================================================
// Logical instructions
// Logical operations.  Memory-operand forms do a read-modify-write
// through a vixl scratch register; each asserts the scratch does not
// alias the address base register.

// Bitwise NOT of the low 32 bits: reg = wzr ORN reg (~reg).
void MacroAssembler::not32(Register reg) {
  Orn(ARMRegister(reg, 32), vixl::wzr, ARMRegister(reg, 32));
}
// Bitwise NOT of the full 64-bit register.
void MacroAssembler::notPtr(Register reg) {
  Orn(ARMRegister(reg, 64), vixl::xzr, ARMRegister(reg, 64));
}
// dest &= src (32-bit).
void MacroAssembler::and32(Register src, Register dest) {
  And(ARMRegister(dest, 32), ARMRegister(dest, 32),
      Operand(ARMRegister(src, 32)));
}
// dest &= imm (32-bit).
void MacroAssembler::and32(Imm32 imm, Register dest) {
  And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
}
// dest = src & imm (32-bit, three-operand form).
void MacroAssembler::and32(Imm32 imm, Register src, Register dest) {
  And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
}
// *dest &= imm: load, AND, store through a W scratch register.
void MacroAssembler::and32(Imm32 imm, const Address& dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  MOZ_ASSERT(scratch32.asUnsized() != dest.base);
  load32(dest, scratch32.asUnsized());
  And(scratch32, scratch32, Operand(imm.value));
  store32(scratch32.asUnsized(), dest);
}
// dest &= *src.
void MacroAssembler::and32(const Address& src, Register dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  MOZ_ASSERT(scratch32.asUnsized() != src.base);
  load32(src, scratch32.asUnsized());
  And(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
}
// Pointer-width (64-bit) AND variants.
void MacroAssembler::andPtr(Register src, Register dest) {
  And(ARMRegister(dest, 64), ARMRegister(dest, 64),
      Operand(ARMRegister(src, 64)));
}
void MacroAssembler::andPtr(Imm32 imm, Register dest) {
  And(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
}
// 64-bit AND variants (same as pointer width on arm64).
void MacroAssembler::and64(Imm64 imm, Register64 dest) {
  And(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
}
void MacroAssembler::and64(Register64 src, Register64 dest) {
  And(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
      ARMRegister(src.reg, 64));
}
// dest |= imm (64-bit).
void MacroAssembler::or64(Imm64 imm, Register64 dest) {
  Orr(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
}
// dest |= imm (32-bit).
void MacroAssembler::or32(Imm32 imm, Register dest) {
  Orr(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
}
// dest |= src (32-bit).
void MacroAssembler::or32(Register src, Register dest) {
  Orr(ARMRegister(dest, 32), ARMRegister(dest, 32),
      Operand(ARMRegister(src, 32)));
}
// *dest |= imm: load, ORR, store through a W scratch register.
void MacroAssembler::or32(Imm32 imm, const Address& dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  MOZ_ASSERT(scratch32.asUnsized() != dest.base);
  load32(dest, scratch32.asUnsized());
  Orr(scratch32, scratch32, Operand(imm.value));
  store32(scratch32.asUnsized(), dest);
}
// Pointer-width OR variants.
void MacroAssembler::orPtr(Register src, Register dest) {
  Orr(ARMRegister(dest, 64), ARMRegister(dest, 64),
      Operand(ARMRegister(src, 64)));
}
void MacroAssembler::orPtr(Imm32 imm, Register dest) {
  Orr(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
}
// 64-bit OR/XOR delegate to the pointer-width forms.
void MacroAssembler::or64(Register64 src, Register64 dest) {
  orPtr(src.reg, dest.reg);
}
void MacroAssembler::xor64(Register64 src, Register64 dest) {
  xorPtr(src.reg, dest.reg);
}
// dest ^= src (32-bit).
void MacroAssembler::xor32(Register src, Register dest) {
  Eor(ARMRegister(dest, 32), ARMRegister(dest, 32),
      Operand(ARMRegister(src, 32)));
}
// dest ^= imm (32-bit).
void MacroAssembler::xor32(Imm32 imm, Register dest) {
  Eor(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
}
// *dest ^= imm: load, EOR, store through a W scratch register.
void MacroAssembler::xor32(Imm32 imm, const Address& dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  MOZ_ASSERT(scratch32.asUnsized() != dest.base);
  load32(dest, scratch32.asUnsized());
  Eor(scratch32, scratch32, Operand(imm.value));
  store32(scratch32.asUnsized(), dest);
}
// dest ^= *src.
void MacroAssembler::xor32(const Address& src, Register dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  MOZ_ASSERT(scratch32.asUnsized() != src.base);
  load32(src, scratch32.asUnsized());
  Eor(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
}
// Pointer-width XOR variants.
void MacroAssembler::xorPtr(Register src, Register dest) {
  Eor(ARMRegister(dest, 64), ARMRegister(dest, 64),
      Operand(ARMRegister(src, 64)));
}
void MacroAssembler::xorPtr(Imm32 imm, Register dest) {
  Eor(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
}
// dest ^= imm (64-bit).
void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
  Eor(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
}
// ===============================================================
// Swap instructions
// Byte-swap the low halfword, then sign-extend it over the upper bits.
// REV16 swaps bytes within each halfword; the following SXTH fixes up
// bits 16..31.
void MacroAssembler::byteSwap16SignExtend(Register reg) {
  rev16(ARMRegister(reg, 32), ARMRegister(reg, 32));
  sxth(ARMRegister(reg, 32), ARMRegister(reg, 32));
}
// Byte-swap the low halfword, then zero-extend it over the upper bits.
void MacroAssembler::byteSwap16ZeroExtend(Register reg) {
  rev16(ARMRegister(reg, 32), ARMRegister(reg, 32));
  uxth(ARMRegister(reg, 32), ARMRegister(reg, 32));
}
// Reverse the byte order of the low 32 bits (REV).
void MacroAssembler::byteSwap32(Register reg) {
  rev(ARMRegister(reg, 32), ARMRegister(reg, 32));
}
// Reverse the byte order of all 64 bits (REV).
void MacroAssembler::byteSwap64(Register64 reg) {
  rev(ARMRegister(reg.reg, 64), ARMRegister(reg.reg, 64));
}
// ===============================================================
// Arithmetic functions
// Addition helpers.  Memory-operand forms read-modify-write through a
// vixl scratch register, asserting it does not alias the base register.

// dest += src (32-bit).
void MacroAssembler::add32(Register src, Register dest) {
  Add(ARMRegister(dest, 32), ARMRegister(dest, 32),
      Operand(ARMRegister(src, 32)));
}
// dest += imm (32-bit).
void MacroAssembler::add32(Imm32 imm, Register dest) {
  Add(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
}
// dest = src + imm (32-bit, three-operand form).
void MacroAssembler::add32(Imm32 imm, Register src, Register dest) {
  Add(ARMRegister(dest, 32), ARMRegister(src, 32), Operand(imm.value));
}
// *dest += imm.
void MacroAssembler::add32(Imm32 imm, const Address& dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  MOZ_ASSERT(scratch32.asUnsized() != dest.base);
  Ldr(scratch32, toMemOperand(dest));
  Add(scratch32, scratch32, Operand(imm.value));
  Str(scratch32, toMemOperand(dest));
}
// dest += src (pointer width); delegates to the three-operand form.
void MacroAssembler::addPtr(Register src, Register dest) {
  addPtr(src, dest, dest);
}
// dest = src1 + src2 (pointer width).
void MacroAssembler::addPtr(Register src1, Register src2, Register dest) {
  Add(ARMRegister(dest, 64), ARMRegister(src1, 64),
      Operand(ARMRegister(src2, 64)));
}
// dest += imm (pointer width).
void MacroAssembler::addPtr(Imm32 imm, Register dest) {
  addPtr(imm, dest, dest);
}
// dest = src + imm (pointer width).
void MacroAssembler::addPtr(Imm32 imm, Register src, Register dest) {
  Add(ARMRegister(dest, 64), ARMRegister(src, 64), Operand(imm.value));
}
// dest += imm (word-sized immediate).
void MacroAssembler::addPtr(ImmWord imm, Register dest) {
  Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
}
// *dest += imm (pointer width).
void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();
  MOZ_ASSERT(scratch64.asUnsized() != dest.base);
  Ldr(scratch64, toMemOperand(dest));
  Add(scratch64, scratch64, Operand(imm.value));
  Str(scratch64, toMemOperand(dest));
}
// dest += *src (pointer width).
void MacroAssembler::addPtr(const Address& src, Register dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();
  MOZ_ASSERT(scratch64.asUnsized() != src.base);
  Ldr(scratch64, toMemOperand(src));
  Add(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
}
// 64-bit adds; same as pointer width on arm64.
void MacroAssembler::add64(Register64 src, Register64 dest) {
  addPtr(src.reg, dest.reg);
}
void MacroAssembler::add64(Imm32 imm, Register64 dest) {
  Add(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
}
void MacroAssembler::add64(Imm64 imm, Register64 dest) {
  Add(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
}
// Emit a patchable "dest = sp - <imm32>" sequence and return the offset
// of its first instruction.  The immediate is unknown here, so a
// movz/movk pair materializes a placeholder 0 into a scratch register;
// patchSub32FromStackPtr() later writes the real 16-bit halves into
// those two instructions.  AutoForbidPoolsAndNops keeps the 3
// instructions contiguous so the patcher's fixed +0/+4 offsets stay
// valid.
CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch = temps.AcquireX();
  AutoForbidPoolsAndNops afp(this,
                             /* max number of instructions in scope = */ 3);
  CodeOffset offs = CodeOffset(currentOffset());
  movz(scratch, 0, 0);
  movk(scratch, 0, 16);
  Sub(ARMRegister(dest, 64), sp, scratch);
  return offs;
}
// Patch the sequence above: OR the low 16 bits of imm into the movz and
// the high 16 bits into the movk (both were emitted with a 0 payload).
void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
  Instruction* i1 = getInstructionAt(BufferOffset(offset.offset()));
  MOZ_ASSERT(i1->IsMovz());
  i1->SetInstructionBits(i1->InstructionBits() |
                         ImmMoveWide(uint16_t(imm.value)));
  Instruction* i2 = getInstructionAt(BufferOffset(offset.offset() + 4));
  MOZ_ASSERT(i2->IsMovk());
  i2->SetInstructionBits(i2->InstructionBits() |
                         ImmMoveWide(uint16_t(imm.value >> 16)));
}
// dest += src (double / float32 FADD).
void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
  fadd(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
       ARMFPRegister(src, 64));
}
void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
  fadd(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
       ARMFPRegister(src, 32));
}
// Subtraction helpers, mirroring the add family above.

// dest -= imm (32-bit).
void MacroAssembler::sub32(Imm32 imm, Register dest) {
  Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(imm.value));
}
// dest -= src (32-bit).
void MacroAssembler::sub32(Register src, Register dest) {
  Sub(ARMRegister(dest, 32), ARMRegister(dest, 32),
      Operand(ARMRegister(src, 32)));
}
// dest -= *src (32-bit); scratch must not alias the base register.
void MacroAssembler::sub32(const Address& src, Register dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  MOZ_ASSERT(scratch32.asUnsized() != src.base);
  load32(src, scratch32.asUnsized());
  Sub(ARMRegister(dest, 32), ARMRegister(dest, 32), Operand(scratch32));
}
// dest -= src (pointer width).
void MacroAssembler::subPtr(Register src, Register dest) {
  Sub(ARMRegister(dest, 64), ARMRegister(dest, 64),
      Operand(ARMRegister(src, 64)));
}
// *dest -= src: load, SUB, store through an X scratch register.
void MacroAssembler::subPtr(Register src, const Address& dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();
  MOZ_ASSERT(scratch64.asUnsized() != dest.base);
  Ldr(scratch64, toMemOperand(dest));
  Sub(scratch64, scratch64, Operand(ARMRegister(src, 64)));
  Str(scratch64, toMemOperand(dest));
}
// dest -= imm (pointer width).
void MacroAssembler::subPtr(Imm32 imm, Register dest) {
  Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(imm.value));
}
// dest -= *addr (pointer width).
void MacroAssembler::subPtr(const Address& addr, Register dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();
  MOZ_ASSERT(scratch64.asUnsized() != addr.base);
  Ldr(scratch64, toMemOperand(addr));
  Sub(ARMRegister(dest, 64), ARMRegister(dest, 64), Operand(scratch64));
}
// 64-bit subtracts.
void MacroAssembler::sub64(Register64 src, Register64 dest) {
  Sub(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
      ARMRegister(src.reg, 64));
}
void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
  Sub(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), Operand(imm.value));
}
// dest -= src (double / float32 FSUB).
void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
  fsub(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
       ARMFPRegister(src, 64));
}
void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
  fsub(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
       ARMFPRegister(src, 32));
}
// Multiplication helpers.

// srcDest *= rhs (32-bit, no overflow check).
void MacroAssembler::mul32(Register rhs, Register srcDest) {
  mul32(srcDest, rhs, srcDest, nullptr);
}
// srcDest *= imm: materialize the immediate in a scratch register first.
void MacroAssembler::mul32(Imm32 imm, Register srcDest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  move32(imm, scratch32.asUnsized());
  mul32(scratch32.asUnsized(), srcDest);
}
// dest = src1 * src2 (32-bit).  If onOver is non-null, branch there on
// signed overflow: SMULL computes the full 64-bit product, which is
// compared against its own sign-extended low half; a mismatch means the
// product does not fit in 32 signed bits.
void MacroAssembler::mul32(Register src1, Register src2, Register dest,
                           Label* onOver) {
  if (onOver) {
    Smull(ARMRegister(dest, 64), ARMRegister(src1, 32), ARMRegister(src2, 32));
    Cmp(ARMRegister(dest, 64), Operand(ARMRegister(dest, 32), vixl::SXTW));
    B(onOver, NotEqual);
    // Clear upper 32 bits.
    Uxtw(ARMRegister(dest, 64), ARMRegister(dest, 64));
  } else {
    Mul(ARMRegister(dest, 32), ARMRegister(src1, 32), ARMRegister(src2, 32));
  }
}
// dest = high 32 bits of the unsigned 64-bit product imm * src:
// UMULL then a 32-bit right shift leaves the high half in dest's low
// bits.
void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  Mov(scratch32, int32_t(imm.value));
  Umull(ARMRegister(dest, 64), scratch32, ARMRegister(src, 32));
  Lsr(ARMRegister(dest, 64), ARMRegister(dest, 64), 32);
}
// srcDest *= rhs (pointer width).
void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
  Mul(ARMRegister(srcDest, 64), ARMRegister(srcDest, 64), ARMRegister(rhs, 64));
}
// dest *= imm (64-bit); the immediate goes through a scratch register.
void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();
  MOZ_ASSERT(dest.reg != scratch64.asUnsized());
  mov(ImmWord(imm.value), scratch64.asUnsized());
  Mul(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), scratch64);
}
// dest *= src (64-bit).  temp is unused on arm64 (asserted Invalid).
void MacroAssembler::mul64(const Register64& src, const Register64& dest,
                           const Register temp) {
  MOZ_ASSERT(temp == Register::Invalid());
  Mul(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64),
      ARMRegister(src.reg, 64));
}
// dest = src1 * src2 (64-bit, three-operand form).
void MacroAssembler::mul64(const Register64& src1, const Register64& src2,
                           const Register64& dest) {
  Mul(ARMRegister(dest.reg, 64), ARMRegister(src1.reg, 64),
      ARMRegister(src2.reg, 64));
}
// dest = src1 * src2 with an immediate first operand.
void MacroAssembler::mul64(Imm64 src1, const Register64& src2,
                           const Register64& dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();
  MOZ_ASSERT(dest.reg != scratch64.asUnsized());
  mov(ImmWord(src1.value), scratch64.asUnsized());
  Mul(ARMRegister(dest.reg, 64), ARMRegister(src2.reg, 64), scratch64);
}
// dest = src * 3, strength-reduced to a single shifted-operand ADD:
// dest = src + (src << 1).
void MacroAssembler::mulBy3(Register src, Register dest) {
  Add(ARMRegister(dest, 64), ARMRegister(src, 64),
      Operand(ARMRegister(src, 64), vixl::LSL, 1));
}
// dest *= src (float32 / double FMUL).
void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
  fmul(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
       ARMFPRegister(src, 32));
}
void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
  fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
       ARMFPRegister(src, 64));
}
// dest *= *imm: load the double at the given absolute address into an
// FP scratch register and multiply.  temp is unused here beyond the
// assertion that it does not alias the GPR scratch.
void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
                                  FloatRegister dest) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(temp != scratch);
  movePtr(imm, scratch);
  const ARMFPRegister scratchDouble = temps.AcquireD();
  Ldr(scratchDouble, MemOperand(Address(scratch, 0)));
  fmul(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64), scratchDouble);
}
// srcDest = srcDest / rhs, using UDIV or SDIV as requested.
void MacroAssembler::quotient32(Register rhs, Register srcDest,
                                bool isUnsigned) {
  if (isUnsigned) {
    Udiv(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32),
         ARMRegister(rhs, 32));
  } else {
    Sdiv(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32),
         ARMRegister(rhs, 32));
  }
}
// This does not deal with x % 0 or INT_MIN % -1, the caller needs to filter
// those cases when they may occur.
// srcDest = srcDest % rhs, computed as srcDest - (srcDest / rhs) * rhs.
void MacroAssembler::remainder32(Register rhs, Register srcDest,
                                 bool isUnsigned) {
  vixl::UseScratchRegisterScope temps(this);
  ARMRegister scratch = temps.AcquireW();
  if (isUnsigned) {
    Udiv(scratch, ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
  } else {
    Sdiv(scratch, ARMRegister(srcDest, 32), ARMRegister(rhs, 32));
  }
  Mul(scratch, scratch, ARMRegister(rhs, 32));
  Sub(ARMRegister(srcDest, 32), ARMRegister(srcDest, 32), scratch);
}
// dest /= src (float32 / double FDIV).
void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
  fdiv(ARMFPRegister(dest, 32), ARMFPRegister(dest, 32),
       ARMFPRegister(src, 32));
}
void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
  fdiv(ARMFPRegister(dest, 64), ARMFPRegister(dest, 64),
       ARMFPRegister(src, 64));
}
// Increment the 64-bit counter at an absolute address.  Two X scratch
// registers are acquired: one holds the address, the other the value.
// Note: this is a plain load/add/store, not an atomic increment.
void MacroAssembler::inc64(AbsoluteAddress dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratchAddr64 = temps.AcquireX();
  const ARMRegister scratch64 = temps.AcquireX();
  Mov(scratchAddr64, uint64_t(dest.addr));
  Ldr(scratch64, MemOperand(scratchAddr64, 0));
  Add(scratch64, scratch64, Operand(1));
  Str(scratch64, MemOperand(scratchAddr64, 0));
}
// Negation / absolute value / square root / min-max helpers.

// reg = -reg (32-bit).
void MacroAssembler::neg32(Register reg) {
  Neg(ARMRegister(reg, 32), Operand(ARMRegister(reg, 32)));
}
// reg = -reg (64-bit); same as pointer width on arm64.
void MacroAssembler::neg64(Register64 reg) { negPtr(reg.reg); }
void MacroAssembler::negPtr(Register reg) {
  Neg(ARMRegister(reg, 64), Operand(ARMRegister(reg, 64)));
}
// reg = -reg (float32 / double FNEG).
void MacroAssembler::negateFloat(FloatRegister reg) {
  fneg(ARMFPRegister(reg, 32), ARMFPRegister(reg, 32));
}
void MacroAssembler::negateDouble(FloatRegister reg) {
  fneg(ARMFPRegister(reg, 64), ARMFPRegister(reg, 64));
}
// dest = |src| (32-bit): conditionally negate src when it is negative.
void MacroAssembler::abs32(Register src, Register dest) {
  Cmp(ARMRegister(src, 32), wzr);
  Cneg(ARMRegister(dest, 32), ARMRegister(src, 32), Assembler::LessThan);
}
// dest = |src| (float32 / double FABS).
void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
  fabs(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
}
void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
  fabs(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
}
// dest = sqrt(src) (float32 / double FSQRT).
void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
  fsqrt(ARMFPRegister(dest, 32), ARMFPRegister(src, 32));
}
void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
  fsqrt(ARMFPRegister(dest, 64), ARMFPRegister(src, 64));
}
// Min/max: FMIN/FMAX implement the wasm semantics directly, so the
// handleNaN flag (always true for wasm) is only asserted.
void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
                                bool handleNaN) {
  MOZ_ASSERT(handleNaN);  // Always true for wasm
  fmin(ARMFPRegister(srcDest, 32), ARMFPRegister(srcDest, 32),
       ARMFPRegister(other, 32));
}
void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
                               bool handleNaN) {
  MOZ_ASSERT(handleNaN);  // Always true for wasm
  fmin(ARMFPRegister(srcDest, 64), ARMFPRegister(srcDest, 64),
       ARMFPRegister(other, 64));
}
void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
                                bool handleNaN) {
  MOZ_ASSERT(handleNaN);  // Always true for wasm
  fmax(ARMFPRegister(srcDest, 32), ARMFPRegister(srcDest, 32),
       ARMFPRegister(other, 32));
}
void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
                               bool handleNaN) {
  MOZ_ASSERT(handleNaN);  // Always true for wasm
  fmax(ARMFPRegister(srcDest, 64), ARMFPRegister(srcDest, 64),
       ARMFPRegister(other, 64));
}
// ===============================================================
// Shift functions
// Shift helpers.  Immediate forms assert the amount is within the
// register width; register forms rely on the hardware variable-shift
// instructions (which take the amount modulo the width), so the
// "flexible" variants need no extra masking and simply delegate.

// dest <<= imm (pointer width).
void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  Lsl(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
}
// dest <<= shift (pointer width).
void MacroAssembler::lshiftPtr(Register shift, Register dest) {
  Lsl(ARMRegister(dest, 64), ARMRegister(dest, 64), ARMRegister(shift, 64));
}
// 64-bit left shifts; same as pointer width on arm64.
void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  lshiftPtr(imm, dest.reg);
}
void MacroAssembler::lshift64(Register shift, Register64 srcDest) {
  Lsl(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
      ARMRegister(shift, 64));
}
// dest <<= shift (32-bit).
void MacroAssembler::lshift32(Register shift, Register dest) {
  Lsl(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
}
void MacroAssembler::flexibleLshift32(Register src, Register dest) {
  lshift32(src, dest);
}
// dest <<= imm (32-bit).
void MacroAssembler::lshift32(Imm32 imm, Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 32);
  Lsl(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
}
// Logical (zero-filling) right shifts, pointer width.
void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  Lsr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
}
void MacroAssembler::rshiftPtr(Imm32 imm, Register src, Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  Lsr(ARMRegister(dest, 64), ARMRegister(src, 64), imm.value);
}
void MacroAssembler::rshiftPtr(Register shift, Register dest) {
  Lsr(ARMRegister(dest, 64), ARMRegister(dest, 64), ARMRegister(shift, 64));
}
// Logical right shifts, 32-bit.
void MacroAssembler::rshift32(Register shift, Register dest) {
  Lsr(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
}
void MacroAssembler::flexibleRshift32(Register src, Register dest) {
  rshift32(src, dest);
}
void MacroAssembler::rshift32(Imm32 imm, Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 32);
  Lsr(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
}
// Arithmetic (sign-propagating) right shifts.
void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  Asr(ARMRegister(dest, 64), ARMRegister(dest, 64), imm.value);
}
void MacroAssembler::rshift32Arithmetic(Register shift, Register dest) {
  Asr(ARMRegister(dest, 32), ARMRegister(dest, 32), ARMRegister(shift, 32));
}
void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 32);
  Asr(ARMRegister(dest, 32), ARMRegister(dest, 32), imm.value);
}
void MacroAssembler::flexibleRshift32Arithmetic(Register src, Register dest) {
  rshift32Arithmetic(src, dest);
}
// 64-bit logical right shifts.
void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  rshiftPtr(imm, dest.reg);
}
void MacroAssembler::rshift64(Register shift, Register64 srcDest) {
  Lsr(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
      ARMRegister(shift, 64));
}
// dest >>= imm (64-bit arithmetic, sign-propagating).  Assert the
// shift amount is in range, matching every other immediate-shift
// helper in this file (lshift64, rshift64, rshiftPtrArithmetic, ...):
// ARM64 immediate shifts of X registers require an amount in [0, 63].
void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
  Asr(ARMRegister(dest.reg, 64), ARMRegister(dest.reg, 64), imm.value);
}
// srcDest >>= shift (64-bit arithmetic, variable amount).
void MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest) {
  Asr(ARMRegister(srcDest.reg, 64), ARMRegister(srcDest.reg, 64),
      ARMRegister(shift, 64));
}
// ===============================================================
// Condition functions
// dest = (int8 at lhs) <cond> rhs ? 1 : 0.  Unsigned conditions
// zero-extend both the loaded byte and the immediate; signed conditions
// sign-extend both, so the 32-bit compare matches 8-bit semantics.
void MacroAssembler::cmp8Set(Condition cond, Address lhs, Imm32 rhs,
                             Register dest) {
  vixl::UseScratchRegisterScope temps(this);
  Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  switch (cond) {
    case Assembler::Equal:
    case Assembler::NotEqual:
    case Assembler::Above:
    case Assembler::AboveOrEqual:
    case Assembler::Below:
    case Assembler::BelowOrEqual:
      load8ZeroExtend(lhs, scratch);
      cmp32Set(cond, scratch, Imm32(uint8_t(rhs.value)), dest);
      break;
    case Assembler::GreaterThan:
    case Assembler::GreaterThanOrEqual:
    case Assembler::LessThan:
    case Assembler::LessThanOrEqual:
      load8SignExtend(lhs, scratch);
      cmp32Set(cond, scratch, Imm32(int8_t(rhs.value)), dest);
      break;
    default:
      MOZ_CRASH("unexpected condition");
  }
}
// dest = (int16 at lhs) <cond> rhs ? 1 : 0.  Same extension scheme as
// cmp8Set: zero-extend for unsigned conditions, sign-extend for signed.
void MacroAssembler::cmp16Set(Condition cond, Address lhs, Imm32 rhs,
                              Register dest) {
  vixl::UseScratchRegisterScope temps(this);
  Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  switch (cond) {
    case Assembler::Equal:
    case Assembler::NotEqual:
    case Assembler::Above:
    case Assembler::AboveOrEqual:
    case Assembler::Below:
    case Assembler::BelowOrEqual:
      load16ZeroExtend(lhs, scratch);
      cmp32Set(cond, scratch, Imm32(uint16_t(rhs.value)), dest);
      break;
    case Assembler::GreaterThan:
    case Assembler::GreaterThanOrEqual:
    case Assembler::LessThan:
    case Assembler::LessThanOrEqual:
      load16SignExtend(lhs, scratch);
      cmp32Set(cond, scratch, Imm32(int16_t(rhs.value)), dest);
      break;
    default:
      MOZ_CRASH("unexpected condition");
  }
}
// dest = (lhs <cond> rhs) ? 1 : 0, 32-bit compare + flag materialization.
template <typename T1, typename T2>
void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
  cmp32(lhs, rhs);
  emitSet(cond, dest);
}
// 64-bit compare-and-set; pointers are 64-bit on arm64, so delegate to
// the pointer-width variant.
void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
                              Register dest) {
  cmpPtrSet(cond, lhs, ImmWord(static_cast<uintptr_t>(rhs.value)), dest);
}
// Pointer-width compare + flag materialization.
template <typename T1, typename T2>
void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
  cmpPtr(lhs, rhs);
  emitSet(cond, dest);
}
// ===============================================================
// Rotation functions
// Rotations.  ARM64 only has a rotate-right (ROR), so left rotates are
// emitted as right rotates by the complement of the count.

// dest = input rotated left by a constant: ROR by (32 - count) mod 32.
void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
  Ror(ARMRegister(dest, 32), ARMRegister(input, 32), (32 - count.value) & 31);
}
// dest = input rotated left by a register count.
void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch = temps.AcquireW();
  // Really 32 - count, but the upper bits of the result are ignored.
  Neg(scratch, ARMRegister(count, 32));
  Ror(ARMRegister(dest, 32), ARMRegister(input, 32), scratch);
}
// dest = input rotated right by a constant.
void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
  Ror(ARMRegister(dest, 32), ARMRegister(input, 32), count.value & 31);
}
// dest = input rotated right by a register count.
void MacroAssembler::rotateRight(Register count, Register input,
                                 Register dest) {
  Ror(ARMRegister(dest, 32), ARMRegister(input, 32), ARMRegister(count, 32));
}
// 64-bit rotations; temp is unused on arm64 (asserted Invalid).
void MacroAssembler::rotateLeft64(Register count, Register64 input,
                                  Register64 dest, Register temp) {
  MOZ_ASSERT(temp == Register::Invalid());
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch = temps.AcquireX();
  // Really 64 - count, but the upper bits of the result are ignored.
  Neg(scratch, ARMRegister(count, 64));
  Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64), scratch);
}
void MacroAssembler::rotateLeft64(Imm32 count, Register64 input,
                                  Register64 dest, Register temp) {
  MOZ_ASSERT(temp == Register::Invalid());
  Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64),
      (64 - count.value) & 63);
}
void MacroAssembler::rotateRight64(Register count, Register64 input,
                                   Register64 dest, Register temp) {
  MOZ_ASSERT(temp == Register::Invalid());
  Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64),
      ARMRegister(count, 64));
}
void MacroAssembler::rotateRight64(Imm32 count, Register64 input,
                                   Register64 dest, Register temp) {
  MOZ_ASSERT(temp == Register::Invalid());
  Ror(ARMRegister(dest.reg, 64), ARMRegister(input.reg, 64), count.value & 63);
}
// ===============================================================
// Bit counting functions
// Count leading zeros (32-bit).  knownNotZero is unused: CLZ is
// well-defined for a zero input on ARM64, so no special case is needed.
void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
  Clz(ARMRegister(dest, 32), ARMRegister(src, 32));
}
// Count trailing zeros (32-bit): bit-reverse, then count leading zeros.
void MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero) {
  Rbit(ARMRegister(dest, 32), ARMRegister(src, 32));
  Clz(ARMRegister(dest, 32), ARMRegister(dest, 32));
}
// 64-bit counterparts of the above.
void MacroAssembler::clz64(Register64 src, Register dest) {
  Clz(ARMRegister(dest, 64), ARMRegister(src.reg, 64));
}
void MacroAssembler::ctz64(Register64 src, Register dest) {
  Rbit(ARMRegister(dest, 64), ARMRegister(src.reg, 64));
  Clz(ARMRegister(dest, 64), ARMRegister(dest, 64));
}
// Population count (32-bit) via the classic SWAR bit-twiddling
// sequence; a required temp register preserves the source while the
// partial sums are built in dest.
void MacroAssembler::popcnt32(Register src_, Register dest_, Register tmp_) {
  MOZ_ASSERT(tmp_ != Register::Invalid());
  // Equivalent to mozilla::CountPopulation32().
  ARMRegister src(src_, 32);
  ARMRegister dest(dest_, 32);
  ARMRegister tmp(tmp_, 32);
  Mov(tmp, src);
  if (src_ != dest_) {
    Mov(dest, src);
  }
  // Sum bits pairwise: dest = src - ((src >> 1) & 0x55555555).
  Lsr(dest, dest, 1);
  And(dest, dest, 0x55555555);
  Sub(dest, tmp, dest);
  // Sum the pair counts into 4-bit fields.
  Lsr(tmp, dest, 2);
  And(tmp, tmp, 0x33333333);
  And(dest, dest, 0x33333333);
  Add(dest, tmp, dest);
  // Sum 4-bit fields into bytes, then accumulate all bytes into the
  // top byte via shifted adds and extract it.
  Add(dest, dest, Operand(dest, vixl::LSR, 4));
  And(dest, dest, 0x0F0F0F0F);
  Add(dest, dest, Operand(dest, vixl::LSL, 8));
  Add(dest, dest, Operand(dest, vixl::LSL, 16));
  Lsr(dest, dest, 24);
}
// Population count (64-bit): the same SWAR sequence as popcnt32 with
// 64-bit masks, one extra shifted add, and the result extracted from
// the top byte (shift by 56).
void MacroAssembler::popcnt64(Register64 src_, Register64 dest_,
                              Register tmp_) {
  MOZ_ASSERT(tmp_ != Register::Invalid());
  // Equivalent to mozilla::CountPopulation64(), though likely more efficient.
  ARMRegister src(src_.reg, 64);
  ARMRegister dest(dest_.reg, 64);
  ARMRegister tmp(tmp_, 64);
  Mov(tmp, src);
  if (src_ != dest_) {
    Mov(dest, src);
  }
  // Pairwise bit sums.
  Lsr(dest, dest, 1);
  And(dest, dest, 0x5555555555555555);
  Sub(dest, tmp, dest);
  // 4-bit field sums.
  Lsr(tmp, dest, 2);
  And(tmp, tmp, 0x3333333333333333);
  And(dest, dest, 0x3333333333333333);
  Add(dest, tmp, dest);
  // Byte sums, then accumulate all bytes into the top byte.
  Add(dest, dest, Operand(dest, vixl::LSR, 4));
  And(dest, dest, 0x0F0F0F0F0F0F0F0F);
  Add(dest, dest, Operand(dest, vixl::LSL, 8));
  Add(dest, dest, Operand(dest, vixl::LSL, 16));
  Add(dest, dest, Operand(dest, vixl::LSL, 32));
  Lsr(dest, dest, 56);
}
// ===============================================================
// Branch functions
// Branch on an 8-bit memory operand compared against an immediate.
// As in cmp8Set: unsigned conditions zero-extend the loaded byte and
// the immediate; signed conditions sign-extend both.
void MacroAssembler::branch8(Condition cond, const Address& lhs, Imm32 rhs,
                             Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  switch (cond) {
    case Assembler::Equal:
    case Assembler::NotEqual:
    case Assembler::Above:
    case Assembler::AboveOrEqual:
    case Assembler::Below:
    case Assembler::BelowOrEqual:
      load8ZeroExtend(lhs, scratch);
      branch32(cond, scratch, Imm32(uint8_t(rhs.value)), label);
      break;
    case Assembler::GreaterThan:
    case Assembler::GreaterThanOrEqual:
    case Assembler::LessThan:
    case Assembler::LessThanOrEqual:
      load8SignExtend(lhs, scratch);
      branch32(cond, scratch, Imm32(int8_t(rhs.value)), label);
      break;
    default:
      MOZ_CRASH("unexpected condition");
  }
}
// Branch on an 8-bit BaseIndex memory operand compared against a
// register; extension of the loaded byte is chosen by the condition's
// signedness (rhs is used as-is).
void MacroAssembler::branch8(Condition cond, const BaseIndex& lhs, Register rhs,
                             Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  switch (cond) {
    case Assembler::Equal:
    case Assembler::NotEqual:
    case Assembler::Above:
    case Assembler::AboveOrEqual:
    case Assembler::Below:
    case Assembler::BelowOrEqual:
      load8ZeroExtend(lhs, scratch);
      branch32(cond, scratch, rhs, label);
      break;
    case Assembler::GreaterThan:
    case Assembler::GreaterThanOrEqual:
    case Assembler::LessThan:
    case Assembler::LessThanOrEqual:
      load8SignExtend(lhs, scratch);
      branch32(cond, scratch, rhs, label);
      break;
    default:
      MOZ_CRASH("unexpected condition");
  }
}
// Branch on a 16-bit memory operand compared against an immediate;
// same extension scheme as branch8.
void MacroAssembler::branch16(Condition cond, const Address& lhs, Imm32 rhs,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  switch (cond) {
    case Assembler::Equal:
    case Assembler::NotEqual:
    case Assembler::Above:
    case Assembler::AboveOrEqual:
    case Assembler::Below:
    case Assembler::BelowOrEqual:
      load16ZeroExtend(lhs, scratch);
      branch32(cond, scratch, Imm32(uint16_t(rhs.value)), label);
      break;
    case Assembler::GreaterThan:
    case Assembler::GreaterThanOrEqual:
    case Assembler::LessThan:
    case Assembler::LessThanOrEqual:
      load16SignExtend(lhs, scratch);
      branch32(cond, scratch, Imm32(int16_t(rhs.value)), label);
      break;
    default:
      MOZ_CRASH("unexpected condition");
  }
}
// Compare two 32-bit registers and branch on |cond|. The template parameter
// L accepts the different label types branch targets use (e.g. Label*).
template <class L>
void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
                              L label) {
  cmp32(lhs, rhs);
  B(label, cond);
}
// Compare a 32-bit register against an immediate and branch on |cond|.
// (In)equality tests against zero are emitted as a single CBZ/CBNZ,
// avoiding a separate compare instruction.
template <class L>
void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm,
                              L label) {
  const ARMRegister lhs32(lhs, 32);
  if (imm.value == 0) {
    if (cond == Assembler::Equal) {
      Cbz(lhs32, label);
      return;
    }
    if (cond == Assembler::NotEqual) {
      Cbnz(lhs32, label);
      return;
    }
  }
  // General case: set the flags, then branch on the condition.
  cmp32(lhs, imm);
  B(label, cond);
}
// Compare |lhs| against the 32-bit value loaded from [rhs] and branch.
void MacroAssembler::branch32(Condition cond, Register lhs, const Address& rhs,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  // The scratch register must not alias anything the load or compare reads.
  MOZ_ASSERT(scratch != lhs);
  MOZ_ASSERT(scratch != rhs.base);
  load32(rhs, scratch);
  branch32(cond, lhs, scratch, label);
}
// Compare the 32-bit value loaded from [lhs] against |rhs| and branch.
void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  // The scratch register must not alias anything the load or compare reads.
  MOZ_ASSERT(scratch != lhs.base);
  MOZ_ASSERT(scratch != rhs);
  load32(lhs, scratch);
  branch32(cond, scratch, rhs, label);
}
// Compare the 32-bit value loaded from [lhs] against an immediate and branch.
void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 imm,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  load32(lhs, scratch);
  branch32(cond, scratch, imm, label);
}
// Compare the 32-bit value at an absolute address against |rhs| and branch.
// The address is first materialized into a scratch register; the nested
// branch32 overload acquires its own (second) scratch for the load.
void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
                              Register rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  movePtr(ImmPtr(lhs.addr), scratch);
  branch32(cond, Address(scratch, 0), rhs, label);
}
// Compare the 32-bit value at an absolute address against an immediate
// and branch.
void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
                              Imm32 rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  load32(lhs, scratch);
  branch32(cond, scratch, rhs, label);
}
// Compare the 32-bit value at [base + index*scale + offset] against an
// immediate and branch. Uses a W (32-bit) scratch and a direct LDR_w since
// BaseIndex addressing needs the dedicated doBaseIndex emission path.
void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
                              Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch32 = temps.AcquireW();
  MOZ_ASSERT(scratch32.asUnsized() != lhs.base);
  MOZ_ASSERT(scratch32.asUnsized() != lhs.index);
  doBaseIndex(scratch32, lhs, vixl::LDR_w);
  branch32(cond, scratch32.asUnsized(), rhs, label);
}
// Compare the 32-bit value at a wasm symbolic address against an immediate
// and branch. The symbol's address is patched into the scratch register;
// the nested branch32 overload acquires a second scratch for the load.
void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs,
                              Imm32 rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  movePtr(lhs, scratch);
  branch32(cond, Address(scratch, 0), rhs, label);
}
void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
Label* success, Label* fail) {
if (val.value == 0 && cond == Assembler::Equal) {
Cbz(ARMRegister(lhs.reg, 64), success);
} else if (val.value == 0 && cond == Assembler::NotEqual) {
Cbnz(ARMRegister(lhs.reg, 64), success);
} else {
Cmp(ARMRegister(lhs.reg, 64), val.value);
B(success, cond);
}
if (fail) {
B(fail);
}
}
// Compare two 64-bit registers; jump to |success| when |cond| holds,
// otherwise fall through to |fail| (when non-null).
void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
                              Label* success, Label* fail) {
  Cmp(ARMRegister(lhs.reg, 64), ARMRegister(rhs.reg, 64));
  B(success, cond);
  if (fail) {
    B(fail);
  }
}
// Compare the 64-bit value at [lhs] against an immediate and branch.
// On ARM64 a pointer is 64 bits, so this forwards to branchPtr.
void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
                              Label* label) {
  MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
             "other condition codes not supported");
  branchPtr(cond, lhs, ImmWord(val.value), label);
}
// Compare the 64-bit value at [lhs] against a register and branch.
// On ARM64 a pointer is 64 bits, so this forwards to branchPtr.
void MacroAssembler::branch64(Condition cond, const Address& lhs,
                              Register64 rhs, Label* label) {
  MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
             "other condition codes not supported");
  branchPtr(cond, lhs, rhs.reg, label);
}
// Compare two 64-bit memory operands and branch. The caller supplies the
// scratch register used to hold the value loaded from [rhs].
void MacroAssembler::branch64(Condition cond, const Address& lhs,
                              const Address& rhs, Register scratch,
                              Label* label) {
  MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
             "other condition codes not supported");
  MOZ_ASSERT(lhs.base != scratch);
  MOZ_ASSERT(rhs.base != scratch);
  loadPtr(rhs, scratch);
  branchPtr(cond, lhs, scratch, label);
}
// Compare two pointer-sized (64-bit) registers and branch on |cond|.
template <class L>
void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
                               L label) {
  Cmp(ARMRegister(lhs, 64), ARMRegister(rhs, 64));
  B(label, cond);
}
// Compare a pointer-sized register against a 32-bit immediate and branch.
// (In)equality tests against zero fold into a single CBZ/CBNZ.
void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
                               Label* label) {
  const ARMRegister lhs64(lhs, 64);
  if (rhs.value == 0) {
    if (cond == Assembler::Equal) {
      Cbz(lhs64, label);
      return;
    }
    if (cond == Assembler::NotEqual) {
      Cbnz(lhs64, label);
      return;
    }
  }
  cmpPtr(lhs, rhs);
  B(label, cond);
}
// Compare a pointer-sized register against an immediate pointer and branch.
// Null-pointer (in)equality tests fold into a single CBZ/CBNZ.
void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
                               Label* label) {
  const ARMRegister lhs64(lhs, 64);
  if (rhs.value == nullptr) {
    if (cond == Assembler::Equal) {
      Cbz(lhs64, label);
      return;
    }
    if (cond == Assembler::NotEqual) {
      Cbnz(lhs64, label);
      return;
    }
  }
  cmpPtr(lhs, rhs);
  B(label, cond);
}
// Compare a register against a GC-thing pointer and branch. The GC pointer
// is materialized (patchably) into a scratch register first.
void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
                               Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs);
  movePtr(rhs, scratch);
  branchPtr(cond, lhs, scratch, label);
}
// Compare a pointer-sized register against a word immediate and branch.
// (In)equality tests against zero fold into a single CBZ/CBNZ.
void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
                               Label* label) {
  const ARMRegister lhs64(lhs, 64);
  if (rhs.value == 0) {
    if (cond == Assembler::Equal) {
      Cbz(lhs64, label);
      return;
    }
    if (cond == Assembler::NotEqual) {
      Cbnz(lhs64, label);
      return;
    }
  }
  cmpPtr(lhs, rhs);
  B(label, cond);
}
// Compare the pointer at [lhs] against a register and branch.
template <class L>
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
                               L label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  // The scratch register must not alias anything the load or compare reads.
  MOZ_ASSERT(scratch != lhs.base);
  MOZ_ASSERT(scratch != rhs);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
// Compare the pointer at [lhs] against an immediate pointer and branch.
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
                               Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
// Compare the pointer at [lhs] against a GC-thing pointer and branch.
// Needs two values live at once (the loaded pointer and the materialized
// GC pointer), so both vixl scratch registers are acquired here.
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
                               Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch1_64 = temps.AcquireX();
  const ARMRegister scratch2_64 = temps.AcquireX();
  MOZ_ASSERT(scratch1_64.asUnsized() != lhs.base);
  MOZ_ASSERT(scratch2_64.asUnsized() != lhs.base);
  movePtr(rhs, scratch1_64.asUnsized());
  loadPtr(lhs, scratch2_64.asUnsized());
  branchPtr(cond, scratch2_64.asUnsized(), scratch1_64.asUnsized(), label);
}
// Compare the pointer at [lhs] against a word immediate and branch.
void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
                               Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
// Compare the pointer at an absolute address against a register and branch.
void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
                               Register rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != rhs);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
// Compare the pointer at an absolute address against a word immediate
// and branch.
void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
                               ImmWord rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
// Compare the pointer at a wasm symbolic address against a register
// and branch.
void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
                               Register rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != rhs);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
// Compare the pointer at [base + index*scale + offset] against a word
// immediate and branch.
void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
                               ImmWord rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  MOZ_ASSERT(scratch != lhs.index);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
// Compare the pointer at [base + index*scale + offset] against a register
// and branch.
void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
                               Register rhs, Label* label) {
  vixl::UseScratchRegisterScope temps(this);
  const Register scratch = temps.AcquireX().asUnsized();
  MOZ_ASSERT(scratch != lhs.base);
  MOZ_ASSERT(scratch != lhs.index);
  loadPtr(lhs, scratch);
  branchPtr(cond, scratch, rhs, label);
}
// Branch on a "private" pointer stored in an object slot. On ARM64 private
// pointers need no unboxing/untagging here, so this is a plain branchPtr.
void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
                                      Register rhs, Label* label) {
  branchPtr(cond, lhs, rhs, label);
}
// Branch on a float32 comparison. After an FCMP, an unordered result (a NaN
// operand) sets the V flag, so Assembler::Overflow acts as the "unordered"
// condition; the two NaN-sensitive conditions need extra branches.
void MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs,
                                 FloatRegister rhs, Label* label) {
  compareFloat(cond, lhs, rhs);
  switch (cond) {
    case DoubleNotEqual: {
      Label unordered;
      // not equal *and* ordered
      branch(Overflow, &unordered);
      branch(NotEqual, label);
      bind(&unordered);
      break;
    }
    case DoubleEqualOrUnordered:
      // Taken if the operands are unordered OR compare equal.
      branch(Overflow, label);
      branch(Equal, label);
      break;
    default:
      // All remaining conditions map directly onto one flag combination.
      branch(Condition(cond), label);
  }
}
// Truncate a float32 to an int32 in |dest|, branching to |fail| on overflow.
// "MaybeModUint32" contract: out-of-int32-range values may wrap modulo 2^32,
// which the 64-bit convert + 32-bit truncation below provides.
void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
                                                         Register dest,
                                                         Label* fail) {
  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();

  ARMFPRegister src32(src, 32);
  ARMRegister dest64(dest, 64);

  MOZ_ASSERT(!scratch64.Is(dest64));

  // Convert scalar to signed 64-bit fixed-point, rounding toward zero.
  // In the case of overflow, the output is saturated.
  // In the case of NaN and -0, the output is zero.
  Fcvtzs(dest64, src32);

  // Fail if the result is saturated, i.e. it's either INT64_MIN or INT64_MAX.
  // dest + INT64_MAX is -1 for INT64_MIN and -2 for INT64_MAX; CMN #3 then
  // compares against -3, and Above holds only for those two values.
  Add(scratch64, dest64, Operand(0x7fff'ffff'ffff'ffff));
  Cmn(scratch64, 3);
  B(fail, Assembler::Above);

  // Clear upper 32 bits.
  Uxtw(dest64, dest64);
}
// Truncate a float32 to int32, branching to |fail| if the value is not
// exactly representable. The final |false| argument is forwarded to
// convertFloat32ToInt32 (negative-zero handling flag — see its definition).
void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
                                                  Register dest, Label* fail) {
  convertFloat32ToInt32(src, dest, fail, false);
}
// Branch on a double comparison. After an FCMP, an unordered result (a NaN
// operand) sets the V flag, so Assembler::Overflow acts as the "unordered"
// condition; the two NaN-sensitive conditions need extra branches.
void MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs,
                                  FloatRegister rhs, Label* label) {
  compareDouble(cond, lhs, rhs);
  switch (cond) {
    case DoubleNotEqual: {
      Label unordered;
      // not equal *and* ordered
      branch(Overflow, &unordered);
      branch(NotEqual, label);
      bind(&unordered);
      break;
    }
    case DoubleEqualOrUnordered:
      // Taken if the operands are unordered OR compare equal.
      branch(Overflow, label);
      branch(Equal, label);
      break;
    default:
      // All remaining conditions map directly onto one flag combination.
      branch(Condition(cond), label);
  }
}
// Truncate a double to an int32 in |dest| with JS ToInt32 semantics,
// branching to |fail| only when the slow path cannot express the modulo-2^32
// result. On ARMv8.3+ hardware the single FJCVTZS instruction implements
// this directly and never needs the failure path.
void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
                                                        Register dest,
                                                        Label* fail) {
  // ARMv8.3 chips support the FJCVTZS instruction, which handles exactly this
  // logic. But the simulator does not implement it.
#if defined(JS_SIMULATOR_ARM64)
  const bool fjscvt = false;
#else
  const bool fjscvt = CPUHas(vixl::CPUFeatures::kFP, vixl::CPUFeatures::kJSCVT);
#endif
  if (fjscvt) {
    Fjcvtzs(ARMRegister(dest, 32), ARMFPRegister(src, 64));
    return;
  }

  vixl::UseScratchRegisterScope temps(this);
  const ARMRegister scratch64 = temps.AcquireX();

  // An out of range integer will be saturated to the destination size.
  ARMFPRegister src64(src, 64);
  ARMRegister dest64(dest, 64);

  MOZ_ASSERT(!scratch64.Is(dest64));

  // Convert scalar to signed 64-bit fixed-point, rounding toward zero.
  // In the case of overflow, the output is saturated.
  // In the case of NaN and -0, the output is zero.
  Fcvtzs(dest64, src64);

  // Fail if the result is saturated, i.e. it's either INT64_MIN or INT64_MAX.
  // dest + INT64_MAX is -1 for INT64_MIN and -2 for INT64_MAX; CMN #3 then
  // compares against -3, and Above holds only for those two values.
  Add(scratch64, dest64, Operand(0x7fff'ffff'ffff'ffff));
  Cmn(scratch64, 3);
  B(fail, Assembler::Above);

  // Clear upper 32 bits.
  Uxtw(dest64, dest64);
}
// Truncate a double to int32, branching to |fail| if the truncated value
// does not fit in 32 bits.
void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
                                                 Register dest, Label* fail) {
  ARMFPRegister src64(src, 64);
  ARMRegister dest64(dest, 64);
  ARMRegister dest32(dest, 32);

  // Convert scalar to signed 64-bit fixed-point, rounding toward zero.
  // In the case of overflow, the output is saturated.
  // In the case of NaN and -0, the output is zero.
  Fcvtzs(dest64, src64);

  // Fail on overflow cases: the 64-bit result fits in int32 iff it equals
  // its own low 32 bits sign-extended back to 64 bits.
  Cmp(dest64, Operand(dest32, vixl::SXTW));
  B(fail, Assembler::NotEqual);

  // Clear upper 32 bits.
  Uxtw(dest64, dest64);
}
// dest += src (32-bit, flag-setting), then branch on |cond| (e.g. Overflow).
template <typename T>
void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
                                 Label* label) {
  adds32(src, dest);
  B(label, cond);
}
// dest -= src (32-bit, flag-setting), then branch on |cond|.
template <typename T>
void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
                                 Label* label) {
  subs32(src, dest);
  branch(cond, label);
}
template <typename T>
void MacroAssembler::branchMul32(Condition cond, T src, Register dest,
Label* label) {
MOZ_ASSERT(cond == Assembler::Overflow);
vixl::