/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_x64_MacroAssembler_x64_inl_h
#define jit_x64_MacroAssembler_x64_inl_h
#include "jit/x64/MacroAssembler-x64.h"
#include "jit/x86-shared/MacroAssembler-x86-shared-inl.h"
namespace js {
namespace jit {
//{{{ check_macroassembler_style
// ===============================================================
void MacroAssembler::move64(Imm64 imm, Register64 dest) {
// Use mov instead of movq because it has special optimizations for imm == 0.
mov(ImmWord(imm.value), dest.reg);
}
void MacroAssembler::move64(Register64 src, Register64 dest) {
movq(src.reg, dest.reg);
}
void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
vmovq(src, dest.reg);
}
void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
vmovq(src.reg, dest);
}
void MacroAssembler::move64To32(Register64 src, Register dest) {
movl(src.reg, dest);
}
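// Note: on x64 a 32-bit register write (movl) implicitly clears the upper
// 32 bits of the destination, which is why a plain movl serves both for the
// truncation above and for the zero-extending moves below.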
void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
movl(src, dest.reg);
}
void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
movsbq(Operand(src), dest.reg);
}
void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
movswq(Operand(src), dest.reg);
}
void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
movslq(src, dest.reg);
}
void MacroAssembler::move8SignExtendToPtr(Register src, Register dest) {
movsbq(Operand(src), dest);
}
void MacroAssembler::move16SignExtendToPtr(Register src, Register dest) {
movswq(Operand(src), dest);
}
void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
movslq(src, dest);
}
void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
movl(src, dest);
}
// ===============================================================
// Load instructions
void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
movslq(Operand(src), dest);
}
// ===============================================================
// Logical instructions
void MacroAssembler::notPtr(Register reg) { notq(reg); }
void MacroAssembler::andPtr(Register src, Register dest) { andq(src, dest); }
void MacroAssembler::andPtr(Imm32 imm, Register dest) { andq(imm, dest); }
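// x64 ALU instructions only accept immediates of up to 32 bits, and those are
// sign-extended to 64 bits. Hence the helpers below (and the similar
// addPtr/subPtr/mul64 overloads) can encode the immediate directly only when
// it fits in the int32 range; otherwise it is first materialized in the
// scratch register.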
void MacroAssembler::and64(Imm64 imm, Register64 dest) {
if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
andq(Imm32(imm.value), dest.reg);
} else {
ScratchRegisterScope scratch(*this);
movq(ImmWord(uintptr_t(imm.value)), scratch);
andq(scratch, dest.reg);
}
}
void MacroAssembler::or64(Imm64 imm, Register64 dest) {
if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
orq(Imm32(imm.value), dest.reg);
} else {
ScratchRegisterScope scratch(*this);
movq(ImmWord(uintptr_t(imm.value)), scratch);
orq(scratch, dest.reg);
}
}
void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
xorq(Imm32(imm.value), dest.reg);
} else {
ScratchRegisterScope scratch(*this);
movq(ImmWord(uintptr_t(imm.value)), scratch);
xorq(scratch, dest.reg);
}
}
void MacroAssembler::orPtr(Register src, Register dest) { orq(src, dest); }
void MacroAssembler::orPtr(Imm32 imm, Register dest) { orq(imm, dest); }
void MacroAssembler::and64(Register64 src, Register64 dest) {
andq(src.reg, dest.reg);
}
void MacroAssembler::or64(Register64 src, Register64 dest) {
orq(src.reg, dest.reg);
}
void MacroAssembler::xor64(Register64 src, Register64 dest) {
xorq(src.reg, dest.reg);
}
void MacroAssembler::xorPtr(Register src, Register dest) { xorq(src, dest); }
void MacroAssembler::xorPtr(Imm32 imm, Register dest) { xorq(imm, dest); }
void MacroAssembler::and64(const Operand& src, Register64 dest) {
andq(src, dest.reg);
}
void MacroAssembler::or64(const Operand& src, Register64 dest) {
orq(src, dest.reg);
}
void MacroAssembler::xor64(const Operand& src, Register64 dest) {
xorq(src, dest.reg);
}
// ===============================================================
// Swap instructions
void MacroAssembler::byteSwap64(Register64 reg) { bswapq(reg.reg); }
// ===============================================================
// Arithmetic functions
void MacroAssembler::addPtr(Register src, Register dest) { addq(src, dest); }
void MacroAssembler::addPtr(Imm32 imm, Register dest) { addq(imm, dest); }
void MacroAssembler::addPtr(ImmWord imm, Register dest) {
ScratchRegisterScope scratch(*this);
MOZ_ASSERT(dest != scratch);
if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
addq(Imm32((int32_t)imm.value), dest);
} else {
mov(imm, scratch);
addq(scratch, dest);
}
}
void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
addq(imm, Operand(dest));
}
void MacroAssembler::addPtr(Imm32 imm, const AbsoluteAddress& dest) {
addq(imm, Operand(dest));
}
void MacroAssembler::addPtr(const Address& src, Register dest) {
addq(Operand(src), dest);
}
void MacroAssembler::add64(const Operand& src, Register64 dest) {
addq(src, dest.reg);
}
void MacroAssembler::add64(Register64 src, Register64 dest) {
addq(src.reg, dest.reg);
}
void MacroAssembler::add64(Imm32 imm, Register64 dest) { addq(imm, dest.reg); }
void MacroAssembler::add64(Imm64 imm, Register64 dest) {
addPtr(ImmWord(imm.value), dest.reg);
}
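// The patchable stack-pointer subtraction is emitted as an add with a
// placeholder immediate of 0; patchSub32FromStackPtr later rewrites that
// immediate to -imm, so the net effect is dest = sp - imm.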
CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
moveStackPtrTo(dest);
addqWithPatch(Imm32(0), dest);
return CodeOffset(currentOffset());
}
void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
patchAddq(offset, -imm.value);
}
void MacroAssembler::subPtr(Register src, Register dest) { subq(src, dest); }
void MacroAssembler::subPtr(Register src, const Address& dest) {
subq(src, Operand(dest));
}
void MacroAssembler::subPtr(Imm32 imm, Register dest) { subq(imm, dest); }
void MacroAssembler::subPtr(ImmWord imm, Register dest) {
ScratchRegisterScope scratch(*this);
MOZ_ASSERT(dest != scratch);
if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
subq(Imm32((int32_t)imm.value), dest);
} else {
mov(imm, scratch);
subq(scratch, dest);
}
}
void MacroAssembler::subPtr(const Address& addr, Register dest) {
subq(Operand(addr), dest);
}
void MacroAssembler::sub64(const Operand& src, Register64 dest) {
subq(src, dest.reg);
}
void MacroAssembler::sub64(Register64 src, Register64 dest) {
subq(src.reg, dest.reg);
}
void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
subPtr(ImmWord(imm.value), dest.reg);
}
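// mulHighUnsigned32 computes the high 32 bits of the unsigned 32x32-bit
// product of |imm| and |src|. A rough C++ sketch of the intended result
// (illustration only, not emitted code):
//
//   dest = uint32_t((uint64_t(uint32_t(imm)) * uint32_t(src)) >> 32);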
void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
// To compute the unsigned multiplication using imulq, we have to ensure that
// neither operand has any bits set in its upper 32 bits.
if (imm.value >= 0) {
// Clear the high word of |src|.
movl(src, src);
// |imm| and |src| are both positive, so directly perform imulq.
imulq(imm, src, dest);
} else {
// Store the low word of |src| into |dest|.
movl(src, dest);
// Compute the unsigned value of |imm| before performing imulq.
movl(imm, ScratchReg);
imulq(ScratchReg, dest);
}
// Move the high word into |dest|.
shrq(Imm32(32), dest);
}
void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
imulq(rhs, srcDest);
}
void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
const Register temp) {
MOZ_ASSERT(temp == InvalidReg);
mul64(imm, dest);
}
void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
if (INT32_MIN <= int64_t(imm.value) && int64_t(imm.value) <= INT32_MAX) {
imulq(Imm32((int32_t)imm.value), dest.reg, dest.reg);
} else {
movq(ImmWord(uintptr_t(imm.value)), ScratchReg);
imulq(ScratchReg, dest.reg);
}
}
void MacroAssembler::mul64(const Register64& src, const Register64& dest,
const Register temp) {
MOZ_ASSERT(temp == InvalidReg);
mul64(Operand(src.reg), dest);
}
void MacroAssembler::mul64(const Operand& src, const Register64& dest) {
imulq(src, dest.reg);
}
void MacroAssembler::mul64(const Operand& src, const Register64& dest,
const Register temp) {
MOZ_ASSERT(temp == InvalidReg);
mul64(src, dest);
}
void MacroAssembler::mulBy3(Register src, Register dest) {
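// lea with base |src| and index |src| scaled by 2 computes src + 2 * src,
// i.e. 3 * src, without clobbering the flags.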
lea(Operand(src, src, TimesTwo), dest);
}
void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
FloatRegister dest) {
movq(imm, ScratchReg);
vmulsd(Operand(ScratchReg, 0), dest, dest);
}
void MacroAssembler::inc64(AbsoluteAddress dest) {
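// The absolute address can only be encoded directly if it fits in a 32-bit
// displacement; otherwise it is first loaded into the scratch register.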
if (X86Encoding::IsAddressImmediate(dest.addr)) {
addPtr(Imm32(1), dest);
} else {
ScratchRegisterScope scratch(*this);
mov(ImmPtr(dest.addr), scratch);
addPtr(Imm32(1), Address(scratch, 0));
}
}
void MacroAssembler::neg64(Register64 reg) { negq(reg.reg); }
void MacroAssembler::negPtr(Register reg) { negq(reg); }
// ===============================================================
// Shift functions
void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
MOZ_ASSERT(0 <= imm.value && imm.value < 64);
shlq(imm, dest);
}
void MacroAssembler::lshiftPtr(Register shift, Register srcDest) {
if (Assembler::HasBMI2()) {
shlxq(srcDest, shift, srcDest);
return;
}
MOZ_ASSERT(shift == rcx);
shlq_cl(srcDest);
}
void MacroAssembler::flexibleLshiftPtr(Register shift, Register srcDest) {
if (HasBMI2()) {
shlxq(srcDest, shift, srcDest);
return;
}
if (shift == rcx) {
shlq_cl(srcDest);
} else {
// Shift amount must be in rcx.
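// The xchg moves |shift| into rcx; if the value being shifted lived in
// |shift| it is now in rcx, and if it lived in rcx it is now in |shift|,
// which is what the operand selection below accounts for. The second xchg
// restores both registers afterwards.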
xchg(shift, rcx);
shlq_cl(shift == srcDest ? rcx : srcDest == rcx ? shift : srcDest);
xchg(shift, rcx);
}
}
void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
MOZ_ASSERT(0 <= imm.value && imm.value < 64);
lshiftPtr(imm, dest.reg);
}
void MacroAssembler::lshift64(Register shift, Register64 srcDest) {
if (Assembler::HasBMI2()) {
shlxq(srcDest.reg, shift, srcDest.reg);
return;
}
MOZ_ASSERT(shift == rcx);
shlq_cl(srcDest.reg);
}
void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
MOZ_ASSERT(0 <= imm.value && imm.value < 64);
shrq(imm, dest);
}
void MacroAssembler::rshiftPtr(Register shift, Register srcDest) {
if (Assembler::HasBMI2()) {
shrxq(srcDest, shift, srcDest);
return;
}
MOZ_ASSERT(shift == rcx);
shrq_cl(srcDest);
}
void MacroAssembler::flexibleRshiftPtr(Register shift, Register srcDest) {
if (HasBMI2()) {
shrxq(srcDest, shift, srcDest);
return;
}
if (shift == rcx) {
shrq_cl(srcDest);
} else {
// Shift amount must be in rcx.
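// Same register-swap dance as in flexibleLshiftPtr above.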
xchg(shift, rcx);
shrq_cl(shift == srcDest ? rcx : srcDest == rcx ? shift : srcDest);
xchg(shift, rcx);
}
}
void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
rshiftPtr(imm, dest.reg);
}
void MacroAssembler::rshift64(Register shift, Register64 srcDest) {
if (Assembler::HasBMI2()) {
shrxq(srcDest.reg, shift, srcDest.reg);
return;
}
MOZ_ASSERT(shift == rcx);
shrq_cl(srcDest.reg);
}
void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
MOZ_ASSERT(0 <= imm.value && imm.value < 64);
sarq(imm, dest);
}
void MacroAssembler::rshiftPtrArithmetic(Register shift, Register srcDest) {
if (Assembler::HasBMI2()) {
sarxq(srcDest, shift, srcDest);
return;
}
MOZ_ASSERT(shift == rcx);
sarq_cl(srcDest);
}
void MacroAssembler::flexibleRshiftPtrArithmetic(Register shift,
Register srcDest) {
if (HasBMI2()) {
sarxq(srcDest, shift, srcDest);
return;
}
if (shift == rcx) {
sarq_cl(srcDest);
} else {
// Shift amount must be in rcx.
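// Same register-swap dance as in flexibleLshiftPtr above.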
xchg(shift, rcx);
sarq_cl(shift == srcDest ? rcx : srcDest == rcx ? shift : srcDest);
xchg(shift, rcx);
}
}
void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
MOZ_ASSERT(0 <= imm.value && imm.value < 64);
rshiftPtrArithmetic(imm, dest.reg);
}
void MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest) {
if (Assembler::HasBMI2()) {
sarxq(srcDest.reg, shift, srcDest.reg);
return;
}
MOZ_ASSERT(shift == rcx);
sarq_cl(srcDest.reg);
}
// ===============================================================
// Rotation functions
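// The assertions below appear to mirror how these operations are lowered: the
// LIR node reuses its input as the output (defineReuseInput) and pins the
// count to ecx (defineFixed), so only those register combinations are
// expected here.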
void MacroAssembler::rotateLeft64(Register count, Register64 src,
Register64 dest) {
MOZ_ASSERT(src == dest, "defineReuseInput");
MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
rolq_cl(dest.reg);
}
void MacroAssembler::rotateLeft64(Register count, Register64 src,
Register64 dest, Register temp) {
MOZ_ASSERT(temp == InvalidReg);
rotateLeft64(count, src, dest);
}
void MacroAssembler::rotateRight64(Register count, Register64 src,
Register64 dest) {
MOZ_ASSERT(src == dest, "defineReuseInput");
MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
rorq_cl(dest.reg);
}
void MacroAssembler::rotateRight64(Register count, Register64 src,
Register64 dest, Register temp) {
MOZ_ASSERT(temp == InvalidReg);
rotateRight64(count, src, dest);
}
void MacroAssembler::rotateLeft64(Imm32 count, Register64 src,
Register64 dest) {
MOZ_ASSERT(src == dest, "defineReuseInput");
rolq(count, dest.reg);
}
void MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest,
Register temp) {
MOZ_ASSERT(temp == InvalidReg);
rotateLeft64(count, src, dest);
}
void MacroAssembler::rotateRight64(Imm32 count, Register64 src,
Register64 dest) {
MOZ_ASSERT(src == dest, "defineReuseInput");
rorq(count, dest.reg);
}
void MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest,
Register temp) {
MOZ_ASSERT(temp == InvalidReg);
rotateRight64(count, src, dest);
}
// ===============================================================
// Condition functions
void MacroAssembler::cmp64Set(Condition cond, Register64 lhs, Register64 rhs,
Register dest) {
cmpPtrSet(cond, lhs.reg, rhs.reg, dest);
}
void MacroAssembler::cmp64Set(Condition cond, Register64 lhs, Imm64 rhs,
Register dest) {
cmpPtrSet(cond, lhs.reg, ImmWord(static_cast<uintptr_t>(rhs.value)), dest);
}
void MacroAssembler::cmp64Set(Condition cond, Address lhs, Register64 rhs,
Register dest) {
cmpPtrSet(cond, lhs, rhs.reg, dest);
}
void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
Register dest) {
cmpPtrSet(cond, lhs, ImmWord(static_cast<uintptr_t>(rhs.value)), dest);
}
template <typename T1, typename T2>
void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
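// Presumably maybeEmitSetZeroByteRegister clears |dest| up front when that
// can be done without clobbering |lhs| or |rhs| (an xor emitted after the
// compare would reset the flags), so that emitSet only has to write the low
// byte instead of zero-extending the result afterwards.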
bool destIsZero = maybeEmitSetZeroByteRegister(lhs, rhs, dest);
cmpPtr(lhs, rhs);
emitSet(cond, dest, destIsZero);
}
// ===============================================================
// Bit counting functions
void MacroAssembler::clz64(Register64 src, Register64 dest) {
if (AssemblerX86Shared::HasLZCNT()) {
lzcntq(src.reg, dest.reg);
return;
}
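// Fallback: bsrq yields the bit index of the highest set bit, so the leading
// zero count is 63 - index, computed as index ^ 0x3F. A zero source sets ZF
// and leaves dest unspecified, so dest is preloaded with 0x7F, which the
// final xor turns into 64. E.g. src == 1: bsrq gives 0, and 0 ^ 0x3F == 63.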
Label nonzero;
bsrq(src.reg, dest.reg);
j(Assembler::NonZero, &nonzero);
movq(ImmWord(0x7F), dest.reg);
bind(&nonzero);
xorq(Imm32(0x3F), dest.reg);
}
void MacroAssembler::ctz64(Register64 src, Register64 dest) {
if (AssemblerX86Shared::HasBMI1()) {
tzcntq(src.reg, dest.reg);
return;
}
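// Fallback: bsfq yields the bit index of the lowest set bit, which is exactly
// the trailing zero count. A zero source sets ZF and leaves dest unspecified,
// so 64 is loaded explicitly in that case.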
Label nonzero;
bsfq(src.reg, dest.reg);
j(Assembler::NonZero, &nonzero);
movq(ImmWord(64), dest.reg);
bind(&nonzero);
}
void MacroAssembler::popcnt64(Register64 src64, Register64 dest64,
Register tmp) {
Register src = src64.reg;
Register dest = dest64.reg;
if (AssemblerX86Shared::HasPOPCNT()) {
popcntq(src, dest);
return;
}
MOZ_ASSERT(tmp != InvalidReg);
if (src != dest) {
movq(src, dest);
}
MOZ_ASSERT(tmp != dest);
ScratchRegisterScope scratch(*this);
// Equivalent to mozilla::CountPopulation32, adapted for 64 bits.
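// The masks used below are the usual SWAR popcount constants:
// m1 = 0x5555..., m2 = 0x3333..., m4 = 0x0f0f..., h01 = 0x0101...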
// x -= (x >> 1) & m1;
movq(src, tmp);
movq(ImmWord(0x5555555555555555), scratch);
shrq(Imm32(1), tmp);
andq(scratch, tmp);
subq(tmp, dest);
// x = (x & m2) + ((x >> 2) & m2);
movq(dest, tmp);
movq(ImmWord(0x3333333333333333), scratch);
andq(scratch, dest);
shrq(Imm32(2), tmp);
andq(scratch, tmp);
addq(tmp, dest);
// x = (x + (x >> 4)) & m4;
movq(dest, tmp);
movq(ImmWord(0x0f0f0f0f0f0f0f0f), scratch);
shrq(Imm32(4), tmp);
addq(tmp, dest);
andq(scratch, dest);
// (x * h01) >> 56
movq(ImmWord(0x0101010101010101), scratch);
imulq(scratch, dest);
shrq(Imm32(56), dest);
}
// ===============================================================
// Branch functions
void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
Register rhs, Label* label) {
if (X86Encoding::IsAddressImmediate(lhs.addr)) {
branch32(cond, Operand(lhs), rhs, label);
} else {
ScratchRegisterScope scratch(*this);
mov(ImmPtr(lhs.addr), scratch);
branch32(cond, Address(scratch, 0), rhs, label);
}
}
void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
Imm32 rhs, Label* label) {
if (X86Encoding::IsAddressImmediate(lhs.addr)) {
branch32(cond, Operand(lhs), rhs, label);
} else {
ScratchRegisterScope scratch(*this);
mov(ImmPtr(lhs.addr), scratch);
branch32(cond, Address(scratch, 0), rhs, label);
}
}
void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs,
Imm32 rhs, Label* label) {
ScratchRegisterScope scratch(*this);
mov(lhs, scratch);
branch32(cond, Address(scratch, 0), rhs, label);
}
void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
Label* success, Label* fail) {
MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
cond == Assembler::LessThan ||
cond == Assembler::LessThanOrEqual ||
cond == Assembler::GreaterThan ||
cond == Assembler::GreaterThanOrEqual ||
cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
cond == Assembler::Above || cond == Assembler::AboveOrEqual,
"other condition codes not supported");
branchPtr(cond, lhs.reg, ImmWord(val.value), success);
if (fail) {
jump(fail);
}
}
void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
Label* success, Label* fail) {
MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
cond == Assembler::LessThan ||
cond == Assembler::LessThanOrEqual ||
cond == Assembler::GreaterThan ||
cond == Assembler::GreaterThanOrEqual ||
cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
cond == Assembler::Above || cond == Assembler::AboveOrEqual,
"other condition codes not supported");
branchPtr(cond, lhs.reg, rhs.reg, success);
if (fail) {
jump(fail);
}
}
void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
Label* success, Label* fail) {
MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
cond == Assembler::LessThan ||
cond == Assembler::LessThanOrEqual ||
cond == Assembler::GreaterThan ||
cond == Assembler::GreaterThanOrEqual ||
cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
cond == Assembler::Above || cond == Assembler::AboveOrEqual,
"other condition codes not supported");
branchPtr(cond, lhs, ImmWord(val.value), success);
if (fail) {
jump(fail);
}
}
void MacroAssembler::branch64(Condition cond, const Address& lhs,
Register64 rhs, Label* success, Label* fail) {
MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
cond == Assembler::LessThan ||
cond == Assembler::LessThanOrEqual ||
cond == Assembler::GreaterThan ||
cond == Assembler::GreaterThanOrEqual ||
cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
cond == Assembler::Above || cond == Assembler::AboveOrEqual,
"other condition codes not supported");
branchPtr(cond, lhs, rhs.reg, success);
if (fail) {
jump(fail);
}
}
void MacroAssembler::branch64(Condition cond, const Address& lhs,
const Address& rhs, Register scratch,
Label* label) {
MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
"other condition codes not supported");
MOZ_ASSERT(lhs.base != scratch);
MOZ_ASSERT(rhs.base != scratch);
loadPtr(rhs, scratch);
branchPtr(cond, lhs, scratch, label);
}
void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
Register rhs, Label* label) {
ScratchRegisterScope scratch(*this);
MOZ_ASSERT(rhs != scratch);
if (X86Encoding::IsAddressImmediate(lhs.addr)) {
branchPtrImpl(cond, Operand(lhs), rhs, label);
} else {
mov(ImmPtr(lhs.addr), scratch);
branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
}
}
void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
ImmWord rhs, Label* label) {
if (X86Encoding::IsAddressImmediate(lhs.addr)) {
branchPtrImpl(cond, Operand(lhs), rhs, label);
} else {
ScratchRegisterScope scratch(*this);
mov(ImmPtr(lhs.addr), scratch);
branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
}
}
void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
Register rhs, Label* label) {
ScratchRegisterScope scratch(*this);
MOZ_ASSERT(rhs != scratch);
mov(lhs, scratch);
branchPtrImpl(cond, Operand(scratch, 0x0), rhs, label);
}
void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
Register rhs, Label* label) {
branchPtr(cond, lhs, rhs, label);
}
void MacroAssembler::branchTruncateFloat32ToPtr(FloatRegister src,
Register dest, Label* fail) {
vcvttss2sq(src, dest);
// Same trick as for Doubles
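// vcvttss2sq writes the sentinel INT64_MIN (0x8000000000000000) when the
// input is NaN or out of range. Subtracting 1 from that value is the only
// case that sets the overflow flag, so the cmp/j(Overflow) pair below
// catches every failed conversion.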
cmpPtr(dest, Imm32(1));
j(Assembler::Overflow, fail);
}
void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
Register dest,
Label* fail) {
branchTruncateFloat32ToPtr(src, dest, fail);
movl(dest, dest); // Zero upper 32-bits.
}
void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
Register dest, Label* fail) {
branchTruncateFloat32ToPtr(src, dest, fail);
// Check that the result is in the int32_t range.
ScratchRegisterScope scratch(*this);
move32To64SignExtend(dest, Register64(scratch));
cmpPtr(dest, scratch);
j(Assembler::NotEqual, fail);
movl(dest, dest); // Zero upper 32-bits.
}
void MacroAssembler::branchTruncateDoubleToPtr(FloatRegister src, Register dest,
Label* fail) {