Source code

Revision control

Copy as Markdown

Other Tools

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/arm64/CodeGenerator-arm64.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"
#include "jsnum.h"
#include "jit/CodeGenerator.h"
#include "jit/InlineScriptTree.h"
#include "jit/JitRuntime.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "jit/ReciprocalMulConstants.h"
#include "vm/JSContext.h"
#include "vm/Realm.h"
#include "vm/Shape.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
#include "vm/JSScript-inl.h"
using namespace js;
using namespace js::jit;
using JS::GenericNaN;
using mozilla::FloorLog2;
using mozilla::Maybe;
using mozilla::NegativeInfinity;
using mozilla::Nothing;
using mozilla::Some;
// shared
// Construct the ARM64 backend code generator. All shared state (LIR graph,
// assembler, snapshots) is owned by CodeGeneratorShared; nothing ARM64-
// specific needs initialization here.
CodeGeneratorARM64::CodeGeneratorARM64(MIRGenerator* gen, LIRGraph* graph,
                                       MacroAssembler* masm)
    : CodeGeneratorShared(gen, graph, masm) {}
bool CodeGeneratorARM64::generateOutOfLineCode() {
AutoCreatedBy acb(masm, "CodeGeneratorARM64::generateOutOfLineCode");
if (!CodeGeneratorShared::generateOutOfLineCode()) {
return false;
}
if (deoptLabel_.used()) {
// All non-table-based bailouts will go here.
masm.bind(&deoptLabel_);
// Store the frame size, so the handler can recover the IonScript.
masm.push(Imm32(frameSize()));
TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
masm.jump(handler);
}
return !masm.oom();
}
// Branch to |mirTrue| when |cond| holds, otherwise to |mirFalse|, eliding
// whichever branch falls through to the next emitted block.
void CodeGeneratorARM64::emitBranch(Assembler::Condition cond,
                                    MBasicBlock* mirTrue,
                                    MBasicBlock* mirFalse) {
  if (!isNextBlock(mirFalse->lir())) {
    // False target is not the fall-through: branch there on the inverted
    // condition and jump unconditionally to the true target.
    jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
    jumpToBlock(mirTrue);
  } else {
    // False target falls through; a single conditional branch suffices.
    jumpToBlock(mirTrue, cond);
  }
}
// Visitor dispatch: route this out-of-line bailout back to the ARM64
// code generator's handler.
void OutOfLineBailout::accept(CodeGeneratorARM64* codegen) {
  codegen->visitOutOfLineBailout(this);
}
// Branch on an int32 truth test: NonZero goes to the true block, Zero to
// the false block. Branches that fall through are elided.
void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
  Register input = ToRegister(test->input());
  MBasicBlock* trueTarget = test->ifTrue();
  MBasicBlock* falseTarget = test->ifFalse();

  if (isNextBlock(falseTarget->lir())) {
    // False block falls through; only branch when non-zero.
    masm.branch32(Assembler::NonZero, input, Imm32(0),
                  getJumpLabelForBranch(trueTarget));
    return;
  }

  // Branch away to the false block when zero.
  masm.branch32(Assembler::Zero, input, Imm32(0),
                getJumpLabelForBranch(falseTarget));
  if (!isNextBlock(trueTarget->lir())) {
    jumpToBlock(trueTarget);
  }
}
// Materialize the boolean result of a comparison into a register.
// Object/symbol/uintptr/wasm-anyref compares are pointer-width; all other
// compare types use 32-bit operands.
void CodeGenerator::visitCompare(LCompare* comp) {
  const MCompare* mir = comp->mir();
  const MCompare::CompareType type = mir->compareType();
  const Assembler::Condition cond = JSOpToCondition(type, comp->jsop());
  const Register lhs = ToRegister(comp->getOperand(0));
  const LAllocation* rhs = comp->getOperand(1);
  const Register dest = ToRegister(comp->getDef(0));

  const bool pointerWidth = type == MCompare::Compare_Object ||
                            type == MCompare::Compare_Symbol ||
                            type == MCompare::Compare_UIntPtr ||
                            type == MCompare::Compare_WasmAnyRef;
  if (pointerWidth) {
    if (rhs->isConstant()) {
      // Only UIntPtr compares can carry a constant right-hand side.
      MOZ_ASSERT(type == MCompare::Compare_UIntPtr);
      masm.cmpPtrSet(cond, lhs, Imm32(ToInt32(rhs)), dest);
    } else {
      masm.cmpPtrSet(cond, lhs, ToRegister(rhs), dest);
    }
    return;
  }

  if (rhs->isConstant()) {
    masm.cmp32Set(cond, lhs, Imm32(ToInt32(rhs)), dest);
  } else {
    masm.cmp32Set(cond, lhs, ToRegister(rhs), dest);
  }
}
// Fused compare-and-branch. Sets flags with a pointer-width or 32-bit
// compare (mirroring visitCompare), then branches on the JS op's condition.
void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
  const MCompare* mir = comp->cmpMir();
  const MCompare::CompareType type = mir->compareType();
  const LAllocation* left = comp->left();
  const LAllocation* right = comp->right();

  const bool pointerWidth = type == MCompare::Compare_Object ||
                            type == MCompare::Compare_Symbol ||
                            type == MCompare::Compare_UIntPtr ||
                            type == MCompare::Compare_WasmAnyRef;
  if (pointerWidth) {
    if (right->isConstant()) {
      // Only UIntPtr compares can carry a constant right-hand side.
      MOZ_ASSERT(type == MCompare::Compare_UIntPtr);
      masm.cmpPtr(ToRegister(left), Imm32(ToInt32(right)));
    } else {
      masm.cmpPtr(ToRegister(left), ToRegister(right));
    }
  } else if (right->isConstant()) {
    masm.cmp32(ToRegister(left), Imm32(ToInt32(right)));
  } else {
    masm.cmp32(ToRegister(left), ToRegister(right));
  }

  emitBranch(JSOpToCondition(type, comp->jsop()), comp->ifTrue(),
             comp->ifFalse());
}
// Emit a conditional branch into an out-of-line bailout for |snapshot|.
// The OOL stub pushes the snapshot offset and joins the shared deopt tail.
void CodeGeneratorARM64::bailoutIf(Assembler::Condition condition,
                                   LSnapshot* snapshot) {
  encode(snapshot);

  // Attribute the OOL code to the snapshot's bytecode site for profiling.
  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  OutOfLineBailout* ool = new (alloc()) OutOfLineBailout(snapshot);
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  masm.B(ool->entry(), condition);
}
// Redirect an already-emitted (used, unbound) |label| to a fresh
// out-of-line bailout stub for |snapshot|.
void CodeGeneratorARM64::bailoutFrom(Label* label, LSnapshot* snapshot) {
  // The label must have pending uses but no binding yet, unless we OOM'd.
  MOZ_ASSERT_IF(!masm.oom(), label->used());
  MOZ_ASSERT_IF(!masm.oom(), !label->bound());

  encode(snapshot);

  // Attribute the OOL code to the snapshot's bytecode site for profiling.
  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  OutOfLineBailout* ool = new (alloc()) OutOfLineBailout(snapshot);
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  // Point every use of |label| at the OOL entry.
  masm.retarget(label, ool->entry());
}
// Unconditional bailout: emit an always-taken branch and retarget it into
// the out-of-line bailout path for |snapshot|.
void CodeGeneratorARM64::bailout(LSnapshot* snapshot) {
  Label label;
  masm.b(&label);
  bailoutFrom(&label, snapshot);
}
// Out-of-line tail of a bailout: push this bailout's snapshot offset and
// join the shared deopt path bound at deoptLabel_ (see
// generateOutOfLineCode), which pushes the frame size and jumps to the
// generic bailout handler.
void CodeGeneratorARM64::visitOutOfLineBailout(OutOfLineBailout* ool) {
  masm.push(Imm32(ool->snapshot()->snapshotOffset()));
  masm.B(&deoptLabel_);
}
// Double-precision min/max via the hardware Fmin/Fmax instructions.
void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
  ARMFPRegister first(ToFloatRegister(ins->first()), 64);
  ARMFPRegister second(ToFloatRegister(ins->second()), 64);
  ARMFPRegister result(ToFloatRegister(ins->output()), 64);

  if (!ins->mir()->isMax()) {
    masm.Fmin(result, first, second);
  } else {
    masm.Fmax(result, first, second);
  }
}
// Single-precision min/max via the hardware Fmin/Fmax instructions.
void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
  ARMFPRegister first(ToFloatRegister(ins->first()), 32);
  ARMFPRegister second(ToFloatRegister(ins->second()), 32);
  ARMFPRegister result(ToFloatRegister(ins->output()), 32);

  if (!ins->mir()->isMax()) {
    masm.Fmin(result, first, second);
  } else {
    masm.Fmax(result, first, second);
  }
}
// View an allocation/definition's general register as a 32-bit W register.
template <typename T>
static ARMRegister toWRegister(const T* a) {
  return ARMRegister(ToRegister(a), 32);
}
// View an allocation/definition's general register as a 64-bit X register.
template <typename T>
static ARMRegister toXRegister(const T* a) {
  return ARMRegister(ToRegister(a), 64);
}
// Convert an LAllocation into a vixl Operand: constants become immediates,
// registers become 32-bit W-register operands.
Operand toWOperand(const LAllocation* a) {
  return a->isConstant() ? Operand(ToInt32(a)) : Operand(toWRegister(a));
}
// Map an LAllocation to the vixl CPURegister matching the scalar |type|:
// float allocations get an FP register of the corresponding width, general
// registers are viewed as 32-bit W registers.
vixl::CPURegister ToCPURegister(const LAllocation* a, Scalar::Type type) {
  if (a->isFloatReg()) {
    if (type == Scalar::Float64) {
      return ARMFPRegister(ToFloatRegister(a), 64);
    }
    if (type == Scalar::Float32) {
      return ARMFPRegister(ToFloatRegister(a), 32);
    }
  }
  if (a->isGeneralReg()) {
    return ARMRegister(ToRegister(a), 32);
  }
  MOZ_CRASH("Unknown LAllocation");
}
// Overload for definitions: delegate to the LAllocation version on the
// definition's output allocation.
vixl::CPURegister ToCPURegister(const LDefinition* d, Scalar::Type type) {
  return ToCPURegister(d->output(), type);
}
// Let |cond| be an ARM64 condition code that we could reasonably use in a
// conditional branch or select following a comparison instruction. This
// function returns the condition to use in the case where we swap the two
// operands of the comparison instruction.
Assembler::Condition GetCondForSwappedOperands(Assembler::Condition cond) {
  switch (cond) {
    // Equality and inequality are symmetric: they map to themselves.
    case vixl::eq:
    case vixl::ne:
      return cond;

    // Of the remaining 14 condition codes, four ordered pairings swap
    // meaningfully with their mirror image:
    //   HS <-> LS, LO <-> HI, GE <-> LE, GT <-> LT.
    case vixl::hs:
      return vixl::ls;
    case vixl::ls:
      return vixl::hs;

    case vixl::lo:
      return vixl::hi;
    case vixl::hi:
      return vixl::lo;

    case vixl::ge:
      return vixl::le;
    case vixl::le:
      return vixl::ge;

    case vixl::gt:
      return vixl::lt;
    case vixl::lt:
      return vixl::gt;

    default:
      MOZ_CRASH("no meaningful swapped-operand condition");
  }
}
// 32-bit integer addition. Fallible adds use the flag-setting ADDS and
// bail out on signed overflow.
void CodeGenerator::visitAddI(LAddI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  // Platforms with three-operand arithmetic ops don't need recovery.
  MOZ_ASSERT(!ins->recoversInput());

  if (!ins->snapshot()) {
    masm.Add(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
    return;
  }

  masm.Adds(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
  bailoutIf(Assembler::Overflow, ins->snapshot());
}
// 32-bit integer subtraction. Fallible subtracts use the flag-setting
// SUBS and bail out on signed overflow.
void CodeGenerator::visitSubI(LSubI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);

  // Platforms with three-operand arithmetic ops don't need recovery.
  MOZ_ASSERT(!ins->recoversInput());

  if (!ins->snapshot()) {
    masm.Sub(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
    return;
  }

  masm.Subs(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
  bailoutIf(Assembler::Overflow, ins->snapshot());
}
// 32-bit integer multiplication, with optional bailouts for overflow and
// for results that should be negative zero. Constant right-hand sides get
// strength-reduced (neg/mov/add/shift) where possible.
void CodeGenerator::visitMulI(LMulI* ins) {
  const LAllocation* lhs = ins->getOperand(0);
  const LAllocation* rhs = ins->getOperand(1);
  const LDefinition* dest = ins->getDef(0);
  MMul* mul = ins->mir();
  // Integer-mode multiplies were proven infallible by MIR analysis.
  MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
                !mul->canBeNegativeZero() && !mul->canOverflow());

  Register lhsreg = ToRegister(lhs);
  const ARMRegister lhsreg32 = ARMRegister(lhsreg, 32);
  Register destreg = ToRegister(dest);
  const ARMRegister destreg32 = ARMRegister(destreg, 32);

  if (rhs->isConstant()) {
    // Bailout on -0.0.
    // With constant == 0 the product is -0 iff lhs < 0; with a negative
    // constant the product is -0 iff lhs == 0.
    int32_t constant = ToInt32(rhs);
    if (mul->canBeNegativeZero() && constant <= 0) {
      Assembler::Condition bailoutCond =
          (constant == 0) ? Assembler::LessThan : Assembler::Equal;
      masm.Cmp(toWRegister(lhs), Operand(0));
      bailoutIf(bailoutCond, ins->snapshot());
    }

    switch (constant) {
      case -1:
        // Flag-setting negate: overflows only for INT32_MIN.
        masm.Negs(destreg32, Operand(lhsreg32));
        break;  // Go to overflow check.
      case 0:
        masm.Mov(destreg32, wzr);
        return;  // Avoid overflow check.
      case 1:
        if (destreg != lhsreg) {
          masm.Mov(destreg32, lhsreg32);
        }
        return;  // Avoid overflow check.
      case 2:
        if (!mul->canOverflow()) {
          masm.Add(destreg32, lhsreg32, Operand(lhsreg32));
          return;  // Avoid overflow check.
        }
        masm.Adds(destreg32, lhsreg32, Operand(lhsreg32));
        break;  // Go to overflow check.
      default:
        // Use shift if cannot overflow and constant is a power of 2
        if (!mul->canOverflow() && constant > 0) {
          int32_t shift = FloorLog2(constant);
          if ((1 << shift) == constant) {
            masm.Lsl(destreg32, lhsreg32, shift);
            return;
          }
        }

        // Otherwise, just multiply. We have to check for overflow.
        // Negative zero was handled above.
        Label bailout;
        Label* onOverflow = mul->canOverflow() ? &bailout : nullptr;

        // Materialize the constant in a scratch register for mul32.
        vixl::UseScratchRegisterScope temps(&masm.asVIXL());
        const Register scratch = temps.AcquireW().asUnsized();
        masm.move32(Imm32(constant), scratch);
        masm.mul32(lhsreg, scratch, destreg, onOverflow);

        if (onOverflow) {
          // NOTE(review): presumably lowering keeps lhs distinct from dest
          // so the snapshot can still observe it — confirm against Lowering.
          MOZ_ASSERT(lhsreg != destreg);
          bailoutFrom(&bailout, ins->snapshot());
        }
        return;
    }

    // Overflow check.
    if (mul->canOverflow()) {
      bailoutIf(Assembler::Overflow, ins->snapshot());
    }
  } else {
    Register rhsreg = ToRegister(rhs);
    const ARMRegister rhsreg32 = ARMRegister(rhsreg, 32);

    Label bailout;
    Label* onOverflow = mul->canOverflow() ? &bailout : nullptr;

    if (mul->canBeNegativeZero()) {
      // The product of two integer operands is negative zero iff one
      // operand is zero, and the other is negative. Therefore, the
      // sum of the two operands will also be negative (specifically,
      // it will be the non-zero operand). If the result of the
      // multiplication is 0, we can check the sign of the sum to
      // determine whether we should bail out.

      // This code can bailout, so lowering guarantees that the input
      // operands are not overwritten.
      MOZ_ASSERT(destreg != lhsreg);
      MOZ_ASSERT(destreg != rhsreg);

      // Do the multiplication.
      masm.mul32(lhsreg, rhsreg, destreg, onOverflow);

      // Set Zero flag if destreg is 0.
      masm.test32(destreg, destreg);

      // ccmn is 'conditional compare negative'.
      // If the Zero flag is set:
      //    perform a compare negative (compute lhs+rhs and set flags)
      // else:
      //    clear flags
      masm.Ccmn(lhsreg32, rhsreg32, vixl::NoFlag, Assembler::Zero);

      // Bails out if (lhs * rhs == 0) && (lhs + rhs < 0):
      bailoutIf(Assembler::LessThan, ins->snapshot());
    } else {
      masm.mul32(lhsreg, rhsreg, destreg, onOverflow);
    }

    if (onOverflow) {
      bailoutFrom(&bailout, ins->snapshot());
    }
  }
}
// 32-bit signed integer division. Handles the three fallible cases in
// order — division by zero, INT32_MIN / -1 overflow, and negative zero —
// each either trapping (wasm), truncating, or bailing out per MIR flags,
// then performs the actual SDIV (checking the remainder if the result may
// not be an exact integer).
void CodeGenerator::visitDivI(LDivI* ins) {
  const Register lhs = ToRegister(ins->lhs());
  const Register rhs = ToRegister(ins->rhs());
  const Register output = ToRegister(ins->output());
  const ARMRegister lhs32 = toWRegister(ins->lhs());
  const ARMRegister rhs32 = toWRegister(ins->rhs());
  const ARMRegister temp32 = toWRegister(ins->getTemp(0));
  const ARMRegister output32 = toWRegister(ins->output());

  MDiv* mir = ins->mir();

  Label done;

  // Handle division by zero.
  if (mir->canBeDivideByZero()) {
    masm.test32(rhs, rhs);
    if (mir->trapOnError()) {
      // Wasm semantics: trap on divide-by-zero.
      Label nonZero;
      masm.j(Assembler::NonZero, &nonZero);
      masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
      masm.bind(&nonZero);
    } else if (mir->canTruncateInfinities()) {
      // Truncated division by zero is zero: (Infinity|0 = 0).
      Label nonZero;
      masm.j(Assembler::NonZero, &nonZero);
      masm.Mov(output32, wzr);
      masm.jump(&done);
      masm.bind(&nonZero);
    } else {
      // JS semantics: bail so the result can be produced as a double.
      MOZ_ASSERT(mir->fallible());
      bailoutIf(Assembler::Zero, ins->snapshot());
    }
  }

  // Handle an integer overflow from (INT32_MIN / -1).
  // The integer division gives INT32_MIN, but should be -(double)INT32_MIN.
  if (mir->canBeNegativeOverflow()) {
    Label notOverflow;

    // Branch to handle the non-overflow cases.
    masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);
    masm.branch32(Assembler::NotEqual, rhs, Imm32(-1), &notOverflow);

    // Handle overflow.
    if (mir->trapOnError()) {
      masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
    } else if (mir->canTruncateOverflow()) {
      // (-INT32_MIN)|0 == INT32_MIN, which is already in lhs.
      masm.move32(lhs, output);
      masm.jump(&done);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailout(ins->snapshot());
    }
    masm.bind(&notOverflow);
  }

  // Handle negative zero: lhs == 0 && rhs < 0.
  if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
    Label nonZero;
    masm.branch32(Assembler::NotEqual, lhs, Imm32(0), &nonZero);
    masm.cmp32(rhs, Imm32(0));
    bailoutIf(Assembler::LessThan, ins->snapshot());
    masm.bind(&nonZero);
  }

  // Perform integer division.
  if (mir->canTruncateRemainder()) {
    masm.Sdiv(output32, lhs32, rhs32);
  } else {
    vixl::UseScratchRegisterScope temps(&masm.asVIXL());
    ARMRegister scratch32 = temps.AcquireW();

    // ARM does not automatically calculate the remainder.
    // The ISR suggests multiplication to determine whether a remainder exists.
    masm.Sdiv(scratch32, lhs32, rhs32);
    masm.Mul(temp32, scratch32, rhs32);
    masm.Cmp(lhs32, temp32);
    // Bail if lhs != quotient * rhs, i.e. the division was inexact.
    bailoutIf(Assembler::NotEqual, ins->snapshot());
    masm.Mov(output32, scratch32);
  }

  masm.bind(&done);
}
// Division by a (possibly negated) power of two, strength-reduced to
// shifts. |shift| == 0 means division by ±1. Non-truncated divisions bail
// out whenever the mathematical result would not be an int32.
void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
  const Register numerator = ToRegister(ins->numerator());
  const ARMRegister numerator32 = toWRegister(ins->numerator());
  const ARMRegister output32 = toWRegister(ins->output());

  int32_t shift = ins->shift();
  bool negativeDivisor = ins->negativeDivisor();
  MDiv* mir = ins->mir();

  if (!mir->isTruncated() && negativeDivisor) {
    // 0 divided by a negative number returns a -0 double.
    bailoutTest32(Assembler::Zero, numerator, numerator, ins->snapshot());
  }

  if (shift) {
    if (!mir->isTruncated()) {
      // If the remainder is != 0, bailout since this must be a double.
      // The mask covers the low |shift| bits of the numerator.
      bailoutTest32(Assembler::NonZero, numerator,
                    Imm32(UINT32_MAX >> (32 - shift)), ins->snapshot());
    }

    if (mir->isUnsigned()) {
      // shift right
      masm.Lsr(output32, numerator32, shift);
    } else {
      ARMRegister temp32 = numerator32;
      // Adjust the value so that shifting produces a correctly
      // rounded result when the numerator is negative. See 10-1
      // "Signed Division by a Known Power of 2" in Henry
      // S. Warren, Jr.'s Hacker's Delight.
      if (mir->canBeNegativeDividend() && mir->isTruncated()) {
        if (shift > 1) {
          // Copy the sign bit of the numerator. (= (2^32 - 1) or 0)
          masm.Asr(output32, numerator32, 31);
          temp32 = output32;
        }
        // Divide by 2^(32 - shift)
        // i.e. (= (2^32 - 1) / 2^(32 - shift) or 0)
        // i.e. (= (2^shift - 1) or 0)
        masm.Lsr(output32, temp32, 32 - shift);
        // If signed, make any 1 bit below the shifted bits to bubble up, such
        // that once shifted the value would be rounded towards 0.
        masm.Add(output32, output32, numerator32);
        temp32 = output32;
      }
      masm.Asr(output32, temp32, shift);

      if (negativeDivisor) {
        masm.Neg(output32, output32);
      }
    }
    return;
  }

  if (negativeDivisor) {
    // INT32_MIN / -1 overflows.
    if (!mir->isTruncated()) {
      // Flag-setting negate; bail if the negation overflowed.
      masm.Negs(output32, numerator32);
      bailoutIf(Assembler::Overflow, ins->snapshot());
    } else if (mir->trapOnError()) {
      // Wasm semantics: trap on overflow instead of bailing.
      Label ok;
      masm.Negs(output32, numerator32);
      masm.branch(Assembler::NoOverflow, &ok);
      masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
      masm.bind(&ok);
    } else {
      // Do not set condition flags.
      masm.Neg(output32, numerator32);
    }
  } else {
    if (mir->isUnsigned() && !mir->isTruncated()) {
      // Copy and set flags.
      masm.Adds(output32, numerator32, 0);
      // Unsigned division by 1 can overflow if output is not truncated, as we
      // do not have an Unsigned type for MIR instructions.
      bailoutIf(Assembler::Signed, ins->snapshot());
    } else {
      // Copy the result.
      masm.Mov(output32, numerator32);
    }
  }
}
// Signed division by a non-power-of-two constant, using the reciprocal
// multiplication technique (multiply by a magic constant M, then shift).
// Non-truncated divisions verify the result is exact and bail otherwise.
void CodeGenerator::visitDivConstantI(LDivConstantI* ins) {
  const ARMRegister lhs32 = toWRegister(ins->numerator());
  const ARMRegister lhs64 = toXRegister(ins->numerator());
  const ARMRegister const32 = toWRegister(ins->temp());
  const ARMRegister output32 = toWRegister(ins->output());
  const ARMRegister output64 = toXRegister(ins->output());
  int32_t d = ins->denominator();

  // The absolute value of the denominator isn't a power of 2.
  using mozilla::Abs;
  MOZ_ASSERT((Abs(d) & (Abs(d) - 1)) != 0);

  // We will first divide by Abs(d), and negate the answer if d is negative.
  // If desired, this can be avoided by generalizing computeDivisionConstants.
  auto rmc = ReciprocalMulConstants::computeSignedDivisionConstants(Abs(d));

  // We first compute (M * n) >> 32, where M = rmc.multiplier.
  masm.Mov(const32, int32_t(rmc.multiplier));
  if (rmc.multiplier > INT32_MAX) {
    MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 32));

    // We actually compute (int32_t(M) * n) instead, without the upper bit.
    // Thus, (M * n) = (int32_t(M) * n) + n << 32.
    //
    // ((int32_t(M) * n) + n << 32) can't overflow, as both operands have
    // opposite signs because int32_t(M) is negative.
    masm.Lsl(output64, lhs64, 32);

    // Store (M * n) in output64.
    masm.Smaddl(output64, const32, lhs32, output64);
  } else {
    // Store (M * n) in output64.
    masm.Smull(output64, const32, lhs32);
  }

  // (M * n) >> (32 + shift) is the truncated division answer if n is
  // non-negative, as proved in the comments of computeDivisionConstants. We
  // must add 1 later if n is negative to get the right answer in all cases.
  masm.Asr(output64, output64, 32 + rmc.shiftAmount);

  // We'll subtract -1 instead of adding 1, because (n < 0 ? -1 : 0) can be
  // computed with just a sign-extending shift of 31 bits.
  if (ins->canBeNegativeDividend()) {
    masm.Asr(const32, lhs32, 31);
    masm.Sub(output32, output32, const32);
  }

  // After this, output32 contains the correct truncated division result.
  if (d < 0) {
    masm.Neg(output32, output32);
  }

  if (!ins->mir()->isTruncated()) {
    // This is a division op. Multiply the obtained value by d to check if
    // the correct answer is an integer. This cannot overflow, since |d| > 1.
    masm.Mov(const32, d);
    masm.Msub(const32, output32, const32, lhs32);
    // bailout if (lhs - output * d != 0)
    masm.Cmp(const32, wzr);
    auto bailoutCond = Assembler::NonZero;

    // If lhs is zero and the divisor is negative, the answer should have
    // been -0.
    if (d < 0) {
      // or bailout if (lhs == 0).
      // ^                 ^
      // |                 '-- masm.Ccmp(lhs32, lhs32, .., ..)
      // '-- masm.Ccmp(.., .., vixl::ZFlag, ! bailoutCond)
      masm.Ccmp(lhs32, wzr, vixl::ZFlag, Assembler::Zero);
      bailoutCond = Assembler::Zero;
    }

    // bailout if (lhs - output * d != 0) or (d < 0 && lhs == 0)
    bailoutIf(bailoutCond, ins->snapshot());
  }
}
// Unsigned division by a non-power-of-two constant via reciprocal
// multiplication (multiply by magic constant M, then shift). d == 0 is
// handled up front (trap for wasm, 0 for truncated JS, bailout otherwise).
// Non-truncated divisions verify the result is exact and bail otherwise.
void CodeGenerator::visitUDivConstantI(LUDivConstantI* ins) {
  const ARMRegister lhs32 = toWRegister(ins->numerator());
  const ARMRegister lhs64 = toXRegister(ins->numerator());
  const ARMRegister const32 = toWRegister(ins->temp());
  const ARMRegister output32 = toWRegister(ins->output());
  const ARMRegister output64 = toXRegister(ins->output());
  uint32_t d = ins->denominator();

  if (d == 0) {
    if (ins->mir()->isTruncated()) {
      if (ins->mir()->trapOnError()) {
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero,
                      ins->mir()->bytecodeOffset());
      } else {
        // Truncated division by zero yields integer zero.
        masm.Mov(output32, wzr);
      }
    } else {
      // JS semantics: n / 0 is not an int32; deoptimize.
      bailout(ins->snapshot());
    }
    return;
  }

  // The denominator isn't a power of 2 (see LDivPowTwoI).
  MOZ_ASSERT((d & (d - 1)) != 0);

  auto rmc = ReciprocalMulConstants::computeUnsignedDivisionConstants(d);

  // We first compute (M * n), where M = rmc.multiplier.
  masm.Mov(const32, int32_t(rmc.multiplier));
  masm.Umull(output64, const32, lhs32);
  if (rmc.multiplier > UINT32_MAX) {
    // M >= 2^32 and shift == 0 is impossible, as d >= 2 implies that
    // ((M * n) >> (32 + shift)) >= n > floor(n/d) whenever n >= d,
    // contradicting the proof of correctness in computeDivisionConstants.
    MOZ_ASSERT(rmc.shiftAmount > 0);
    MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 33));

    // We actually compute (uint32_t(M) * n) instead, without the upper bit.
    // Thus, (M * n) = (uint32_t(M) * n) + n << 32.
    //
    // ((uint32_t(M) * n) + n << 32) can overflow. Hacker's Delight explains a
    // trick to avoid this overflow case, but we can avoid it by computing the
    // addition on 64 bits registers.
    //
    // Compute ((uint32_t(M) * n) >> 32 + n)
    masm.Add(output64, lhs64, Operand(output64, vixl::LSR, 32));

    // (M * n) >> (32 + shift) is the truncated division answer.
    masm.Lsr(output64, output64, rmc.shiftAmount);
  } else {
    // (M * n) >> (32 + shift) is the truncated division answer.
    masm.Lsr(output64, output64, 32 + rmc.shiftAmount);
  }

  // We now have the truncated division value. We are checking whether the
  // division resulted in an integer, we multiply the obtained value by d and
  // check the remainder of the division.
  if (!ins->mir()->isTruncated()) {
    masm.Mov(const32, d);
    masm.Msub(const32, output32, const32, lhs32);
    // Bug fix: compare the remainder against zero (wzr), not against
    // itself. Comparing a register with itself always sets the Z flag, so
    // the NonZero bailout below could never be taken and inexact divisions
    // would silently return the truncated quotient. This now matches the
    // signed path in visitDivConstantI.
    masm.Cmp(const32, wzr);
    // bailout if (lhs - output * d != 0)
    bailoutIf(Assembler::NonZero, ins->snapshot());
  }
}
// 32-bit signed modulus: output = lhs - (lhs / rhs) * rhs via SDIV + MSUB.
// Handles divide-by-zero (trap/zero/bailout per MIR flags) and the
// negative-zero result case for non-truncated mods.
void CodeGenerator::visitModI(LModI* ins) {
  ARMRegister lhs = toWRegister(ins->lhs());
  ARMRegister rhs = toWRegister(ins->rhs());
  ARMRegister output = toWRegister(ins->output());
  Label done;

  MMod* mir = ins->mir();

  // Prevent divide by zero.
  if (mir->canBeDivideByZero()) {
    if (mir->isTruncated()) {
      if (mir->trapOnError()) {
        // Wasm semantics: trap on modulo-by-zero.
        Label nonZero;
        masm.Cbnz(rhs, &nonZero);
        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
        masm.bind(&nonZero);
      } else {
        // Truncated division by zero yields integer zero.
        masm.Mov(output, rhs);
        masm.Cbz(rhs, &done);
      }
    } else {
      // Non-truncated division by zero produces a non-integer.
      MOZ_ASSERT(!gen->compilingWasm());
      masm.Cmp(rhs, Operand(0));
      bailoutIf(Assembler::Equal, ins->snapshot());
    }
  }

  // Signed division.
  masm.Sdiv(output, lhs, rhs);

  // Compute the remainder: output = lhs - (output * rhs).
  masm.Msub(output, output, rhs, lhs);

  if (mir->canBeNegativeDividend() && !mir->isTruncated()) {
    // If output == 0 and lhs < 0, then the result should be double -0.0.
    // Note that this guard handles lhs == INT_MIN and rhs == -1:
    //   output = INT_MIN - (INT_MIN / -1) * -1
    //          = INT_MIN - INT_MIN
    //          = 0
    masm.Cbnz(output, &done);
    bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
  }

  if (done.used()) {
    masm.bind(&done);
  }
}
// Modulus by a power of two, computed with a bitmask. Negative dividends
// are negated, masked, and negated back so the result takes the dividend's
// sign; a zero result from a negative dividend is -0 and bails out when
// the mod is not truncated.
void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
  Register lhs = ToRegister(ins->getOperand(0));
  ARMRegister lhsw = toWRegister(ins->getOperand(0));
  ARMRegister outw = toWRegister(ins->output());

  int32_t shift = ins->shift();
  bool canBeNegative =
      !ins->mir()->isUnsigned() && ins->mir()->canBeNegativeDividend();

  Label negative;
  if (canBeNegative) {
    // Switch based on sign of the lhs.
    // Positive numbers are just a bitmask.
    masm.branchTest32(Assembler::Signed, lhs, lhs, &negative);
  }

  masm.And(outw, lhsw, Operand((uint32_t(1) << shift) - 1));

  if (canBeNegative) {
    Label done;
    masm.jump(&done);

    // Negative numbers need a negate, bitmask, negate.
    masm.bind(&negative);
    masm.Neg(outw, Operand(lhsw));
    masm.And(outw, outw, Operand((uint32_t(1) << shift) - 1));

    // Since a%b has the same sign as b, and a is negative in this branch,
    // an answer of 0 means the correct result is actually -0. Bail out.
    if (!ins->mir()->isTruncated()) {
      // Flag-setting negate so Zero reflects a -0 result.
      masm.Negs(outw, Operand(outw));
      bailoutIf(Assembler::Zero, ins->snapshot());
    } else {
      masm.Neg(outw, Operand(outw));
    }
    masm.bind(&done);
  }
}
// Modulus by (2^shift - 1), computed digit-by-digit in base 2^shift (see
// derivation below) so no division instruction is needed.
void CodeGenerator::visitModMaskI(LModMaskI* ins) {
  MMod* mir = ins->mir();
  int32_t shift = ins->shift();

  const Register src = ToRegister(ins->getOperand(0));
  const Register dest = ToRegister(ins->getDef(0));
  const Register hold = ToRegister(ins->getTemp(0));
  const Register remain = ToRegister(ins->getTemp(1));

  const ARMRegister src32 = ARMRegister(src, 32);
  const ARMRegister dest32 = ARMRegister(dest, 32);
  const ARMRegister remain32 = ARMRegister(remain, 32);

  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  const ARMRegister scratch32 = temps.AcquireW();
  const Register scratch = scratch32.asUnsized();

  // We wish to compute x % (1<<y) - 1 for a known constant, y.
  //
  // 1. Let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit dividend as
  // a number in base b, namely c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
  //
  // 2. Since both addition and multiplication commute with modulus:
  //   x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
  //    (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
  //
  // 3. Since b == C + 1, b % C == 1, and b^n % C == 1 the whole thing
  // simplifies to: c_0 + c_1 + c_2 ... c_n % C
  //
  // Each c_n can easily be computed by a shift/bitextract, and the modulus
  // can be maintained by simply subtracting by C whenever the number gets
  // over C.
  int32_t mask = (1 << shift) - 1;
  Label loop;

  // Register 'hold' holds -1 if the value was negative, 1 otherwise.
  // The remain reg holds the remaining bits that have not been processed.
  // The scratch reg serves as a temporary location to store extracted bits.
  // The dest reg is the accumulator, becoming final result.
  //
  // Move the whole value into the remain.
  masm.Mov(remain32, src32);

  // Zero out the dest.
  masm.Mov(dest32, wzr);

  // Set the hold appropriately.
  {
    Label negative;
    masm.branch32(Assembler::Signed, remain, Imm32(0), &negative);
    masm.move32(Imm32(1), hold);
    masm.jump(&loop);

    masm.bind(&negative);
    masm.move32(Imm32(-1), hold);
    // Process the magnitude; the sign is reapplied at the end.
    masm.neg32(remain);
  }

  // Begin the main loop.
  masm.bind(&loop);
  {
    // Extract the bottom bits into scratch.
    masm.And(scratch32, remain32, Operand(mask));

    // Add those bits to the accumulator.
    masm.Add(dest32, dest32, scratch32);

    // Do a trial subtraction. This functions as a cmp but remembers the result.
    masm.Subs(scratch32, dest32, Operand(mask));

    // If (sum - C) > 0, store sum - C back into sum, thus performing a modulus.
    {
      Label sumSigned;
      masm.branch32(Assembler::Signed, scratch, scratch, &sumSigned);
      masm.Mov(dest32, scratch32);
      masm.bind(&sumSigned);
    }

    // Get rid of the bits that we extracted before.
    masm.Lsr(remain32, remain32, shift);

    // If the shift produced zero, finish, otherwise, continue in the loop.
    masm.branchTest32(Assembler::NonZero, remain, remain, &loop);
  }

  // Check the hold to see if we need to negate the result.
  {
    Label done;

    // If the hold was non-zero, negate the result to match JS expectations.
    masm.branchTest32(Assembler::NotSigned, hold, hold, &done);
    if (mir->canBeNegativeDividend() && !mir->isTruncated()) {
      // Bail in case of negative zero hold.
      bailoutTest32(Assembler::Zero, hold, hold, ins->snapshot());
    }

    masm.neg32(dest);
    masm.bind(&done);
  }
}
// Divide two 64-bit BigInt digits and allocate a fresh BigInt holding the
// quotient. On allocation failure, jumps to |fail|. The quotient is
// computed in place in |dividend|.
void CodeGeneratorARM64::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
                                       Register divisor, Register output,
                                       Label* fail) {
  // Callers handle division by zero and integer overflow.
  const ARMRegister dividend64(dividend, 64);
  const ARMRegister divisor64(divisor, 64);

  masm.Sdiv(/* result= */ dividend64, dividend64, divisor64);

  // Create and return the result.
  masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
  masm.initializeBigInt(output, dividend);
}
// Compute the modulus of two 64-bit BigInt digits (SDIV + MSUB, as ARM64
// has no remainder instruction) and allocate a fresh BigInt holding it.
// On allocation failure, jumps to |fail|. The remainder ends up in
// |dividend|.
void CodeGeneratorARM64::emitBigIntMod(LBigIntMod* ins, Register dividend,
                                       Register divisor, Register output,
                                       Label* fail) {
  // Callers handle division by zero and integer overflow.
  const ARMRegister dividend64(dividend, 64);
  const ARMRegister divisor64(divisor, 64);
  const ARMRegister output64(output, 64);

  // Signed division.
  masm.Sdiv(output64, dividend64, divisor64);

  // Compute the remainder: output = dividend - (output * divisor).
  masm.Msub(/* result= */ dividend64, output64, divisor64, dividend64);

  // Create and return the result.
  masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
  masm.initializeBigInt(output, dividend);
}
// 32-bit bitwise NOT via the MVN (move-not) instruction.
void CodeGenerator::visitBitNotI(LBitNotI* ins) {
  masm.Mvn(toWRegister(ins->getDef(0)), toWOperand(ins->getOperand(0)));
}
// 64-bit bitwise NOT via MVN on X registers.
void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
  vixl::Register in64(ToRegister(ins->input()), 64);
  vixl::Register out64(ToRegister(ins->output()), 64);
  masm.Mvn(out64, in64);
}
// 32-bit bitwise and/or/xor, mapped onto the matching ARM64 logical
// instruction (AND/ORR/EOR).
void CodeGenerator::visitBitOpI(LBitOpI* ins) {
  const ARMRegister lhsReg = toWRegister(ins->getOperand(0));
  const Operand rhsOp = toWOperand(ins->getOperand(1));
  const ARMRegister destReg = toWRegister(ins->getDef(0));

  switch (ins->bitop()) {
    case JSOp::BitAnd:
      masm.And(destReg, lhsReg, rhsOp);
      break;
    case JSOp::BitOr:
      masm.Orr(destReg, lhsReg, rhsOp);
      break;
    case JSOp::BitXor:
      masm.Eor(destReg, lhsReg, rhsOp);
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}
// 32-bit shifts (<<, >>, >>>). Shift counts are masked to 5 bits for
// constants (the hardware masks register counts). A fallible >>> bails
// out when the unsigned result does not fit in an int32 (sign bit set).
void CodeGenerator::visitShiftI(LShiftI* ins) {
  const ARMRegister lhs = toWRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  const ARMRegister dest = toWRegister(ins->output());

  if (rhs->isConstant()) {
    int32_t shift = ToInt32(rhs) & 0x1F;
    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.Lsl(dest, lhs, shift);
        break;
      case JSOp::Rsh:
        masm.Asr(dest, lhs, shift);
        break;
      case JSOp::Ursh:
        if (shift) {
          masm.Lsr(dest, lhs, shift);
        } else if (ins->mir()->toUrsh()->fallible()) {
          // x >>> 0 can overflow.
          // ANDS with all-ones copies lhs and sets flags; bail if negative.
          masm.Ands(dest, lhs, Operand(0xFFFFFFFF));
          bailoutIf(Assembler::Signed, ins->snapshot());
        } else {
          masm.Mov(dest, lhs);
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  } else {
    const ARMRegister rhsreg = toWRegister(rhs);
    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.Lsl(dest, lhs, rhsreg);
        break;
      case JSOp::Rsh:
        masm.Asr(dest, lhs, rhsreg);
        break;
      case JSOp::Ursh:
        masm.Lsr(dest, lhs, rhsreg);
        if (ins->mir()->toUrsh()->fallible()) {
          // x >>> 0 can overflow: bail if the result reads as negative.
          masm.Cmp(dest, Operand(0));
          bailoutIf(Assembler::LessThan, ins->snapshot());
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}
// Unsigned right shift producing a double: shift as uint32, then convert
// the result to a double in |out|.
void CodeGenerator::visitUrshD(LUrshD* ins) {
  const ARMRegister lhs = toWRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  const FloatRegister out = ToFloatRegister(ins->output());
  const Register temp = ToRegister(ins->temp());
  const ARMRegister temp32 = toWRegister(ins->temp());

  if (!rhs->isConstant()) {
    // Variable count: mask to 5 bits, shift, convert.
    masm.And(temp32, toWRegister(rhs), Operand(0x1F));
    masm.Lsr(temp32, lhs, temp32);
    masm.convertUInt32ToDouble(temp, out);
    return;
  }

  int32_t shift = ToInt32(rhs) & 0x1F;
  if (shift == 0) {
    // Zero shift: the input is already the unsigned value to convert.
    masm.convertUInt32ToDouble(ToRegister(ins->lhs()), out);
  } else {
    masm.Lsr(temp32, lhs, shift);
    masm.convertUInt32ToDouble(temp, out);
  }
}
// Math.pow(x, 0.5). Differs from sqrt(x) in two JS-mandated corner cases:
// pow(-Infinity, 0.5) is +Infinity, and pow(-0, 0.5) is +0. The MIR flags
// let us skip the corresponding guards when the operand cannot hit them.
void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());

  ScratchDoubleScope scratch(masm);

  Label done, sqrt;

  if (!ins->mir()->operandIsNeverNegativeInfinity()) {
    // Branch if not -Infinity.
    masm.loadConstantDouble(NegativeInfinity<double>(), scratch);

    Assembler::DoubleCondition cond = Assembler::DoubleNotEqualOrUnordered;
    if (ins->mir()->operandIsNeverNaN()) {
      cond = Assembler::DoubleNotEqual;
    }
    masm.branchDouble(cond, input, scratch, &sqrt);

    // Math.pow(-Infinity, 0.5) == Infinity.
    // Produce +Infinity as 0.0 - (-Infinity).
    masm.zeroDouble(output);
    masm.subDouble(scratch, output);
    masm.jump(&done);

    masm.bind(&sqrt);
  }

  if (!ins->mir()->operandIsNeverNegativeZero()) {
    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
    // Adding 0 converts any -0 to 0.
    masm.zeroDouble(scratch);
    masm.addDouble(input, scratch);
    masm.sqrtDouble(scratch, output);
  } else {
    masm.sqrtDouble(input, output);
  }

  masm.bind(&done);
}
// Translate an LAllocation into the MoveOperand form consumed by the move
// resolver: a general register, a float register, or a stack/memory slot.
MoveOperand CodeGeneratorARM64::toMoveOperand(const LAllocation a) const {
  if (a.isFloatReg()) {
    return MoveOperand(ToFloatRegister(a));
  }
  if (a.isGeneralReg()) {
    return MoveOperand(ToRegister(a));
  }
  // Stack areas move their effective address; other slots move the value
  // stored in memory.
  MoveOperand::Kind kind = MoveOperand::Kind::Memory;
  if (a.isStackArea()) {
    kind = MoveOperand::Kind::EffectiveAddress;
  }
  return MoveOperand(ToAddress(a), kind);
}
// Out-of-line holder for a table switch's jump table. The table itself is
// emitted by visitOutOfLineTableSwitch once all case targets are bound.
class js::jit::OutOfLineTableSwitch
    : public OutOfLineCodeBase<CodeGeneratorARM64> {
 public:
  explicit OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}

  MTableSwitch* mir() const { return mir_; }

  // Label bound at the first entry of the jump table.
  CodeLabel* jumpLabel() { return &jumpLabel_; }

 private:
  void accept(CodeGeneratorARM64* codegen) override {
    codegen->visitOutOfLineTableSwitch(this);
  }

  MTableSwitch* mir_;
  CodeLabel jumpLabel_;
};
// Emit the jump table for a table switch: one absolute code pointer per case,
// patched to final addresses after codegen completes.
void CodeGeneratorARM64::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool) {
  MTableSwitch* mir = ool->mir();

  // Prevent nop and pools sequences to appear in the jump table.
  // The window is sized in instructions: one pointer-sized slot per case,
  // plus one for alignment slack.
  AutoForbidPoolsAndNops afp(
      &masm, (mir->numCases() + 1) * (sizeof(void*) / vixl::kInstructionSize));
  masm.haltingAlign(sizeof(void*));
  masm.bind(ool->jumpLabel());
  masm.addCodeLabel(*ool->jumpLabel());

  for (size_t i = 0; i < mir->numCases(); i++) {
    LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
    Label* caseheader = caseblock->label();
    uint32_t caseoffset = caseheader->offset();

    // The entries of the jump table need to be absolute addresses,
    // and thus must be patched after codegen is finished.
    CodeLabel cl;
    masm.writeCodePointer(&cl);
    cl.target()->bind(caseoffset);
    masm.addCodeLabel(cl);
  }
}
// Dispatch a table switch: normalize the index, range-check it against the
// case count (falling through to the default case on failure), then do an
// indirect jump through the out-of-line jump table.
void CodeGeneratorARM64::emitTableSwitchDispatch(MTableSwitch* mir,
                                                 Register index,
                                                 Register base) {
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  // Let the lowest table entry be indexed at 0.
  if (mir->low() != 0) {
    masm.sub32(Imm32(mir->low()), index);
  }

  // Jump to the default case if input is out of range.
  // (Unsigned AboveOrEqual also catches indices that went negative above.)
  int32_t cases = mir->numCases();
  masm.branch32(Assembler::AboveOrEqual, index, Imm32(cases), defaultcase);

  // Because the target code has not yet been generated, we cannot know the
  // instruction offsets for use as jump targets. Therefore we construct
  // an OutOfLineTableSwitch that winds up holding the jump table.
  //
  // Because the jump table is generated as part of out-of-line code,
  // it is generated after all the regular codegen, so the jump targets
  // are guaranteed to exist when generating the jump table.
  OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
  addOutOfLineCode(ool, mir);

  // Use the index to get the address of the jump target from the table.
  masm.mov(ool->jumpLabel(), base);
  BaseIndex pointer(base, index, ScalePointer);

  // Load the target from the jump table and branch to it.
  masm.branchToComputedAddress(pointer);
}
// Double-precision binary arithmetic: dest = lhs <op> rhs.
void CodeGenerator::visitMathD(LMathD* math) {
  ARMFPRegister lhsReg(ToFloatRegister(math->lhs()), 64);
  ARMFPRegister rhsReg(ToFloatRegister(math->rhs()), 64);
  ARMFPRegister dest(ToFloatRegister(math->output()), 64);

  switch (math->jsop()) {
    case JSOp::Add:
      masm.Fadd(dest, lhsReg, rhsReg);
      break;
    case JSOp::Sub:
      masm.Fsub(dest, lhsReg, rhsReg);
      break;
    case JSOp::Mul:
      masm.Fmul(dest, lhsReg, rhsReg);
      break;
    case JSOp::Div:
      masm.Fdiv(dest, lhsReg, rhsReg);
      break;
    default:
      MOZ_CRASH("unexpected opcode");
  }
}
// Single-precision binary arithmetic: dest = lhs <op> rhs.
void CodeGenerator::visitMathF(LMathF* math) {
  ARMFPRegister lhsReg(ToFloatRegister(math->lhs()), 32);
  ARMFPRegister rhsReg(ToFloatRegister(math->rhs()), 32);
  ARMFPRegister dest(ToFloatRegister(math->output()), 32);

  switch (math->jsop()) {
    case JSOp::Add:
      masm.Fadd(dest, lhsReg, rhsReg);
      break;
    case JSOp::Sub:
      masm.Fsub(dest, lhsReg, rhsReg);
      break;
    case JSOp::Mul:
      masm.Fmul(dest, lhsReg, rhsReg);
      break;
    case JSOp::Div:
      masm.Fdiv(dest, lhsReg, rhsReg);
      break;
    default:
      MOZ_CRASH("unexpected opcode");
  }
}
// Count leading zero bits of a 32-bit integer.
void CodeGenerator::visitClzI(LClzI* lir) {
  masm.Clz(toWRegister(lir->output()), toWRegister(lir->input()));
}
// Count trailing zero bits of a 32-bit integer.
void CodeGenerator::visitCtzI(LCtzI* lir) {
  Register in = ToRegister(lir->input());
  Register out = ToRegister(lir->output());
  // The input may be zero, so take the generic (not known-nonzero) path.
  masm.ctz32(in, out, /* knownNotZero = */ false);
}
// Truncate a double to int32 (JS ToInt32 semantics via the shared helper).
void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
  FloatRegister in = ToFloatRegister(ins->input());
  Register out = ToRegister(ins->output());
  emitTruncateDouble(in, out, ins->mir());
}
// Round a double to an integral double using the MIR node's rounding mode.
void CodeGenerator::visitNearbyInt(LNearbyInt* lir) {
  masm.nearbyIntDouble(lir->mir()->roundingMode(),
                       ToFloatRegister(lir->input()),
                       ToFloatRegister(lir->output()));
}
// Round a float32 to an integral float32 using the MIR node's rounding mode.
void CodeGenerator::visitNearbyIntF(LNearbyIntF* lir) {
  masm.nearbyIntFloat32(lir->mir()->roundingMode(),
                        ToFloatRegister(lir->input()),
                        ToFloatRegister(lir->output()));
}
// Wasm builtin double-to-int32 truncation; shares the generic helper.
void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
    LWasmBuiltinTruncateDToInt32* lir) {
  FloatRegister in = ToFloatRegister(lir->getOperand(0));
  Register out = ToRegister(lir->getDef(0));
  emitTruncateDouble(in, out, lir->mir());
}
// Truncate a float32 to int32 (JS ToInt32 semantics via the shared helper).
void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
  FloatRegister in = ToFloatRegister(ins->input());
  Register out = ToRegister(ins->output());
  emitTruncateFloat32(in, out, ins->mir());
}
// Wasm builtin float32-to-int32 truncation; shares the generic helper.
void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
    LWasmBuiltinTruncateFToInt32* lir) {
  FloatRegister in = ToFloatRegister(lir->getOperand(0));
  Register out = ToRegister(lir->getDef(0));
  emitTruncateFloat32(in, out, lir->mir());
}
// On ARM64 a boxed Value occupies a single 64-bit register.
ValueOperand CodeGeneratorARM64::ToValue(LInstruction* ins, size_t pos) {
  Register payload = ToRegister(ins->getOperand(pos));
  return ValueOperand(payload);
}
// Unreachable on ARM64: Values fit in one register, so temporary Values are
// never materialized through this path.
ValueOperand CodeGeneratorARM64::ToTempValue(LInstruction* ins, size_t pos) {
  MOZ_CRASH("CodeGeneratorARM64::ToTempValue");
}
// Materialize a constant js::Value into the output Value register.
void CodeGenerator::visitValue(LValue* value) {
  masm.moveValue(value->value(), ToOutValue(value));
}
// Box a typed payload into a tagged Value register.
void CodeGenerator::visitBox(LBox* box) {
  ValueOperand dest = ToOutValue(box);
  const LAllocation* payload = box->getOperand(0);
  masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(payload)),
                 dest);
}
// Unbox a tagged Value into a typed register. Fallible unboxes bail out when
// the Value's tag does not match the expected MIRType; infallible unboxes
// assert the tag in debug builds and extract the payload unconditionally.
void CodeGenerator::visitUnbox(LUnbox* unbox) {
  MUnbox* mir = unbox->mir();

  Register result = ToRegister(unbox->output());

  if (mir->fallible()) {
    const ValueOperand value = ToValue(unbox, LUnbox::Input);
    Label bail;
    switch (mir->type()) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(value, result, &bail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(value, result, &bail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(value, result, &bail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(value, result, &bail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(value, result, &bail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(value, result, &bail);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
    bailoutFrom(&bail, unbox->snapshot());
    return;
  }

  // Infallible unbox.

  ValueOperand input = ToValue(unbox, LUnbox::Input);

#ifdef DEBUG
  // Assert the types match.
  JSValueTag tag = MIRTypeToTag(mir->type());
  Label ok;
  {
    // The scratch register used for the tag must be released (scope exit)
    // before emitting the branch and the assumeUnreachable call.
    ScratchTagScope scratch(masm, input);
    masm.splitTagForTest(input, scratch);
    masm.cmpTag(scratch, ImmTag(tag));
  }
  masm.B(&ok, Assembler::Condition::Equal);
  masm.assumeUnreachable("Infallible unbox type mismatch");
  masm.bind(&ok);
#endif

  switch (mir->type()) {
    case MIRType::Int32:
      masm.unboxInt32(input, result);
      break;
    case MIRType::Boolean:
      masm.unboxBoolean(input, result);
      break;
    case MIRType::Object:
      masm.unboxObject(input, result);
      break;
    case MIRType::String:
      masm.unboxString(input, result);
      break;
    case MIRType::Symbol:
      masm.unboxSymbol(input, result);
      break;
    case MIRType::BigInt:
      masm.unboxBigInt(input, result);
      break;
    default:
      MOZ_CRASH("Given MIRType cannot be unboxed.");
  }
}
// Load a double constant into the output float register.
void CodeGenerator::visitDouble(LDouble* ins) {
  masm.loadConstantDouble(ins->value(), ToFloatRegister(ins->getDef(0)));
}
// Load a float32 constant into the output float register.
void CodeGenerator::visitFloat32(LFloat32* ins) {
  masm.loadConstantFloat32(ins->value(), ToFloatRegister(ins->getDef(0)));
}
// Branch on the truthiness of a double: 0.0, -0.0 and NaN are falsy,
// everything else is truthy.
void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
  const LAllocation* opd = test->input();
  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.Fcmp(ARMFPRegister(ToFloatRegister(opd), 64), 0.0);

  // If the compare set the 0 bit, then the result is definitely false.
  jumpToBlock(ifFalse, Assembler::Zero);

  // Overflow means one of the operands was NaN, which is also false.
  jumpToBlock(ifFalse, Assembler::Overflow);
  jumpToBlock(ifTrue);
}
// Branch on the truthiness of a float32: 0.0, -0.0 and NaN are falsy,
// everything else is truthy.
void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
  const LAllocation* opd = test->input();
  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.Fcmp(ARMFPRegister(ToFloatRegister(opd), 32), 0.0);

  // If the compare set the 0 bit, then the result is definitely false.
  jumpToBlock(ifFalse, Assembler::Zero);

  // Overflow means one of the operands was NaN, which is also false.
  jumpToBlock(ifFalse, Assembler::Overflow);
  jumpToBlock(ifTrue);
}
// Materialize a double comparison as a 0/1 integer result.
void CodeGenerator::visitCompareD(LCompareD* comp) {
  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());
  ARMRegister dest = toWRegister(comp->output());

  masm.compareDouble(cond, lhs, rhs);
  masm.cset(dest, Assembler::ConditionFromDoubleCondition(cond));
}
// Materialize a float32 comparison as a 0/1 integer result.
void CodeGenerator::visitCompareF(LCompareF* comp) {
  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
  FloatRegister lhs = ToFloatRegister(comp->left());
  FloatRegister rhs = ToFloatRegister(comp->right());
  ARMRegister dest = toWRegister(comp->output());

  masm.compareFloat(cond, lhs, rhs);
  masm.cset(dest, Assembler::ConditionFromDoubleCondition(cond));
}
// Compare two doubles and branch on the result.
void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
  Assembler::DoubleCondition dblCond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());

  masm.compareDouble(dblCond, ToFloatRegister(comp->left()),
                     ToFloatRegister(comp->right()));
  emitBranch(Assembler::ConditionFromDoubleCondition(dblCond), comp->ifTrue(),
             comp->ifFalse());
}
// Compare two float32s and branch on the result.
void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
  Assembler::DoubleCondition dblCond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());

  masm.compareFloat(dblCond, ToFloatRegister(comp->left()),
                    ToFloatRegister(comp->right()));
  emitBranch(Assembler::ConditionFromDoubleCondition(dblCond), comp->ifTrue(),
             comp->ifFalse());
}
// Test (left & right) and branch on the resulting flags, in either 32- or
// 64-bit width. Tst discards the AND result and only sets flags.
void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
  const LAllocation* rhs = baab->right();
  if (baab->is64()) {
    ARMRegister lhs = toXRegister(baab->left());
    if (rhs->isConstant()) {
      masm.Tst(lhs, Operand(ToInt64(rhs)));
    } else {
      masm.Tst(lhs, toXRegister(rhs));
    }
  } else {
    ARMRegister lhs = toWRegister(baab->left());
    if (rhs->isConstant()) {
      masm.Tst(lhs, Operand(ToInt32(rhs)));
    } else {
      masm.Tst(lhs, toWRegister(rhs));
    }
  }
  emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse());
}
// Convert an unsigned 32-bit integer to a double.
void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
  Register src = ToRegister(lir->input());
  FloatRegister dest = ToFloatRegister(lir->output());
  masm.convertUInt32ToDouble(src, dest);
}
// Convert an unsigned 32-bit integer to a float32.
void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
  Register src = ToRegister(lir->input());
  FloatRegister dest = ToFloatRegister(lir->output());
  masm.convertUInt32ToFloat32(src, dest);
}
// Logical NOT of an int32: output = (input == 0) ? 1 : 0.
void CodeGenerator::visitNotI(LNotI* ins) {
  ARMRegister in = toWRegister(ins->input());
  ARMRegister out = toWRegister(ins->output());

  masm.Cmp(in, ZeroRegister32);
  masm.Cset(out, Assembler::Zero);
}
// NZCV
// NAN -> 0011
// == -> 0110
// < -> 1000
// > -> 0010
// Logical NOT of a double: true only for +0.0/-0.0 (NaN is truthy, so !NaN
// is false). See the NZCV table above for the flag patterns Fcmp produces.
void CodeGenerator::visitNotD(LNotD* ins) {
  ARMFPRegister input(ToFloatRegister(ins->input()), 64);
  ARMRegister output = toWRegister(ins->output());

  // Set output to 1 if input compares equal to 0.0, else 0.
  masm.Fcmp(input, 0.0);
  masm.Cset(output, Assembler::Equal);

  // Comparison with NaN sets V in the NZCV register.
  // If the input was NaN, output must now be zero, so it can be incremented.
  // The instruction is read: "output = if NoOverflow then output else 0+1".
  masm.Csinc(output, output, ZeroRegister32, Assembler::NoOverflow);
}
// Logical NOT of a float32; same flag trickery as visitNotD above.
void CodeGenerator::visitNotF(LNotF* ins) {
  ARMFPRegister input(ToFloatRegister(ins->input()), 32);
  ARMRegister output = toWRegister(ins->output());

  // Set output to 1 if input compares equal to 0.0, else 0.
  masm.Fcmp(input, 0.0);
  masm.Cset(output, Assembler::Equal);

  // Comparison with NaN sets V in the NZCV register.
  // If the input was NaN, output must now be zero, so it can be incremented.
  // The instruction is read: "output = if NoOverflow then output else 0+1".
  masm.Csinc(output, output, ZeroRegister32, Assembler::NoOverflow);
}
// Emit the epilogue run when this Ion code is invalidated: it records the
// bailing frame's state and transfers control to the invalidation thunk.
void CodeGeneratorARM64::generateInvalidateEpilogue() {
  // Ensure that there is enough space in the buffer for the OsiPoint patching
  // to occur. Otherwise, we could overwrite the invalidation epilogue.
  for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
    masm.nop();
  }

  masm.bind(&invalidate_);

  // Push the return address of the point that we bailout out onto the stack.
  masm.push(lr);

  // Push the Ion script onto the stack (when we determine what that pointer
  // is). The patchable word is filled in at link time.
  invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));

  // Jump to the invalidator which will replace the current frame.
  TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
  masm.jump(thunk);
}
// Map an asm.js/wasm memory-access node's base to its dedicated register,
// or InvalidReg for an unrecognized base.
template <class U>
Register getBase(U* mir) {
  if (mir->base() == U::Heap) {
    return HeapReg;
  }
  return InvalidReg;
}
// asm.js heap load. Unlike wasm, a failed bounds check does not trap: it
// produces a default value instead (NaN for float accesses, 0 for integers).
void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
  const MAsmJSLoadHeap* mir = ins->mir();
  MOZ_ASSERT(!mir->hasMemoryBase());

  const LAllocation* ptr = ins->ptr();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  Register ptrReg = ToRegister(ptr);
  Scalar::Type accessType = mir->accessType();
  bool isFloat = accessType == Scalar::Float32 || accessType == Scalar::Float64;
  Label done;

  if (mir->needsBoundsCheck()) {
    Label boundsCheckPassed;
    Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
    masm.wasmBoundsCheck32(Assembler::Below, ptrReg, boundsCheckLimitReg,
                           &boundsCheckPassed);
    // Return a default value in case of a bounds-check failure.
    if (isFloat) {
      if (accessType == Scalar::Float32) {
        masm.loadConstantFloat32(GenericNaN(), ToFloatRegister(ins->output()));
      } else {
        masm.loadConstantDouble(GenericNaN(), ToFloatRegister(ins->output()));
      }
    } else {
      // Clear the full 64-bit output register.
      masm.Mov(ARMRegister(ToRegister(ins->output()), 64), 0);
    }
    masm.jump(&done);
    masm.bind(&boundsCheckPassed);
  }

  // Effective address is HeapReg + ptr. Sub-word integer loads sign-extend
  // (IntN) or zero-extend (UintN) into the 32-bit output.
  MemOperand addr(ARMRegister(HeapReg, 64), ARMRegister(ptrReg, 64));
  switch (accessType) {
    case Scalar::Int8:
      masm.Ldrb(toWRegister(ins->output()), addr);
      masm.Sxtb(toWRegister(ins->output()), toWRegister(ins->output()));
      break;
    case Scalar::Uint8:
      masm.Ldrb(toWRegister(ins->output()), addr);
      break;
    case Scalar::Int16:
      masm.Ldrh(toWRegister(ins->output()), addr);
      masm.Sxth(toWRegister(ins->output()), toWRegister(ins->output()));
      break;
    case Scalar::Uint16:
      masm.Ldrh(toWRegister(ins->output()), addr);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
      masm.Ldr(toWRegister(ins->output()), addr);
      break;
    case Scalar::Float64:
      masm.Ldr(ARMFPRegister(ToFloatRegister(ins->output()), 64), addr);
      break;
    case Scalar::Float32:
      masm.Ldr(ARMFPRegister(ToFloatRegister(ins->output()), 32), addr);
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  // Only emit the join point if the bounds-check path jumped to it.
  if (done.used()) {
    masm.bind(&done);
  }
}
void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
const MAsmJSStoreHeap* mir = ins->mir();
MOZ_ASSERT(!mir->hasMemoryBase());
const LAllocation* ptr = ins->ptr();
const LAllocation* boundsCheckLimit = ins->boundsCheckLimit</