/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/x86-shared/CodeGenerator-x86-shared.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"
#include "jit/CodeGenerator.h"
#include "jit/InlineScriptTree.h"
#include "jit/JitRuntime.h"
#include "jit/RangeAnalysis.h"
#include "js/ScalarType.h" // js::Scalar::Type
#include "util/DifferentialTesting.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
using namespace js;
using namespace js::jit;
using mozilla::Abs;
using mozilla::DebugOnly;
using mozilla::FloorLog2;
using mozilla::NegativeInfinity;
using JS::GenericNaN;
namespace js {
namespace jit {
CodeGeneratorX86Shared::CodeGeneratorX86Shared(MIRGenerator* gen,
LIRGraph* graph,
MacroAssembler* masm)
: CodeGeneratorShared(gen, graph, masm) {}
#ifdef JS_PUNBOX64
Operand CodeGeneratorX86Shared::ToOperandOrRegister64(
const LInt64Allocation input) {
return ToOperand(input.value());
}
#else
Register64 CodeGeneratorX86Shared::ToOperandOrRegister64(
const LInt64Allocation input) {
return ToRegister64(input);
}
#endif
void OutOfLineBailout::accept(CodeGeneratorX86Shared* codegen) {
codegen->visitOutOfLineBailout(this);
}
void CodeGeneratorX86Shared::emitBranch(Assembler::Condition cond,
MBasicBlock* mirTrue,
MBasicBlock* mirFalse,
Assembler::NaNCond ifNaN) {
if (ifNaN == Assembler::NaN_IsFalse) {
jumpToBlock(mirFalse, Assembler::Parity);
} else if (ifNaN == Assembler::NaN_IsTrue) {
jumpToBlock(mirTrue, Assembler::Parity);
}
if (isNextBlock(mirFalse->lir())) {
jumpToBlock(mirTrue, cond);
} else {
jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
jumpToBlock(mirTrue);
}
}
void CodeGenerator::visitDouble(LDouble* ins) {
const LDefinition* out = ins->getDef(0);
masm.loadConstantDouble(ins->value(), ToFloatRegister(out));
}
void CodeGenerator::visitFloat32(LFloat32* ins) {
const LDefinition* out = ins->getDef(0);
masm.loadConstantFloat32(ins->value(), ToFloatRegister(out));
}
void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
Register input = ToRegister(test->input());
masm.test32(input, input);
emitBranch(Assembler::NonZero, test->ifTrue(), test->ifFalse());
}
void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
const LAllocation* opd = test->input();
// vucomisd flags:
// Z P C
// ---------
// NaN 1 1 1
// > 0 0 0
// < 0 0 1
// = 1 0 0
//
// NaN is falsey, so comparing against 0 and then using the Z flag is
// enough to determine which branch to take.
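  // For illustration: with opd = 3.5 the compare clears ZF, so the NotEqual
  // branch below goes to ifTrue; with opd = 0.0, -0.0 or NaN, ZF is set and
  // we go to ifFalse.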
ScratchDoubleScope scratch(masm);
masm.zeroDouble(scratch);
masm.vucomisd(scratch, ToFloatRegister(opd));
emitBranch(Assembler::NotEqual, test->ifTrue(), test->ifFalse());
}
void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
const LAllocation* opd = test->input();
// vucomiss flags are the same as doubles; see comment above
{
ScratchFloat32Scope scratch(masm);
masm.zeroFloat32(scratch);
masm.vucomiss(scratch, ToFloatRegister(opd));
}
emitBranch(Assembler::NotEqual, test->ifTrue(), test->ifFalse());
}
void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
if (baab->right()->isConstant()) {
masm.test32(ToRegister(baab->left()), Imm32(ToInt32(baab->right())));
} else {
masm.test32(ToRegister(baab->left()), ToRegister(baab->right()));
}
emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse());
}
void CodeGeneratorX86Shared::emitCompare(MCompare::CompareType type,
const LAllocation* left,
const LAllocation* right) {
#ifdef JS_CODEGEN_X64
if (type == MCompare::Compare_Object || type == MCompare::Compare_Symbol ||
type == MCompare::Compare_UIntPtr) {
if (right->isConstant()) {
MOZ_ASSERT(type == MCompare::Compare_UIntPtr);
masm.cmpPtr(ToRegister(left), Imm32(ToInt32(right)));
} else {
masm.cmpPtr(ToRegister(left), ToOperand(right));
}
return;
}
#endif
if (right->isConstant()) {
masm.cmp32(ToRegister(left), Imm32(ToInt32(right)));
} else {
masm.cmp32(ToRegister(left), ToOperand(right));
}
}
void CodeGenerator::visitCompare(LCompare* comp) {
MCompare* mir = comp->mir();
emitCompare(mir->compareType(), comp->left(), comp->right());
masm.emitSet(JSOpToCondition(mir->compareType(), comp->jsop()),
ToRegister(comp->output()));
}
void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
MCompare* mir = comp->cmpMir();
emitCompare(mir->compareType(), comp->left(), comp->right());
Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
emitBranch(cond, comp->ifTrue(), comp->ifFalse());
}
void CodeGenerator::visitCompareD(LCompareD* comp) {
FloatRegister lhs = ToFloatRegister(comp->left());
FloatRegister rhs = ToFloatRegister(comp->right());
Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
if (comp->mir()->operandsAreNeverNaN()) {
nanCond = Assembler::NaN_HandledByCond;
}
masm.compareDouble(cond, lhs, rhs);
masm.emitSet(Assembler::ConditionFromDoubleCondition(cond),
ToRegister(comp->output()), nanCond);
}
void CodeGenerator::visitCompareF(LCompareF* comp) {
FloatRegister lhs = ToFloatRegister(comp->left());
FloatRegister rhs = ToFloatRegister(comp->right());
Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
if (comp->mir()->operandsAreNeverNaN()) {
nanCond = Assembler::NaN_HandledByCond;
}
masm.compareFloat(cond, lhs, rhs);
masm.emitSet(Assembler::ConditionFromDoubleCondition(cond),
ToRegister(comp->output()), nanCond);
}
void CodeGenerator::visitNotI(LNotI* ins) {
masm.cmp32(ToRegister(ins->input()), Imm32(0));
masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
}
void CodeGenerator::visitNotD(LNotD* ins) {
FloatRegister opd = ToFloatRegister(ins->input());
// Not returns true if the input is a NaN. We don't have to worry about
// it if we know the input is never NaN though.
Assembler::NaNCond nanCond = Assembler::NaN_IsTrue;
if (ins->mir()->operandIsNeverNaN()) {
nanCond = Assembler::NaN_HandledByCond;
}
ScratchDoubleScope scratch(masm);
masm.zeroDouble(scratch);
masm.compareDouble(Assembler::DoubleEqualOrUnordered, opd, scratch);
masm.emitSet(Assembler::Equal, ToRegister(ins->output()), nanCond);
}
void CodeGenerator::visitNotF(LNotF* ins) {
FloatRegister opd = ToFloatRegister(ins->input());
// Not returns true if the input is a NaN. We don't have to worry about
// it if we know the input is never NaN though.
Assembler::NaNCond nanCond = Assembler::NaN_IsTrue;
if (ins->mir()->operandIsNeverNaN()) {
nanCond = Assembler::NaN_HandledByCond;
}
ScratchFloat32Scope scratch(masm);
masm.zeroFloat32(scratch);
masm.compareFloat(Assembler::DoubleEqualOrUnordered, opd, scratch);
masm.emitSet(Assembler::Equal, ToRegister(ins->output()), nanCond);
}
void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
FloatRegister lhs = ToFloatRegister(comp->left());
FloatRegister rhs = ToFloatRegister(comp->right());
Assembler::DoubleCondition cond =
JSOpToDoubleCondition(comp->cmpMir()->jsop());
Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
if (comp->cmpMir()->operandsAreNeverNaN()) {
nanCond = Assembler::NaN_HandledByCond;
}
masm.compareDouble(cond, lhs, rhs);
emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(),
comp->ifFalse(), nanCond);
}
void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
FloatRegister lhs = ToFloatRegister(comp->left());
FloatRegister rhs = ToFloatRegister(comp->right());
Assembler::DoubleCondition cond =
JSOpToDoubleCondition(comp->cmpMir()->jsop());
Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
if (comp->cmpMir()->operandsAreNeverNaN()) {
nanCond = Assembler::NaN_HandledByCond;
}
masm.compareFloat(cond, lhs, rhs);
emitBranch(Assembler::ConditionFromDoubleCondition(cond), comp->ifTrue(),
comp->ifFalse(), nanCond);
}
void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
const MWasmStackArg* mir = ins->mir();
Address dst(StackPointer, mir->spOffset());
if (ins->arg()->isConstant()) {
masm.storePtr(ImmWord(ToInt32(ins->arg())), dst);
} else if (ins->arg()->isGeneralReg()) {
masm.storePtr(ToRegister(ins->arg()), dst);
} else {
switch (mir->input()->type()) {
case MIRType::Double:
masm.storeDouble(ToFloatRegister(ins->arg()), dst);
return;
case MIRType::Float32:
masm.storeFloat32(ToFloatRegister(ins->arg()), dst);
return;
#ifdef ENABLE_WASM_SIMD
case MIRType::Simd128:
masm.storeUnalignedSimd128(ToFloatRegister(ins->arg()), dst);
return;
#endif
default:
break;
}
MOZ_CRASH("unexpected mir type in WasmStackArg");
}
}
void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
const MWasmStackArg* mir = ins->mir();
Address dst(StackPointer, mir->spOffset());
if (IsConstant(ins->arg())) {
masm.store64(Imm64(ToInt64(ins->arg())), dst);
} else {
masm.store64(ToRegister64(ins->arg()), dst);
}
}
void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
MIRType mirType = ins->mir()->type();
Register cond = ToRegister(ins->condExpr());
Operand falseExpr = ToOperand(ins->falseExpr());
masm.test32(cond, cond);
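  // test32 sets ZF exactly when cond is 0, so the cmovz (integer/pointer
  // paths) or the NonZero branch (floating-point paths) below picks
  // falseExpr only when the condition is false.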
if (mirType == MIRType::Int32 || mirType == MIRType::RefOrNull) {
Register out = ToRegister(ins->output());
MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
"true expr input is reused for output");
if (mirType == MIRType::Int32) {
masm.cmovz32(falseExpr, out);
} else {
masm.cmovzPtr(falseExpr, out);
}
return;
}
FloatRegister out = ToFloatRegister(ins->output());
MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
"true expr input is reused for output");
Label done;
masm.j(Assembler::NonZero, &done);
if (mirType == MIRType::Float32) {
if (falseExpr.kind() == Operand::FPREG) {
masm.moveFloat32(ToFloatRegister(ins->falseExpr()), out);
} else {
masm.loadFloat32(falseExpr, out);
}
} else if (mirType == MIRType::Double) {
if (falseExpr.kind() == Operand::FPREG) {
masm.moveDouble(ToFloatRegister(ins->falseExpr()), out);
} else {
masm.loadDouble(falseExpr, out);
}
} else if (mirType == MIRType::Simd128) {
if (falseExpr.kind() == Operand::FPREG) {
masm.moveSimd128(ToFloatRegister(ins->falseExpr()), out);
} else {
masm.loadUnalignedSimd128(falseExpr, out);
}
} else {
MOZ_CRASH("unhandled type in visitWasmSelect!");
}
masm.bind(&done);
}
void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
emitWasmCompareAndSelect(ins);
}
void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) {
MOZ_ASSERT(gen->compilingWasm());
MWasmReinterpret* ins = lir->mir();
MIRType to = ins->type();
#ifdef DEBUG
MIRType from = ins->input()->type();
#endif
switch (to) {
case MIRType::Int32:
MOZ_ASSERT(from == MIRType::Float32);
masm.vmovd(ToFloatRegister(lir->input()), ToRegister(lir->output()));
break;
case MIRType::Float32:
MOZ_ASSERT(from == MIRType::Int32);
masm.vmovd(ToRegister(lir->input()), ToFloatRegister(lir->output()));
break;
case MIRType::Double:
case MIRType::Int64:
MOZ_CRASH("not handled by this LIR opcode");
default:
MOZ_CRASH("unexpected WasmReinterpret");
}
}
void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
const MAsmJSLoadHeap* mir = ins->mir();
MOZ_ASSERT(mir->access().offset() == 0);
const LAllocation* ptr = ins->ptr();
const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
AnyRegister out = ToAnyRegister(ins->output());
Scalar::Type accessType = mir->accessType();
OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
if (mir->needsBoundsCheck()) {
ool = new (alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
addOutOfLineCode(ool, mir);
masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ToRegister(ptr),
ToRegister(boundsCheckLimit), ool->entry());
}
Operand srcAddr = toMemoryAccessOperand(ins, 0);
masm.wasmLoad(mir->access(), srcAddr, out);
if (ool) {
masm.bind(ool->rejoin());
}
}
void CodeGeneratorX86Shared::visitOutOfLineLoadTypedArrayOutOfBounds(
OutOfLineLoadTypedArrayOutOfBounds* ool) {
switch (ool->viewType()) {
case Scalar::Int64:
case Scalar::BigInt64:
case Scalar::BigUint64:
case Scalar::Simd128:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH("unexpected array type");
case Scalar::Float32:
masm.loadConstantFloat32(float(GenericNaN()), ool->dest().fpu());
break;
case Scalar::Float64:
masm.loadConstantDouble(GenericNaN(), ool->dest().fpu());
break;
case Scalar::Int8:
case Scalar::Uint8:
case Scalar::Int16:
case Scalar::Uint16:
case Scalar::Int32:
case Scalar::Uint32:
    case Scalar::Uint8Clamped: {
      Register destReg = ool->dest().gpr();
      masm.mov(ImmWord(0), destReg);
      break;
    }
}
masm.jmp(ool->rejoin());
}
void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
const MAsmJSStoreHeap* mir = ins->mir();
const LAllocation* ptr = ins->ptr();
const LAllocation* value = ins->value();
const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
Scalar::Type accessType = mir->accessType();
canonicalizeIfDeterministic(accessType, value);
Label rejoin;
if (mir->needsBoundsCheck()) {
masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ToRegister(ptr),
ToRegister(boundsCheckLimit), &rejoin);
}
Operand dstAddr = toMemoryAccessOperand(ins, 0);
masm.wasmStore(mir->access(), ToAnyRegister(value), dstAddr);
if (rejoin.used()) {
masm.bind(&rejoin);
}
}
void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
MWasmAddOffset* mir = lir->mir();
Register base = ToRegister(lir->base());
Register out = ToRegister(lir->output());
if (base != out) {
masm.move32(base, out);
}
masm.add32(Imm32(mir->offset()), out);
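  // add32 sets the carry flag on unsigned wraparound; for illustration,
  // base = 0xFFFFFFF0 plus offset 0x20 wraps to 0x10 with CF = 1, so the
  // CarryClear check below routes wrapped pointers to the OutOfBounds trap.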
Label ok;
masm.j(Assembler::CarryClear, &ok);
masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
masm.bind(&ok);
}
void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
MWasmAddOffset* mir = lir->mir();
Register64 base = ToRegister64(lir->base());
Register64 out = ToOutRegister64(lir);
if (base != out) {
masm.move64(base, out);
}
masm.add64(Imm64(mir->offset()), out);
Label ok;
masm.j(Assembler::CarryClear, &ok);
masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
masm.bind(&ok);
}
void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
FloatRegister input = ToFloatRegister(lir->input());
Register output = ToRegister(lir->output());
MWasmTruncateToInt32* mir = lir->mir();
MIRType inputType = mir->input()->type();
MOZ_ASSERT(inputType == MIRType::Double || inputType == MIRType::Float32);
auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
addOutOfLineCode(ool, mir);
Label* oolEntry = ool->entry();
if (mir->isUnsigned()) {
if (inputType == MIRType::Double) {
masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(),
oolEntry);
} else if (inputType == MIRType::Float32) {
masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(),
oolEntry);
} else {
MOZ_CRASH("unexpected type");
}
if (mir->isSaturating()) {
masm.bind(ool->rejoin());
}
return;
}
if (inputType == MIRType::Double) {
masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(),
oolEntry);
} else if (inputType == MIRType::Float32) {
masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(),
oolEntry);
} else {
MOZ_CRASH("unexpected type");
}
masm.bind(ool->rejoin());
}
bool CodeGeneratorX86Shared::generateOutOfLineCode() {
if (!CodeGeneratorShared::generateOutOfLineCode()) {
return false;
}
if (deoptLabel_.used()) {
// All non-table-based bailouts will go here.
masm.bind(&deoptLabel_);
// Push the frame size, so the handler can recover the IonScript.
masm.push(Imm32(frameSize()));
TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
masm.jump(handler);
}
return !masm.oom();
}
class BailoutJump {
Assembler::Condition cond_;
public:
explicit BailoutJump(Assembler::Condition cond) : cond_(cond) {}
#ifdef JS_CODEGEN_X86
void operator()(MacroAssembler& masm, uint8_t* code) const {
masm.j(cond_, ImmPtr(code), RelocationKind::HARDCODED);
}
#endif
void operator()(MacroAssembler& masm, Label* label) const {
masm.j(cond_, label);
}
};
class BailoutLabel {
Label* label_;
public:
explicit BailoutLabel(Label* label) : label_(label) {}
#ifdef JS_CODEGEN_X86
void operator()(MacroAssembler& masm, uint8_t* code) const {
masm.retarget(label_, ImmPtr(code), RelocationKind::HARDCODED);
}
#endif
void operator()(MacroAssembler& masm, Label* label) const {
masm.retarget(label_, label);
}
};
template <typename T>
void CodeGeneratorX86Shared::bailout(const T& binder, LSnapshot* snapshot) {
encode(snapshot);
// Though the assembler doesn't track all frame pushes, at least make sure
// the known value makes sense. We can't use bailout tables if the stack
// isn't properly aligned to the static frame size.
MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None() && deoptTable_,
frameClass_.frameSize() == masm.framePushed());
#ifdef JS_CODEGEN_X86
  // On x64, bailout tables are pointless, because 16 extra bytes are
  // reserved per external jump, whereas it takes only 10 bytes to encode a
  // non-table based bailout.
if (assignBailoutId(snapshot)) {
binder(masm, deoptTable_->value +
snapshot->bailoutId() * BAILOUT_TABLE_ENTRY_SIZE);
return;
}
#endif
// We could not use a jump table, either because all bailout IDs were
// reserved, or a jump table is not optimal for this frame size or
// platform. Whatever, we will generate a lazy bailout.
//
// All bailout code is associated with the bytecodeSite of the block we are
// bailing out from.
InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
OutOfLineBailout* ool = new (alloc()) OutOfLineBailout(snapshot);
addOutOfLineCode(ool,
new (alloc()) BytecodeSite(tree, tree->script()->code()));
binder(masm, ool->entry());
}
void CodeGeneratorX86Shared::bailoutIf(Assembler::Condition condition,
LSnapshot* snapshot) {
bailout(BailoutJump(condition), snapshot);
}
void CodeGeneratorX86Shared::bailoutIf(Assembler::DoubleCondition condition,
LSnapshot* snapshot) {
MOZ_ASSERT(Assembler::NaNCondFromDoubleCondition(condition) ==
Assembler::NaN_HandledByCond);
bailoutIf(Assembler::ConditionFromDoubleCondition(condition), snapshot);
}
void CodeGeneratorX86Shared::bailoutFrom(Label* label, LSnapshot* snapshot) {
MOZ_ASSERT_IF(!masm.oom(), label->used() && !label->bound());
bailout(BailoutLabel(label), snapshot);
}
void CodeGeneratorX86Shared::bailout(LSnapshot* snapshot) {
Label label;
masm.jump(&label);
bailoutFrom(&label, snapshot);
}
void CodeGeneratorX86Shared::visitOutOfLineBailout(OutOfLineBailout* ool) {
masm.push(Imm32(ool->snapshot()->snapshotOffset()));
masm.jmp(&deoptLabel_);
}
void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
FloatRegister first = ToFloatRegister(ins->first());
FloatRegister second = ToFloatRegister(ins->second());
#ifdef DEBUG
FloatRegister output = ToFloatRegister(ins->output());
MOZ_ASSERT(first == output);
#endif
bool handleNaN = !ins->mir()->range() || ins->mir()->range()->canBeNaN();
if (ins->mir()->isMax()) {
masm.maxDouble(second, first, handleNaN);
} else {
masm.minDouble(second, first, handleNaN);
}
}
void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
FloatRegister first = ToFloatRegister(ins->first());
FloatRegister second = ToFloatRegister(ins->second());
#ifdef DEBUG
FloatRegister output = ToFloatRegister(ins->output());
MOZ_ASSERT(first == output);
#endif
bool handleNaN = !ins->mir()->range() || ins->mir()->range()->canBeNaN();
if (ins->mir()->isMax()) {
masm.maxFloat32(second, first, handleNaN);
} else {
masm.minFloat32(second, first, handleNaN);
}
}
void CodeGenerator::visitClzI(LClzI* ins) {
Register input = ToRegister(ins->input());
Register output = ToRegister(ins->output());
bool knownNotZero = ins->mir()->operandIsNeverZero();
masm.clz32(input, output, knownNotZero);
}
void CodeGenerator::visitCtzI(LCtzI* ins) {
Register input = ToRegister(ins->input());
Register output = ToRegister(ins->output());
bool knownNotZero = ins->mir()->operandIsNeverZero();
masm.ctz32(input, output, knownNotZero);
}
void CodeGenerator::visitPopcntI(LPopcntI* ins) {
Register input = ToRegister(ins->input());
Register output = ToRegister(ins->output());
Register temp =
ins->temp0()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp0());
masm.popcnt32(input, output, temp);
}
void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
FloatRegister input = ToFloatRegister(ins->input());
FloatRegister output = ToFloatRegister(ins->output());
ScratchDoubleScope scratch(masm);
Label done, sqrt;
if (!ins->mir()->operandIsNeverNegativeInfinity()) {
// Branch if not -Infinity.
masm.loadConstantDouble(NegativeInfinity<double>(), scratch);
Assembler::DoubleCondition cond = Assembler::DoubleNotEqualOrUnordered;
if (ins->mir()->operandIsNeverNaN()) {
cond = Assembler::DoubleNotEqual;
}
masm.branchDouble(cond, input, scratch, &sqrt);
// Math.pow(-Infinity, 0.5) == Infinity.
masm.zeroDouble(output);
masm.subDouble(scratch, output);
masm.jump(&done);
masm.bind(&sqrt);
}
if (!ins->mir()->operandIsNeverNegativeZero()) {
// Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
// Adding 0 converts any -0 to 0.
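    // (Under IEEE-754 round-to-nearest, -0.0 + 0.0 == +0.0, while vsqrtsd
    // maps -0.0 to -0.0, hence the fixup before taking the square root.)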
masm.zeroDouble(scratch);
masm.addDouble(input, scratch);
masm.vsqrtsd(scratch, output, output);
} else {
masm.vsqrtsd(input, output, output);
}
masm.bind(&done);
}
class OutOfLineUndoALUOperation
: public OutOfLineCodeBase<CodeGeneratorX86Shared> {
LInstruction* ins_;
public:
explicit OutOfLineUndoALUOperation(LInstruction* ins) : ins_(ins) {}
virtual void accept(CodeGeneratorX86Shared* codegen) override {
codegen->visitOutOfLineUndoALUOperation(this);
}
LInstruction* ins() const { return ins_; }
};
void CodeGenerator::visitAddI(LAddI* ins) {
if (ins->rhs()->isConstant()) {
masm.addl(Imm32(ToInt32(ins->rhs())), ToOperand(ins->lhs()));
} else {
masm.addl(ToOperand(ins->rhs()), ToRegister(ins->lhs()));
}
if (ins->snapshot()) {
if (ins->recoversInput()) {
OutOfLineUndoALUOperation* ool =
new (alloc()) OutOfLineUndoALUOperation(ins);
addOutOfLineCode(ool, ins->mir());
masm.j(Assembler::Overflow, ool->entry());
} else {
bailoutIf(Assembler::Overflow, ins->snapshot());
}
}
}
void CodeGenerator::visitAddI64(LAddI64* lir) {
const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
if (IsConstant(rhs)) {
masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
return;
}
masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
}
void CodeGenerator::visitSubI(LSubI* ins) {
if (ins->rhs()->isConstant()) {
masm.subl(Imm32(ToInt32(ins->rhs())), ToOperand(ins->lhs()));
} else {
masm.subl(ToOperand(ins->rhs()), ToRegister(ins->lhs()));
}
if (ins->snapshot()) {
if (ins->recoversInput()) {
OutOfLineUndoALUOperation* ool =
new (alloc()) OutOfLineUndoALUOperation(ins);
addOutOfLineCode(ool, ins->mir());
masm.j(Assembler::Overflow, ool->entry());
} else {
bailoutIf(Assembler::Overflow, ins->snapshot());
}
}
}
void CodeGenerator::visitSubI64(LSubI64* lir) {
const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
if (IsConstant(rhs)) {
masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
return;
}
masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
}
void CodeGeneratorX86Shared::visitOutOfLineUndoALUOperation(
OutOfLineUndoALUOperation* ool) {
LInstruction* ins = ool->ins();
Register reg = ToRegister(ins->getDef(0));
DebugOnly<LAllocation*> lhs = ins->getOperand(0);
LAllocation* rhs = ins->getOperand(1);
MOZ_ASSERT(reg == ToRegister(lhs));
MOZ_ASSERT_IF(rhs->isGeneralReg(), reg != ToRegister(rhs));
// Undo the effect of the ALU operation, which was performed on the output
// register and overflowed. Writing to the output register clobbered an
// input reg, and the original value of the input needs to be recovered
// to satisfy the constraint imposed by any RECOVERED_INPUT operands to
// the bailout snapshot.
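  // For illustration: for an LAddI whose rhs is the constant 5, the add left
  // the output register holding lhs + 5 (mod 2^32); subtracting 5 below
  // restores the original lhs value for the snapshot to read.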
if (rhs->isConstant()) {
Imm32 constant(ToInt32(rhs));
if (ins->isAddI()) {
masm.subl(constant, reg);
} else {
masm.addl(constant, reg);
}
} else {
if (ins->isAddI()) {
masm.subl(ToOperand(rhs), reg);
} else {
masm.addl(ToOperand(rhs), reg);
}
}
bailout(ool->ins()->snapshot());
}
class MulNegativeZeroCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared> {
LMulI* ins_;
public:
explicit MulNegativeZeroCheck(LMulI* ins) : ins_(ins) {}
virtual void accept(CodeGeneratorX86Shared* codegen) override {
codegen->visitMulNegativeZeroCheck(this);
}
LMulI* ins() const { return ins_; }
};
void CodeGenerator::visitMulI(LMulI* ins) {
const LAllocation* lhs = ins->lhs();
const LAllocation* rhs = ins->rhs();
MMul* mul = ins->mir();
MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
!mul->canBeNegativeZero() && !mul->canOverflow());
if (rhs->isConstant()) {
// Bailout on -0.0
int32_t constant = ToInt32(rhs);
if (mul->canBeNegativeZero() && constant <= 0) {
Assembler::Condition bailoutCond =
(constant == 0) ? Assembler::Signed : Assembler::Equal;
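      // (constant == 0 produces -0 when lhs is negative, hence the Signed
      // check; constant < 0 produces -0 only when lhs is zero, hence the
      // Equal check after test32.)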
masm.test32(ToRegister(lhs), ToRegister(lhs));
bailoutIf(bailoutCond, ins->snapshot());
}
switch (constant) {
case -1:
masm.negl(ToOperand(lhs));
break;
case 0:
masm.xorl(ToOperand(lhs), ToRegister(lhs));
return; // escape overflow check;
case 1:
// nop
return; // escape overflow check;
case 2:
masm.addl(ToOperand(lhs), ToRegister(lhs));
break;
default:
if (!mul->canOverflow() && constant > 0) {
// Use shift if cannot overflow and constant is power of 2
int32_t shift = FloorLog2(constant);
if ((1 << shift) == constant) {
masm.shll(Imm32(shift), ToRegister(lhs));
return;
}
}
masm.imull(Imm32(ToInt32(rhs)), ToRegister(lhs));
}
// Bailout on overflow
if (mul->canOverflow()) {
bailoutIf(Assembler::Overflow, ins->snapshot());
}
} else {
masm.imull(ToOperand(rhs), ToRegister(lhs));
// Bailout on overflow
if (mul->canOverflow()) {
bailoutIf(Assembler::Overflow, ins->snapshot());
}
if (mul->canBeNegativeZero()) {
// Jump to an OOL path if the result is 0.
MulNegativeZeroCheck* ool = new (alloc()) MulNegativeZeroCheck(ins);
addOutOfLineCode(ool, mul);
masm.test32(ToRegister(lhs), ToRegister(lhs));
masm.j(Assembler::Zero, ool->entry());
masm.bind(ool->rejoin());
}
}
}
void CodeGenerator::visitMulI64(LMulI64* lir) {
const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
MOZ_ASSERT(ToRegister64(lhs) == ToOutRegister64(lir));
if (IsConstant(rhs)) {
int64_t constant = ToInt64(rhs);
switch (constant) {
case -1:
masm.neg64(ToRegister64(lhs));
return;
case 0:
masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
return;
case 1:
// nop
return;
case 2:
masm.add64(ToRegister64(lhs), ToRegister64(lhs));
return;
default:
if (constant > 0) {
// Use shift if constant is power of 2.
int32_t shift = mozilla::FloorLog2(constant);
if (int64_t(1) << shift == constant) {
masm.lshift64(Imm32(shift), ToRegister64(lhs));
return;
}
}
Register temp = ToTempRegisterOrInvalid(lir->temp());
masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
}
} else {
Register temp = ToTempRegisterOrInvalid(lir->temp());
masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
}
}
class ReturnZero : public OutOfLineCodeBase<CodeGeneratorX86Shared> {
Register reg_;
public:
explicit ReturnZero(Register reg) : reg_(reg) {}
virtual void accept(CodeGeneratorX86Shared* codegen) override {
codegen->visitReturnZero(this);
}
Register reg() const { return reg_; }
};
void CodeGeneratorX86Shared::visitReturnZero(ReturnZero* ool) {
masm.mov(ImmWord(0), ool->reg());
masm.jmp(ool->rejoin());
}
void CodeGenerator::visitUDivOrMod(LUDivOrMod* ins) {
Register lhs = ToRegister(ins->lhs());
Register rhs = ToRegister(ins->rhs());
Register output = ToRegister(ins->output());
MOZ_ASSERT_IF(lhs != rhs, rhs != eax);
MOZ_ASSERT(rhs != edx);
MOZ_ASSERT_IF(output == eax, ToRegister(ins->remainder()) == edx);
ReturnZero* ool = nullptr;
// Put the lhs in eax.
if (lhs != eax) {
masm.mov(lhs, eax);
}
// Prevent divide by zero.
if (ins->canBeDivideByZero()) {
masm.test32(rhs, rhs);
if (ins->mir()->isTruncated()) {
if (ins->trapOnError()) {
Label nonZero;
masm.j(Assembler::NonZero, &nonZero);
masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->bytecodeOffset());
masm.bind(&nonZero);
} else {
ool = new (alloc()) ReturnZero(output);
masm.j(Assembler::Zero, ool->entry());
}
} else {
bailoutIf(Assembler::Zero, ins->snapshot());
}
}
// Zero extend the lhs into edx to make (edx:eax), since udiv is 64-bit.
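  // (x86 background: `div r32` divides the 64-bit value edx:eax by r32,
  // leaving the quotient in eax and the remainder in edx, so edx must be
  // zeroed first for a 32-bit unsigned dividend.)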
masm.mov(ImmWord(0), edx);
masm.udiv(rhs);
// If the remainder is > 0, bailout since this must be a double.
if (ins->mir()->isDiv() && !ins->mir()->toDiv()->canTruncateRemainder()) {
Register remainder = ToRegister(ins->remainder());
masm.test32(remainder, remainder);
bailoutIf(Assembler::NonZero, ins->snapshot());
}
// Unsigned div or mod can return a value that's not a signed int32.
// If our users aren't expecting that, bail.
if (!ins->mir()->isTruncated()) {
masm.test32(output, output);
bailoutIf(Assembler::Signed, ins->snapshot());
}
if (ool) {
addOutOfLineCode(ool, ins->mir());
masm.bind(ool->rejoin());
}
}
void CodeGenerator::visitUDivOrModConstant(LUDivOrModConstant* ins) {
Register lhs = ToRegister(ins->numerator());
Register output = ToRegister(ins->output());
uint32_t d = ins->denominator();
// This emits the division answer into edx or the modulus answer into eax.
MOZ_ASSERT(output == eax || output == edx);
MOZ_ASSERT(lhs != eax && lhs != edx);
bool isDiv = (output == edx);
if (d == 0) {
if (ins->mir()->isTruncated()) {
if (ins->trapOnError()) {
masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->bytecodeOffset());
} else {
masm.xorl(output, output);
}
} else {
bailout(ins->snapshot());
}
return;
}
// The denominator isn't a power of 2 (see LDivPowTwoI and LModPowTwoI).
MOZ_ASSERT((d & (d - 1)) != 0);
ReciprocalMulConstants rmc = computeDivisionConstants(d, /* maxLog = */ 32);
// We first compute (M * n) >> 32, where M = rmc.multiplier.
masm.movl(Imm32(rmc.multiplier), eax);
masm.umull(lhs);
if (rmc.multiplier > UINT32_MAX) {
// M >= 2^32 and shift == 0 is impossible, as d >= 2 implies that
// ((M * n) >> (32 + shift)) >= n > floor(n/d) whenever n >= d,
// contradicting the proof of correctness in computeDivisionConstants.
MOZ_ASSERT(rmc.shiftAmount > 0);
MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 33));
// We actually computed edx = ((uint32_t(M) * n) >> 32) instead. Since
// (M * n) >> (32 + shift) is the same as (edx + n) >> shift, we can
// correct for the overflow. This case is a bit trickier than the signed
// case, though, as the (edx + n) addition itself can overflow; however,
// note that (edx + n) >> shift == (((n - edx) >> 1) + edx) >> (shift - 1),
// which is overflow-free. See Hacker's Delight, section 10-8 for details.
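    // Worked example, assuming computeDivisionConstants(7, 32) yields
    // M = ceil(2^35 / 7) = 0x124924925 and shiftAmount = 3: for n = 21,
    // edx = (uint32_t(M) * 21) >> 32 = 3, and (((21 - 3) >> 1) + 3) >> 2 =
    // 12 >> 2 = 3 = 21 / 7.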
// Compute (n - edx) >> 1 into eax.
masm.movl(lhs, eax);
masm.subl(edx, eax);
masm.shrl(Imm32(1), eax);
// Finish the computation.
masm.addl(eax, edx);
masm.shrl(Imm32(rmc.shiftAmount - 1), edx);
} else {
masm.shrl(Imm32(rmc.shiftAmount), edx);
}
// We now have the truncated division value in edx. If we're
// computing a modulus or checking whether the division resulted
// in an integer, we need to multiply the obtained value by d and
// finish the computation/check.
if (!isDiv) {
masm.imull(Imm32(d), edx, edx);
masm.movl(lhs, eax);
masm.subl(edx, eax);
// The final result of the modulus op, just computed above by the
// sub instruction, can be a number in the range [2^31, 2^32). If
// this is the case and the modulus is not truncated, we must bail
// out.
if (!ins->mir()->isTruncated()) {
bailoutIf(Assembler::Signed, ins->snapshot());
}
} else if (!ins->mir()->isTruncated()) {
masm.imull(Imm32(d), edx, eax);
masm.cmpl(lhs, eax);
bailoutIf(Assembler::NotEqual, ins->snapshot());
}
}
void CodeGeneratorX86Shared::visitMulNegativeZeroCheck(
MulNegativeZeroCheck* ool) {
LMulI* ins = ool->ins();
Register result = ToRegister(ins->output());
Operand lhsCopy = ToOperand(ins->lhsCopy());
Operand rhs = ToOperand(ins->rhs());
MOZ_ASSERT_IF(lhsCopy.kind() == Operand::REG, lhsCopy.reg() != result.code());
// Result is -0 if lhs or rhs is negative.
masm.movl(lhsCopy, result);
masm.orl(rhs, result);
bailoutIf(Assembler::Signed, ins->snapshot());
masm.mov(ImmWord(0), result);
masm.jmp(ool->rejoin());
}
void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
Register lhs = ToRegister(ins->numerator());
DebugOnly<Register> output = ToRegister(ins->output());
int32_t shift = ins->shift();
bool negativeDivisor = ins->negativeDivisor();
MDiv* mir = ins->mir();
// We use defineReuseInput so these should always be the same, which is
// convenient since all of our instructions here are two-address.
MOZ_ASSERT(lhs == output);
if (!mir->isTruncated() && negativeDivisor) {
// 0 divided by a negative number must return a double.
masm.test32(lhs, lhs);
bailoutIf(Assembler::Zero, ins->snapshot());
}
if (shift) {
if (!mir->isTruncated()) {
// If the remainder is != 0, bailout since this must be a double.
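      // (UINT32_MAX >> (32 - shift) masks the low `shift` bits; e.g.
      // shift = 3 gives 0x7, the remainder modulo 8.)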
masm.test32(lhs, Imm32(UINT32_MAX >> (32 - shift)));
bailoutIf(Assembler::NonZero, ins->snapshot());
}
if (mir->isUnsigned()) {
masm.shrl(Imm32(shift), lhs);
} else {
// Adjust the value so that shifting produces a correctly
// rounded result when the numerator is negative. See 10-1
// "Signed Division by a Known Power of 2" in Henry
// S. Warren, Jr.'s Hacker's Delight.
if (mir->canBeNegativeDividend() && mir->isTruncated()) {
// Note: There is no need to execute this code, which handles how to
// round the signed integer division towards 0, if we previously bailed
// due to a non-zero remainder.
Register lhsCopy = ToRegister(ins->numeratorCopy());
MOZ_ASSERT(lhsCopy != lhs);
if (shift > 1) {
// Copy the sign bit of the numerator. (= (2^32 - 1) or 0)
masm.sarl(Imm32(31), lhs);
}
// Divide by 2^(32 - shift)
// i.e. (= (2^32 - 1) / 2^(32 - shift) or 0)
// i.e. (= (2^shift - 1) or 0)
masm.shrl(Imm32(32 - shift), lhs);
        // If signed, let any 1 bits below the shifted bits bubble up, so
        // that once shifted the value is rounded towards 0.
masm.addl(lhsCopy, lhs);
}
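      // Worked example of the adjustment above: shift = 3, lhs = -5 (so
      // lhsCopy also holds -5). sarl(31) makes lhs = 0xFFFFFFFF, shrl(29)
      // leaves 7 (= 2^3 - 1), addl gives 7 + (-5) = 2, and the sarl below
      // yields 2 >> 3 = 0 = trunc(-5 / 8).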
masm.sarl(Imm32(shift), lhs);
if (negativeDivisor) {
masm.negl(lhs);
}
}
return;
}
if (negativeDivisor) {
// INT32_MIN / -1 overflows.
masm.negl(lhs);
if (!mir->isTruncated()) {
bailoutIf(Assembler::Overflow, ins->snapshot());
} else if (mir->trapOnError()) {
Label ok;
masm.j(Assembler::NoOverflow, &ok);
masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
masm.bind(&ok);
}
} else if (mir->isUnsigned() && !mir->isTruncated()) {
// Unsigned division by 1 can overflow if output is not
// truncated.
masm.test32(lhs, lhs);
bailoutIf(Assembler::Signed, ins->snapshot());
}
}
void CodeGenerator::visitDivOrModConstantI(LDivOrModConstantI* ins) {
Register lhs = ToRegister(ins->numerator());
Register output = ToRegister(ins->output());
int32_t d = ins->denominator();
// This emits the division answer into edx or the modulus answer into eax.
MOZ_ASSERT(output == eax || output == edx);
MOZ_ASSERT(lhs != eax && lhs != edx);
bool isDiv = (output == edx);
// The absolute value of the denominator isn't a power of 2 (see LDivPowTwoI
// and LModPowTwoI).
MOZ_ASSERT((Abs(d) & (Abs(d) - 1)) != 0);
// We will first divide by Abs(d), and negate the answer if d is negative.
// If desired, this can be avoided by generalizing computeDivisionConstants.
ReciprocalMulConstants rmc =
computeDivisionConstants(Abs(d), /* maxLog = */ 31);
// We first compute (M * n) >> 32, where M = rmc.multiplier.
masm.movl(Imm32(rmc.multiplier), eax);
masm.imull(lhs);
if (rmc.multiplier > INT32_MAX) {
MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 32));
// We actually computed edx = ((int32_t(M) * n) >> 32) instead. Since
// (M * n) >> 32 is the same as (edx + n), we can correct for the overflow.
// (edx + n) can't overflow, as n and edx have opposite signs because
// int32_t(M) is negative.
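    // Worked example, assuming the standard signed magic number for d = 7
    // (M = 0x92492493, shiftAmount = 2): for n = 21, imull leaves
    // edx = (int32_t(M) * 21) >> 32 = -9; adding n gives 12, and the sarl
    // below produces 12 >> 2 = 3 = 21 / 7.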
masm.addl(lhs, edx);
}
// (M * n) >> (32 + shift) is the truncated division answer if n is
// non-negative, as proved in the comments of computeDivisionConstants. We
// must add 1 later if n is negative to get the right answer in all cases.
masm.sarl(Imm32(rmc.shiftAmount), edx);
// We'll subtract -1 instead of adding 1, because (n < 0 ? -1 : 0) can be
// computed with just a sign-extending shift of 31 bits.
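  // (For n = -7, sarl(31) leaves eax = -1 and subl computes
  // edx - (-1) = edx + 1, the required adjustment; for n >= 0, eax = 0 and
  // edx is unchanged.)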
if (ins->canBeNegativeDividend()) {
masm.movl(lhs, eax);
masm.sarl(Imm32(31), eax);
masm.subl(eax, edx);
}
// After this, edx contains the correct truncated division result.
if (d < 0) {
masm.negl(edx);
}
if (!isDiv) {
masm.imull(Imm32(-d), edx, eax);
masm.addl(lhs, eax);
}
if (!ins->mir()->isTruncated()) {
if (isDiv) {
// This is a division op. Multiply the obtained value by d to check if
// the correct answer is an integer. This cannot overflow, since |d| > 1.
masm.imull(Imm32(d), edx, eax);
masm.cmp32(lhs, eax);
bailoutIf(Assembler::NotEqual, ins->snapshot());
// If lhs is zero and the divisor is negative, the answer should have
// been -0.
if (d < 0) {
masm.test32(lhs, lhs);
bailoutIf(Assembler::Zero, ins->snapshot());
}
} else if (ins->canBeNegativeDividend()) {
// This is a mod op. If the computed value is zero and lhs
// is negative, the answer should have been -0.
Label done;
masm.cmp32(lhs, Imm32(0));
masm.j(Assembler::GreaterThanOrEqual, &done);
masm.test32(eax, eax);
bailoutIf(Assembler::Zero, ins->snapshot());
masm.bind(&done);
}
}
}
void CodeGenerator::visitDivI(LDivI* ins) {
Register remainder = ToRegister(ins->remainder());
Register lhs = ToRegister(ins->lhs());