/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/Lowering.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/EndianUtils.h"
#include "mozilla/FloatingPoint.h"
#include "mozilla/MathAlgorithms.h"
#include <type_traits>
#include "jit/ABIArgGenerator.h"
#include "jit/IonGenericCallStub.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/JitSpewer.h"
#include "jit/LIR.h"
#include "jit/MacroAssembler.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "jit/SharedICRegisters.h"
#include "js/experimental/JitInfo.h" // JSJitInfo
#include "util/Memory.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmFeatures.h" // for wasm::ReportSimdAnalysis
#include "wasm/WasmInstanceData.h"
#include "jit/shared/Lowering-shared-inl.h"
#include "vm/BytecodeUtil-inl.h"
using namespace js;
using namespace jit;
using JS::GenericNaN;
using mozilla::DebugOnly;
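// Lowering translates each MIR instruction into one or more LIR
// instructions: every visit* method below picks operand policies
// (useRegister, useBox, useFixedAtStart, ...) and temporaries that the
// register allocator later satisfies.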
LBoxAllocation LIRGenerator::useBoxFixedAtStart(MDefinition* mir,
ValueOperand op) {
#if defined(JS_NUNBOX32)
return useBoxFixed(mir, op.typeReg(), op.payloadReg(), true);
#elif defined(JS_PUNBOX64)
return useBoxFixed(mir, op.valueReg(), op.scratchReg(), true);
#endif
}
LBoxAllocation LIRGenerator::useBoxAtStart(MDefinition* mir,
LUse::Policy policy) {
return useBox(mir, policy, /* useAtStart = */ true);
}
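// Parameters live in fixed stack slots: |this| at THIS_FRAME_ARGSLOT and
// formal argument i at slot i + 1, scaled by sizeof(Value) below. On
// NUNBOX32 platforms the type and payload words of a slot are addressed
// separately, in an endian-dependent order.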
void LIRGenerator::visitParameter(MParameter* param) {
ptrdiff_t offset;
if (param->index() == MParameter::THIS_SLOT) {
offset = THIS_FRAME_ARGSLOT;
} else {
offset = 1 + param->index();
}
LParameter* ins = new (alloc()) LParameter;
defineBox(ins, param, LDefinition::FIXED);
offset *= sizeof(Value);
#if defined(JS_NUNBOX32)
# if MOZ_BIG_ENDIAN()
ins->getDef(0)->setOutput(LArgument(offset));
ins->getDef(1)->setOutput(LArgument(offset + 4));
# else
ins->getDef(0)->setOutput(LArgument(offset + 4));
ins->getDef(1)->setOutput(LArgument(offset));
# endif
#elif defined(JS_PUNBOX64)
ins->getDef(0)->setOutput(LArgument(offset));
#endif
}
void LIRGenerator::visitCallee(MCallee* ins) {
define(new (alloc()) LCallee(), ins);
}
void LIRGenerator::visitIsConstructing(MIsConstructing* ins) {
define(new (alloc()) LIsConstructing(), ins);
}
void LIRGenerator::visitGoto(MGoto* ins) {
add(new (alloc()) LGoto(ins->target()));
}
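// Lower a table switch. An Int32 index can be used at start, with a
// clobberable copy of it in tempInt; a Double index is instead truncated
// into tempInt before dispatch.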
void LIRGenerator::visitTableSwitch(MTableSwitch* tableswitch) {
MDefinition* opd = tableswitch->getOperand(0);
// There should be at least 1 successor. The default case!
MOZ_ASSERT(tableswitch->numSuccessors() > 0);
// If there are no cases, the default case is always taken.
if (tableswitch->numSuccessors() == 1) {
add(new (alloc()) LGoto(tableswitch->getDefault()));
return;
}
  // If we don't know the type, use the generic Value table switch.
if (opd->type() == MIRType::Value) {
LTableSwitchV* lir = newLTableSwitchV(tableswitch);
add(lir);
return;
}
// Case indices are numeric, so other types will always go to the default
// case.
if (opd->type() != MIRType::Int32 && opd->type() != MIRType::Double) {
add(new (alloc()) LGoto(tableswitch->getDefault()));
return;
}
  // Emit an LTableSwitch, capable of handling either an integer or
  // floating-point index.
LAllocation index;
LDefinition tempInt;
if (opd->type() == MIRType::Int32) {
index = useRegisterAtStart(opd);
tempInt = tempCopy(opd, 0);
} else {
index = useRegister(opd);
tempInt = temp(LDefinition::GENERAL);
}
add(newLTableSwitch(index, tempInt, tableswitch));
}
void LIRGenerator::visitCheckOverRecursed(MCheckOverRecursed* ins) {
LCheckOverRecursed* lir = new (alloc()) LCheckOverRecursed();
add(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitNewArray(MNewArray* ins) {
LNewArray* lir = new (alloc()) LNewArray(temp());
define(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitNewArrayDynamicLength(MNewArrayDynamicLength* ins) {
MDefinition* length = ins->length();
MOZ_ASSERT(length->type() == MIRType::Int32);
LNewArrayDynamicLength* lir =
new (alloc()) LNewArrayDynamicLength(useRegister(length), temp());
define(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitNewIterator(MNewIterator* ins) {
LNewIterator* lir = new (alloc()) LNewIterator(temp());
define(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitNewTypedArray(MNewTypedArray* ins) {
LNewTypedArray* lir = new (alloc()) LNewTypedArray(temp(), temp());
define(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitNewTypedArrayDynamicLength(
MNewTypedArrayDynamicLength* ins) {
MDefinition* length = ins->length();
MOZ_ASSERT(length->type() == MIRType::Int32);
LNewTypedArrayDynamicLength* lir =
new (alloc()) LNewTypedArrayDynamicLength(useRegister(length), temp());
define(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitNewTypedArrayFromArray(MNewTypedArrayFromArray* ins) {
MDefinition* array = ins->array();
MOZ_ASSERT(array->type() == MIRType::Object);
auto* lir = new (alloc()) LNewTypedArrayFromArray(useRegisterAtStart(array));
defineReturn(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitNewTypedArrayFromArrayBuffer(
MNewTypedArrayFromArrayBuffer* ins) {
MDefinition* arrayBuffer = ins->arrayBuffer();
MDefinition* byteOffset = ins->byteOffset();
MDefinition* length = ins->length();
MOZ_ASSERT(arrayBuffer->type() == MIRType::Object);
MOZ_ASSERT(byteOffset->type() == MIRType::Value);
MOZ_ASSERT(length->type() == MIRType::Value);
auto* lir = new (alloc()) LNewTypedArrayFromArrayBuffer(
useRegisterAtStart(arrayBuffer), useBoxAtStart(byteOffset),
useBoxAtStart(length));
defineReturn(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitNewObject(MNewObject* ins) {
LNewObject* lir = new (alloc()) LNewObject(temp());
define(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitBindFunction(MBindFunction* ins) {
MDefinition* target = ins->target();
MOZ_ASSERT(target->type() == MIRType::Object);
if (!lowerCallArguments(ins)) {
abort(AbortReason::Alloc, "OOM: LIRGenerator::visitBindFunction");
return;
}
auto* lir = new (alloc())
LBindFunction(useFixedAtStart(target, CallTempReg0),
tempFixed(CallTempReg1), tempFixed(CallTempReg2));
defineReturn(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitNewBoundFunction(MNewBoundFunction* ins) {
auto* lir = new (alloc()) LNewBoundFunction(temp());
define(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitNewPlainObject(MNewPlainObject* ins) {
LNewPlainObject* lir = new (alloc()) LNewPlainObject(temp(), temp(), temp());
define(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitNewArrayObject(MNewArrayObject* ins) {
LNewArrayObject* lir = new (alloc()) LNewArrayObject(temp(), temp());
define(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitNewNamedLambdaObject(MNewNamedLambdaObject* ins) {
LNewNamedLambdaObject* lir = new (alloc()) LNewNamedLambdaObject(temp());
define(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitNewCallObject(MNewCallObject* ins) {
LNewCallObject* lir = new (alloc()) LNewCallObject(temp());
define(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitNewStringObject(MNewStringObject* ins) {
MOZ_ASSERT(ins->input()->type() == MIRType::String);
LNewStringObject* lir =
new (alloc()) LNewStringObject(useRegister(ins->input()), temp());
define(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitInitElemGetterSetter(MInitElemGetterSetter* ins) {
LInitElemGetterSetter* lir = new (alloc()) LInitElemGetterSetter(
useRegisterAtStart(ins->object()), useBoxAtStart(ins->id()),
useRegisterAtStart(ins->value()));
add(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitMutateProto(MMutateProto* ins) {
LMutateProto* lir = new (alloc()) LMutateProto(
useRegisterAtStart(ins->object()), useBoxAtStart(ins->value()));
add(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitInitPropGetterSetter(MInitPropGetterSetter* ins) {
LInitPropGetterSetter* lir = new (alloc()) LInitPropGetterSetter(
useRegisterAtStart(ins->object()), useRegisterAtStart(ins->value()));
add(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitCreateThis(MCreateThis* ins) {
LCreateThis* lir =
new (alloc()) LCreateThis(useRegisterOrConstantAtStart(ins->callee()),
useRegisterOrConstantAtStart(ins->newTarget()));
defineReturn(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitCreateArgumentsObject(MCreateArgumentsObject* ins) {
LAllocation callObj = useRegisterAtStart(ins->getCallObject());
LCreateArgumentsObject* lir = new (alloc())
LCreateArgumentsObject(callObj, tempFixed(CallTempReg0),
tempFixed(CallTempReg1), tempFixed(CallTempReg2));
defineReturn(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitCreateInlinedArgumentsObject(
MCreateInlinedArgumentsObject* ins) {
LAllocation callObj = useRegisterAtStart(ins->getCallObject());
LAllocation callee = useRegisterAtStart(ins->getCallee());
uint32_t numActuals = ins->numActuals();
uint32_t numOperands = numActuals * BOX_PIECES +
LCreateInlinedArgumentsObject::NumNonArgumentOperands;
auto* lir = allocateVariadic<LCreateInlinedArgumentsObject>(
numOperands, tempFixed(CallTempReg0), tempFixed(CallTempReg1));
if (!lir) {
abort(AbortReason::Alloc,
"OOM: LIRGenerator::visitCreateInlinedArgumentsObject");
return;
}
lir->setOperand(LCreateInlinedArgumentsObject::CallObj, callObj);
lir->setOperand(LCreateInlinedArgumentsObject::Callee, callee);
for (uint32_t i = 0; i < numActuals; i++) {
MDefinition* arg = ins->getArg(i);
uint32_t index = LCreateInlinedArgumentsObject::ArgIndex(i);
lir->setBoxOperand(index, useBoxOrTypedOrConstant(arg,
/*useConstant = */ true,
/*useAtStart = */ true));
}
defineReturn(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitGetInlinedArgument(MGetInlinedArgument* ins) {
#if defined(JS_PUNBOX64)
// On 64-bit architectures, we don't support boxing a typed register
// in-place without using a scratch register, so the result register
// can't be the same as any of the inputs. Fortunately, those
// architectures have registers to spare.
const bool useAtStart = false;
#else
const bool useAtStart = true;
#endif
LAllocation index =
useAtStart ? useRegisterAtStart(ins->index()) : useRegister(ins->index());
uint32_t numActuals = ins->numActuals();
uint32_t numOperands =
numActuals * BOX_PIECES + LGetInlinedArgument::NumNonArgumentOperands;
auto* lir = allocateVariadic<LGetInlinedArgument>(numOperands);
if (!lir) {
abort(AbortReason::Alloc, "OOM: LIRGenerator::visitGetInlinedArgument");
return;
}
lir->setOperand(LGetInlinedArgument::Index, index);
for (uint32_t i = 0; i < numActuals; i++) {
MDefinition* arg = ins->getArg(i);
uint32_t index = LGetInlinedArgument::ArgIndex(i);
lir->setBoxOperand(
index, useBoxOrTypedOrConstant(arg,
/*useConstant = */ true, useAtStart));
}
defineBox(lir, ins);
}
void LIRGenerator::visitGetInlinedArgumentHole(MGetInlinedArgumentHole* ins) {
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_MIPS64)
// On some 64-bit architectures, we don't support boxing a typed
// register in-place without using a scratch register, so the result
// register can't be the same as any of the inputs. Fortunately,
// those architectures have registers to spare.
const bool useAtStart = false;
#else
const bool useAtStart = true;
#endif
LAllocation index =
useAtStart ? useRegisterAtStart(ins->index()) : useRegister(ins->index());
uint32_t numActuals = ins->numActuals();
uint32_t numOperands =
numActuals * BOX_PIECES + LGetInlinedArgumentHole::NumNonArgumentOperands;
auto* lir = allocateVariadic<LGetInlinedArgumentHole>(numOperands);
if (!lir) {
abort(AbortReason::Alloc, "OOM: LIRGenerator::visitGetInlinedArgumentHole");
return;
}
lir->setOperand(LGetInlinedArgumentHole::Index, index);
for (uint32_t i = 0; i < numActuals; i++) {
MDefinition* arg = ins->getArg(i);
uint32_t index = LGetInlinedArgumentHole::ArgIndex(i);
lir->setBoxOperand(
index, useBoxOrTypedOrConstant(arg,
/*useConstant = */ true, useAtStart));
}
assignSnapshot(lir, ins->bailoutKind());
defineBox(lir, ins);
}
void LIRGenerator::visitGetArgumentsObjectArg(MGetArgumentsObjectArg* ins) {
LAllocation argsObj = useRegister(ins->argsObject());
LGetArgumentsObjectArg* lir =
new (alloc()) LGetArgumentsObjectArg(argsObj, temp());
defineBox(lir, ins);
}
void LIRGenerator::visitSetArgumentsObjectArg(MSetArgumentsObjectArg* ins) {
LAllocation argsObj = useRegister(ins->argsObject());
LSetArgumentsObjectArg* lir = new (alloc())
LSetArgumentsObjectArg(argsObj, useBox(ins->value()), temp());
add(lir, ins);
}
void LIRGenerator::visitLoadArgumentsObjectArg(MLoadArgumentsObjectArg* ins) {
MDefinition* argsObj = ins->argsObject();
MOZ_ASSERT(argsObj->type() == MIRType::Object);
MDefinition* index = ins->index();
MOZ_ASSERT(index->type() == MIRType::Int32);
auto* lir = new (alloc())
LLoadArgumentsObjectArg(useRegister(argsObj), useRegister(index), temp());
assignSnapshot(lir, ins->bailoutKind());
defineBox(lir, ins);
}
void LIRGenerator::visitLoadArgumentsObjectArgHole(
MLoadArgumentsObjectArgHole* ins) {
MDefinition* argsObj = ins->argsObject();
MOZ_ASSERT(argsObj->type() == MIRType::Object);
MDefinition* index = ins->index();
MOZ_ASSERT(index->type() == MIRType::Int32);
auto* lir = new (alloc()) LLoadArgumentsObjectArgHole(
useRegister(argsObj), useRegister(index), temp());
assignSnapshot(lir, ins->bailoutKind());
defineBox(lir, ins);
}
void LIRGenerator::visitInArgumentsObjectArg(MInArgumentsObjectArg* ins) {
MDefinition* argsObj = ins->argsObject();
MOZ_ASSERT(argsObj->type() == MIRType::Object);
MDefinition* index = ins->index();
MOZ_ASSERT(index->type() == MIRType::Int32);
auto* lir = new (alloc())
LInArgumentsObjectArg(useRegister(argsObj), useRegister(index), temp());
assignSnapshot(lir, ins->bailoutKind());
define(lir, ins);
}
void LIRGenerator::visitArgumentsObjectLength(MArgumentsObjectLength* ins) {
MDefinition* argsObj = ins->argsObject();
MOZ_ASSERT(argsObj->type() == MIRType::Object);
auto* lir = new (alloc()) LArgumentsObjectLength(useRegister(argsObj));
assignSnapshot(lir, ins->bailoutKind());
define(lir, ins);
}
void LIRGenerator::visitArrayFromArgumentsObject(
MArrayFromArgumentsObject* ins) {
MDefinition* argsObj = ins->argsObject();
MOZ_ASSERT(argsObj->type() == MIRType::Object);
auto* lir =
new (alloc()) LArrayFromArgumentsObject(useRegisterAtStart(argsObj));
defineReturn(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitGuardArgumentsObjectFlags(
MGuardArgumentsObjectFlags* ins) {
MDefinition* argsObj = ins->argsObject();
MOZ_ASSERT(argsObj->type() == MIRType::Object);
auto* lir =
new (alloc()) LGuardArgumentsObjectFlags(useRegister(argsObj), temp());
assignSnapshot(lir, ins->bailoutKind());
add(lir, ins);
redefine(ins, argsObj);
}
void LIRGenerator::visitBoundFunctionNumArgs(MBoundFunctionNumArgs* ins) {
MDefinition* obj = ins->object();
MOZ_ASSERT(obj->type() == MIRType::Object);
auto* lir = new (alloc()) LBoundFunctionNumArgs(useRegisterAtStart(obj));
define(lir, ins);
}
void LIRGenerator::visitGuardBoundFunctionIsConstructor(
MGuardBoundFunctionIsConstructor* ins) {
MOZ_ASSERT(ins->object()->type() == MIRType::Object);
auto* lir = new (alloc())
LGuardBoundFunctionIsConstructor(useRegister(ins->object()));
assignSnapshot(lir, ins->bailoutKind());
add(lir, ins);
redefine(ins, ins->object());
}
void LIRGenerator::visitReturnFromCtor(MReturnFromCtor* ins) {
LReturnFromCtor* lir = new (alloc())
LReturnFromCtor(useBox(ins->value()), useRegister(ins->object()));
define(lir, ins);
}
void LIRGenerator::visitBoxNonStrictThis(MBoxNonStrictThis* ins) {
MOZ_ASSERT(ins->type() == MIRType::Object);
MOZ_ASSERT(ins->input()->type() == MIRType::Value);
auto* lir = new (alloc()) LBoxNonStrictThis(useBox(ins->input()));
define(lir, ins);
assignSafepoint(lir, ins);
}
void LIRGenerator::visitImplicitThis(MImplicitThis* ins) {
MDefinition* env = ins->envChain();
MOZ_ASSERT(env->type() == MIRType::Object);
LImplicitThis* lir = new (alloc()) LImplicitThis(useRegisterAtStart(env));
defineReturn(lir, ins);
assignSafepoint(lir, ins);
}
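// Lower the stack arguments of a call. Slot indices count down from
// baseSlot, so argument 0 gets the highest slot, and baseSlot is argc
// rounded up to JitStackValueAlignment so that the callee frame keeps the
// caller's stack alignment. For example, with argc == 3 and an alignment
// of 2, baseSlot is 4 and the arguments occupy slots 4, 3, and 2.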
template <typename T>
bool LIRGenerator::lowerCallArguments(T* call) {
uint32_t argc = call->numStackArgs();
  // Align the arguments of the call so that the callee keeps the same
  // stack alignment as the caller.
uint32_t baseSlot = 0;
if (JitStackValueAlignment > 1) {
baseSlot = AlignBytes(argc, JitStackValueAlignment);
} else {
baseSlot = argc;
}
  // Save the maximum number of argument slots, so that we can use a single
  // frame size for all calls.
if (baseSlot > maxargslots_) {
maxargslots_ = baseSlot;
}
for (size_t i = 0; i < argc; i++) {
MDefinition* arg = call->getArg(i);
uint32_t argslot = baseSlot - i;
// Values take a slow path.
if (arg->type() == MIRType::Value) {
LStackArgV* stack = new (alloc()) LStackArgV(useBox(arg), argslot);
add(stack);
} else {
      // Arguments of known type can be stored directly, as a constant or
      // from a payload register.
LStackArgT* stack = new (alloc())
LStackArgT(useRegisterOrConstant(arg), argslot, arg->type());
add(stack);
}
if (!alloc().ensureBallast()) {
return false;
}
}
return true;
}
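// Calls are lowered along one of four paths: DOM natives (LCallDOMNative),
// natives without a JIT entry (LCallNative), known JIT functions
// (LCallKnown), and the fully generic case (LCallGeneric). The native
// paths pin their temps to the platform's integer argument registers via
// GetTempRegForIntArg.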
void LIRGenerator::visitCall(MCall* call) {
MOZ_ASSERT(call->getCallee()->type() == MIRType::Object);
// In case of oom, skip the rest of the allocations.
if (!lowerCallArguments(call)) {
abort(AbortReason::Alloc, "OOM: LIRGenerator::visitCall");
return;
}
WrappedFunction* target = call->getSingleTarget();
LInstruction* lir;
if (call->isCallDOMNative()) {
// Call DOM functions.
MOZ_ASSERT(target && target->isNativeWithoutJitEntry());
Register cxReg, objReg, privReg, argsReg;
GetTempRegForIntArg(0, 0, &cxReg);
GetTempRegForIntArg(1, 0, &objReg);
GetTempRegForIntArg(2, 0, &privReg);
mozilla::DebugOnly<bool> ok = GetTempRegForIntArg(3, 0, &argsReg);
MOZ_ASSERT(ok, "How can we not have four temp registers?");
lir = new (alloc()) LCallDOMNative(tempFixed(cxReg), tempFixed(objReg),
tempFixed(privReg), tempFixed(argsReg));
} else if (target) {
// Call known functions.
if (target->isNativeWithoutJitEntry()) {
Register cxReg, numReg, vpReg, tmpReg;
GetTempRegForIntArg(0, 0, &cxReg);
GetTempRegForIntArg(1, 0, &numReg);
GetTempRegForIntArg(2, 0, &vpReg);
// Even though this is just a temp reg, use the same API to avoid
// register collisions.
mozilla::DebugOnly<bool> ok = GetTempRegForIntArg(3, 0, &tmpReg);
MOZ_ASSERT(ok, "How can we not have four temp registers?");
lir = new (alloc()) LCallNative(tempFixed(cxReg), tempFixed(numReg),
tempFixed(vpReg), tempFixed(tmpReg));
} else {
lir = new (alloc()) LCallKnown(useRegisterAtStart(call->getCallee()),
tempFixed(CallTempReg0));
}
} else {
// Call anything, using the most generic code.
lir = new (alloc()) LCallGeneric(
useFixedAtStart(call->getCallee(), IonGenericCallCalleeReg),
tempFixed(IonGenericCallArgcReg));
}
defineReturn(lir, call);
assignSafepoint(lir, call);
}
void LIRGenerator::visitCallClassHook(MCallClassHook* call) {
MDefinition* callee = call->getCallee();
MOZ_ASSERT(callee->type() == MIRType::Object);
// In case of oom, skip the rest of the allocations.
if (!lowerCallArguments(call)) {
abort(AbortReason::Alloc, "OOM: LIRGenerator::visitCallClassHook");
return;
}
Register cxReg, numReg, vpReg, tmpReg;
GetTempRegForIntArg(0, 0, &cxReg);
GetTempRegForIntArg(1, 0, &numReg);
GetTempRegForIntArg(2, 0, &vpReg);
// Even though this is just a temp reg, use the same API to avoid
// register collisions.
mozilla::DebugOnly<bool> ok = GetTempRegForIntArg(3, 0, &tmpReg);
MOZ_ASSERT(ok, "How can we not have four temp registers?");
auto* lir = new (alloc())
LCallClassHook(useRegisterAtStart(callee), tempFixed(cxReg),
tempFixed(numReg), tempFixed(vpReg), tempFixed(tmpReg));
defineReturn(lir, call);
assignSafepoint(lir, call);
}
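// The apply-like calls below pin the function, the arguments, and |this|
// to fixed call-temp registers: the generated code copies arguments onto
// the stack in a loop and needs the remaining temps as an object register
// and a stack counter.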
void LIRGenerator::visitApplyArgs(MApplyArgs* apply) {
MOZ_ASSERT(apply->getFunction()->type() == MIRType::Object);
  // Assert that CallTempReg2 cannot clobber the return value registers.
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
LApplyArgsGeneric* lir = new (alloc()) LApplyArgsGeneric(
useFixedAtStart(apply->getFunction(), CallTempReg3),
useFixedAtStart(apply->getArgc(), CallTempReg0),
useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5),
tempFixed(CallTempReg1), // object register
tempFixed(CallTempReg2)); // stack counter register
// Bailout is needed in the case of too many values in the arguments array.
assignSnapshot(lir, apply->bailoutKind());
defineReturn(lir, apply);
assignSafepoint(lir, apply);
}
void LIRGenerator::visitApplyArgsObj(MApplyArgsObj* apply) {
MOZ_ASSERT(apply->getFunction()->type() == MIRType::Object);
  // Assert that CallTempReg2 cannot clobber the return value registers.
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
LApplyArgsObj* lir = new (alloc()) LApplyArgsObj(
useFixedAtStart(apply->getFunction(), CallTempReg3),
useFixedAtStart(apply->getArgsObj(), CallTempReg0),
useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5),
tempFixed(CallTempReg1), // object register
tempFixed(CallTempReg2)); // stack counter register
// Bailout is needed in the case of too many values in the arguments array.
assignSnapshot(lir, apply->bailoutKind());
defineReturn(lir, apply);
assignSafepoint(lir, apply);
}
void LIRGenerator::visitApplyArray(MApplyArray* apply) {
MOZ_ASSERT(apply->getFunction()->type() == MIRType::Object);
  // Assert that CallTempReg2 cannot clobber the return value registers.
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
LApplyArrayGeneric* lir = new (alloc()) LApplyArrayGeneric(
useFixedAtStart(apply->getFunction(), CallTempReg3),
useFixedAtStart(apply->getElements(), CallTempReg0),
useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5),
tempFixed(CallTempReg1), // object register
tempFixed(CallTempReg2)); // stack counter register
// Bailout is needed in the case of too many values in the array, or empty
// space at the end of the array.
assignSnapshot(lir, apply->bailoutKind());
defineReturn(lir, apply);
assignSafepoint(lir, apply);
}
void LIRGenerator::visitConstructArgs(MConstructArgs* mir) {
MOZ_ASSERT(mir->getFunction()->type() == MIRType::Object);
MOZ_ASSERT(mir->getArgc()->type() == MIRType::Int32);
MOZ_ASSERT(mir->getNewTarget()->type() == MIRType::Object);
MOZ_ASSERT(mir->getThis()->type() == MIRType::Value);
  // Assert that CallTempReg2 cannot clobber the return value registers.
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
auto* lir = new (alloc()) LConstructArgsGeneric(
useFixedAtStart(mir->getFunction(), CallTempReg3),
useFixedAtStart(mir->getArgc(), CallTempReg0),
useFixedAtStart(mir->getNewTarget(), CallTempReg1),
useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5),
tempFixed(CallTempReg2));
// Bailout is needed in the case of too many values in the arguments array.
assignSnapshot(lir, mir->bailoutKind());
defineReturn(lir, mir);
assignSafepoint(lir, mir);
}
void LIRGenerator::visitConstructArray(MConstructArray* mir) {
MOZ_ASSERT(mir->getFunction()->type() == MIRType::Object);
MOZ_ASSERT(mir->getElements()->type() == MIRType::Elements);
MOZ_ASSERT(mir->getNewTarget()->type() == MIRType::Object);
MOZ_ASSERT(mir->getThis()->type() == MIRType::Value);
  // Assert that CallTempReg2 cannot clobber the return value registers.
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
auto* lir = new (alloc()) LConstructArrayGeneric(
useFixedAtStart(mir->getFunction(), CallTempReg3),
useFixedAtStart(mir->getElements(), CallTempReg0),
useFixedAtStart(mir->getNewTarget(), CallTempReg1),
useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5),
tempFixed(CallTempReg2));
// Bailout is needed in the case of too many values in the array, or empty
// space at the end of the array.
assignSnapshot(lir, mir->bailoutKind());
defineReturn(lir, mir);
assignSafepoint(lir, mir);
}
void LIRGenerator::visitBail(MBail* bail) {
LBail* lir = new (alloc()) LBail();
assignSnapshot(lir, bail->bailoutKind());
add(lir, bail);
}
void LIRGenerator::visitUnreachable(MUnreachable* unreachable) {
LUnreachable* lir = new (alloc()) LUnreachable();
add(lir, unreachable);
}
void LIRGenerator::visitEncodeSnapshot(MEncodeSnapshot* mir) {
LEncodeSnapshot* lir = new (alloc()) LEncodeSnapshot();
assignSnapshot(lir, mir->bailoutKind());
add(lir, mir);
}
void LIRGenerator::visitUnreachableResult(MUnreachableResult* mir) {
if (mir->type() == MIRType::Value) {
auto* lir = new (alloc()) LUnreachableResultV();
defineBox(lir, mir);
} else {
auto* lir = new (alloc()) LUnreachableResultT();
define(lir, mir);
}
}
void LIRGenerator::visitAssertFloat32(MAssertFloat32* assertion) {
MIRType type = assertion->input()->type();
DebugOnly<bool> checkIsFloat32 = assertion->mustBeFloat32();
if (type != MIRType::Value && !JitOptions.eagerIonCompilation()) {
MOZ_ASSERT_IF(checkIsFloat32, type == MIRType::Float32);
MOZ_ASSERT_IF(!checkIsFloat32, type != MIRType::Float32);
}
}
void LIRGenerator::visitAssertRecoveredOnBailout(
MAssertRecoveredOnBailout* assertion) {
MOZ_CRASH("AssertRecoveredOnBailout nodes are always recovered on bailouts.");
}
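// If the left-hand side of a comparison is the constant operand, swap the
// operands and mirror the condition (e.g. |5 < x| becomes |x > 5|), so
// that later code only has to handle constants on the right.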
[[nodiscard]] static JSOp ReorderComparison(JSOp op, MDefinition** lhsp,
MDefinition** rhsp) {
MDefinition* lhs = *lhsp;
MDefinition* rhs = *rhsp;
if (lhs->maybeConstantValue()) {
*rhsp = lhs;
*lhsp = rhs;
return ReverseCompareOp(op);
}
return op;
}
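// Lower a conditional branch. Constants and trivially-typed operands
// become an unconditional LGoto; compares, bit-ands, and several is-X
// predicates that are marked emit-at-uses are fused with the branch;
// everything else falls through to a type-specific LTest*AndBranch at the
// end.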
void LIRGenerator::visitTest(MTest* test) {
MDefinition* opd = test->getOperand(0);
MBasicBlock* ifTrue = test->ifTrue();
MBasicBlock* ifFalse = test->ifFalse();
  // Strings are converted to their length in the type analysis phase (see
  // TestPolicy).
MOZ_ASSERT(opd->type() != MIRType::String);
// Testing a constant.
if (MConstant* constant = opd->maybeConstantValue()) {
bool b;
if (constant->valueToBoolean(&b)) {
add(new (alloc()) LGoto(b ? ifTrue : ifFalse));
return;
}
}
if (opd->type() == MIRType::Value) {
auto* lir = new (alloc()) LTestVAndBranch(
ifTrue, ifFalse, useBox(opd), tempDouble(), tempToUnbox(), temp());
add(lir, test);
return;
}
  // Objects are truthy, unless they might emulate undefined.
if (opd->type() == MIRType::Object) {
add(new (alloc())
LTestOAndBranch(useRegister(opd), ifTrue, ifFalse, temp()),
test);
return;
}
// These must be explicitly sniffed out since they are constants and have
// no payload.
if (opd->type() == MIRType::Undefined || opd->type() == MIRType::Null) {
add(new (alloc()) LGoto(ifFalse));
return;
}
// All symbols are truthy.
if (opd->type() == MIRType::Symbol) {
add(new (alloc()) LGoto(ifTrue));
return;
}
// Try to match the pattern
// test=MTest(
// comp=MCompare(
// {EQ,NE} for {Int,UInt}{32,64},
// bitAnd={MBitAnd,MWasmBinaryBitwise(And{32,64})}(x, y),
// MConstant(0)
// )
// )
// and produce a single LBitAndAndBranch node. This requires both `comp`
// and `bitAnd` to be marked emit-at-uses. Since we can't use
// LBitAndAndBranch to represent a 64-bit AND on a 32-bit target, the 64-bit
// case is restricted to 64-bit targets.
if (opd->isCompare() && opd->isEmittedAtUses()) {
#ifdef JS_64BIT
constexpr bool targetIs64 = true;
#else
constexpr bool targetIs64 = false;
#endif
MCompare* comp = opd->toCompare();
Assembler::Condition compCond =
JSOpToCondition(comp->compareType(), comp->jsop());
MDefinition* compL = comp->getOperand(0);
MDefinition* compR = comp->getOperand(1);
if ((comp->compareType() == MCompare::Compare_Int32 ||
comp->compareType() == MCompare::Compare_UInt32 ||
(targetIs64 && comp->compareType() == MCompare::Compare_Int64) ||
(targetIs64 && comp->compareType() == MCompare::Compare_UInt64)) &&
(compCond == Assembler::Equal || compCond == Assembler::NotEqual) &&
compR->isConstant() &&
(compR->toConstant()->isInt32(0) ||
(targetIs64 && compR->toConstant()->isInt64(0))) &&
(compL->isBitAnd() || (compL->isWasmBinaryBitwise() &&
compL->toWasmBinaryBitwise()->subOpcode() ==
MWasmBinaryBitwise::SubOpcode::And))) {
// The MCompare is OK; now check its first operand (the and-ish node).
MDefinition* bitAnd = compL;
MDefinition* bitAndL = bitAnd->getOperand(0);
MDefinition* bitAndR = bitAnd->getOperand(1);
MIRType bitAndLTy = bitAndL->type();
MIRType bitAndRTy = bitAndR->type();
if (bitAnd->isEmittedAtUses() && bitAndLTy == bitAndRTy &&
(bitAndLTy == MIRType::Int32 ||
(targetIs64 && bitAndLTy == MIRType::Int64))) {
// Pattern match succeeded.
ReorderCommutative(&bitAndL, &bitAndR, test);
if (compCond == Assembler::Equal) {
compCond = Assembler::Zero;
} else if (compCond == Assembler::NotEqual) {
compCond = Assembler::NonZero;
} else {
MOZ_ASSERT_UNREACHABLE("inequality operators cannot be folded");
}
MOZ_ASSERT_IF(!targetIs64, bitAndLTy == MIRType::Int32);
lowerForBitAndAndBranch(
new (alloc()) LBitAndAndBranch(
ifTrue, ifFalse, bitAndLTy == MIRType::Int64, compCond),
test, bitAndL, bitAndR);
return;
}
}
}
// Check if the operand for this test is a compare operation. If it is, we
// want to emit an LCompare*AndBranch rather than an LTest*AndBranch, to fuse
// the compare and jump instructions.
if (opd->isCompare() && opd->isEmittedAtUses()) {
MCompare* comp = opd->toCompare();
MDefinition* left = comp->lhs();
MDefinition* right = comp->rhs();
// Try to fold the comparison so that we don't have to handle all cases.
bool result;
if (comp->tryFold(&result)) {
add(new (alloc()) LGoto(result ? ifTrue : ifFalse));
return;
}
// Emit LCompare*AndBranch.
// Compare and branch null/undefined.
// The second operand has known null/undefined type,
// so just test the first operand.
if (comp->compareType() == MCompare::Compare_Null ||
comp->compareType() == MCompare::Compare_Undefined) {
if (left->type() == MIRType::Object) {
auto* lir = new (alloc()) LIsNullOrLikeUndefinedAndBranchT(
comp, useRegister(left), ifTrue, ifFalse, temp());
add(lir, test);
return;
}
if (IsLooseEqualityOp(comp->jsop())) {
auto* lir = new (alloc()) LIsNullOrLikeUndefinedAndBranchV(
comp, ifTrue, ifFalse, useBox(left), temp(), tempToUnbox());
add(lir, test);
return;
}
if (comp->compareType() == MCompare::Compare_Null) {
auto* lir =
new (alloc()) LIsNullAndBranch(comp, ifTrue, ifFalse, useBox(left));
add(lir, test);
return;
}
auto* lir = new (alloc())
LIsUndefinedAndBranch(comp, ifTrue, ifFalse, useBox(left));
add(lir, test);
return;
}
// Compare and branch Int32, Symbol, Object, or WasmAnyRef pointers.
if (comp->isInt32Comparison() ||
comp->compareType() == MCompare::Compare_UInt32 ||
comp->compareType() == MCompare::Compare_UIntPtr ||
comp->compareType() == MCompare::Compare_Object ||
comp->compareType() == MCompare::Compare_Symbol ||
comp->compareType() == MCompare::Compare_WasmAnyRef) {
JSOp op = ReorderComparison(comp->jsop(), &left, &right);
LAllocation lhs = useRegister(left);
LAllocation rhs;
if (comp->isInt32Comparison() ||
comp->compareType() == MCompare::Compare_UInt32 ||
comp->compareType() == MCompare::Compare_UIntPtr) {
rhs = useAnyOrInt32Constant(right);
} else {
rhs = useAny(right);
}
LCompareAndBranch* lir =
new (alloc()) LCompareAndBranch(comp, op, lhs, rhs, ifTrue, ifFalse);
add(lir, test);
return;
}
// Compare and branch Int64.
if (comp->compareType() == MCompare::Compare_Int64 ||
comp->compareType() == MCompare::Compare_UInt64) {
JSOp op = ReorderComparison(comp->jsop(), &left, &right);
lowerForCompareI64AndBranch(test, comp, op, left, right, ifTrue, ifFalse);
return;
}
// Compare and branch doubles.
if (comp->isDoubleComparison()) {
LAllocation lhs = useRegister(left);
LAllocation rhs = useRegister(right);
LCompareDAndBranch* lir =
new (alloc()) LCompareDAndBranch(comp, lhs, rhs, ifTrue, ifFalse);
add(lir, test);
return;
}
// Compare and branch floats.
if (comp->isFloat32Comparison()) {
LAllocation lhs = useRegister(left);
LAllocation rhs = useRegister(right);
LCompareFAndBranch* lir =
new (alloc()) LCompareFAndBranch(comp, lhs, rhs, ifTrue, ifFalse);
add(lir, test);
return;
}
}
// Check if the operand for this test is a bitand operation. If it is, we want
// to emit an LBitAndAndBranch rather than an LTest*AndBranch.
if (opd->isBitAnd() && opd->isEmittedAtUses()) {
MDefinition* lhs = opd->getOperand(0);
MDefinition* rhs = opd->getOperand(1);
if (lhs->type() == MIRType::Int32 && rhs->type() == MIRType::Int32) {
ReorderCommutative(&lhs, &rhs, test);
lowerForBitAndAndBranch(new (alloc()) LBitAndAndBranch(ifTrue, ifFalse,
/*is64=*/false),
test, lhs, rhs);
return;
}
}
#if defined(ENABLE_WASM_SIMD) && \
(defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || \
defined(JS_CODEGEN_ARM64))
// Check if the operand for this test is an any_true/all_true SIMD operation.
// If it is, we want to emit an LWasmReduceAndBranchSimd128 node to avoid
// generating an intermediate boolean result.
if (opd->isWasmReduceSimd128() && opd->isEmittedAtUses()) {
MWasmReduceSimd128* node = opd->toWasmReduceSimd128();
if (canFoldReduceSimd128AndBranch(node->simdOp())) {
# ifdef DEBUG
js::wasm::ReportSimdAnalysis("simd128-to-scalar-and-branch -> folded");
# endif
auto* lir = new (alloc()) LWasmReduceAndBranchSimd128(
useRegister(node->input()), node->simdOp(), ifTrue, ifFalse);
add(lir, test);
return;
}
}
#endif
if (opd->isIsObject() && opd->isEmittedAtUses()) {
MDefinition* input = opd->toIsObject()->input();
MOZ_ASSERT(input->type() == MIRType::Value);
LIsObjectAndBranch* lir =
new (alloc()) LIsObjectAndBranch(ifTrue, ifFalse, useBoxAtStart(input));
add(lir, test);
return;
}
if (opd->isWasmRefIsSubtypeOfAbstract() && opd->isEmittedAtUses()) {
MWasmRefIsSubtypeOfAbstract* isSubTypeOf =
opd->toWasmRefIsSubtypeOfAbstract();
LAllocation ref = useRegister(isSubTypeOf->ref());
LDefinition scratch1 = LDefinition();
if (isSubTypeOf->destType().isAnyHierarchy()) {
// As in visitWasmRefIsSubtypeOfAbstract, we know we do not need
// scratch2 and superSTV because we know this is not a
// concrete type.
scratch1 = MacroAssembler::needScratch1ForBranchWasmRefIsSubtypeAny(
isSubTypeOf->destType())
? temp()
: LDefinition();
} else if (isSubTypeOf->destType().isFuncHierarchy() ||
isSubTypeOf->destType().isExternHierarchy()) {
// scratch1 is not necessary for abstract casts in other hierarchies
} else {
MOZ_CRASH("unknown type hierarchy when folding abstract casts");
}
add(new (alloc()) LWasmRefIsSubtypeOfAbstractAndBranch(
ifTrue, ifFalse, isSubTypeOf->sourceType(), isSubTypeOf->destType(),
ref, scratch1),
test);
return;
}
if (opd->isWasmRefIsSubtypeOfConcrete() && opd->isEmittedAtUses()) {
MWasmRefIsSubtypeOfConcrete* isSubTypeOf =
opd->toWasmRefIsSubtypeOfConcrete();
LAllocation ref = useRegister(isSubTypeOf->ref());
LAllocation superSTV = useRegister(isSubTypeOf->superSTV());
LDefinition scratch1 = LDefinition();
LDefinition scratch2 = LDefinition();
if (isSubTypeOf->destType().isAnyHierarchy()) {
// As in visitWasmRefIsSubtypeOfConcrete, we know we need scratch1 because
// we know this is a concrete type.
scratch1 = temp();
scratch2 = MacroAssembler::needScratch2ForBranchWasmRefIsSubtypeAny(
isSubTypeOf->destType())
? temp()
: LDefinition();
} else if (isSubTypeOf->destType().isFuncHierarchy()) {
      // As in visitWasmRefIsSubtypeOfConcrete, scratch1 is always needed
      // for concrete casts in the func hierarchy, and scratch2 only for
      // some destination types.
scratch1 = temp();
scratch2 = MacroAssembler::needScratch2ForBranchWasmRefIsSubtypeFunc(
isSubTypeOf->destType())
? temp()
: LDefinition();
} else if (isSubTypeOf->destType().isExternHierarchy()) {
MOZ_CRASH("concrete casts are not possible in the extern hierarchy");
} else {
MOZ_CRASH("unknown type hierarchy when folding abstract casts");
}
add(new (alloc()) LWasmRefIsSubtypeOfConcreteAndBranch(
ifTrue, ifFalse, isSubTypeOf->sourceType(), isSubTypeOf->destType(),
ref, superSTV, scratch1, scratch2),
test);
return;
}
if (opd->isIsNullOrUndefined() && opd->isEmittedAtUses()) {
MIsNullOrUndefined* isNullOrUndefined = opd->toIsNullOrUndefined();
MDefinition* input = isNullOrUndefined->value();
MOZ_ASSERT(input->type() == MIRType::Value);
auto* lir = new (alloc()) LIsNullOrUndefinedAndBranch(
isNullOrUndefined, ifTrue, ifFalse, useBoxAtStart(input));
add(lir, test);
return;
}
if (opd->isIsNoIter()) {
MOZ_ASSERT(opd->isEmittedAtUses());
MDefinition* input = opd->toIsNoIter()->input();
MOZ_ASSERT(input->type() == MIRType::Value);
LIsNoIterAndBranch* lir =
new (alloc()) LIsNoIterAndBranch(ifTrue, ifFalse, useBox(input));
add(lir, test);
return;
}
if (opd->isIteratorHasIndices()) {
MOZ_ASSERT(opd->isEmittedAtUses());
MDefinition* object = opd->toIteratorHasIndices()->object();
MDefinition* iterator = opd->toIteratorHasIndices()->iterator();
LIteratorHasIndicesAndBranch* lir = new (alloc())
LIteratorHasIndicesAndBranch(ifTrue, ifFalse, useRegister(object),
useRegister(iterator), temp(), temp());
add(lir, test);
return;
}
switch (opd->type()) {
case MIRType::Double:
add(new (alloc()) LTestDAndBranch(useRegister(opd), ifTrue, ifFalse));
break;
case MIRType::Float32:
add(new (alloc()) LTestFAndBranch(useRegister(opd), ifTrue, ifFalse));
break;
case MIRType::Int32:
case MIRType::Boolean:
add(new (alloc()) LTestIAndBranch(useRegister(opd), ifTrue, ifFalse));
break;
case MIRType::Int64:
add(new (alloc())
LTestI64AndBranch(useInt64Register(opd), ifTrue, ifFalse));
break;
case MIRType::BigInt:
add(new (alloc()) LTestBIAndBranch(useRegister(opd), ifTrue, ifFalse));
break;
default:
MOZ_CRASH("Bad type");
}
}
static inline bool CanEmitCompareAtUses(MInstruction* ins) {
if (!ins->canEmitAtUses()) {
return false;
}
// If the result is never used, we can usefully defer emission to the use
// point, since that will never happen.
MUseIterator iter(ins->usesBegin());
if (iter == ins->usesEnd()) {
return true;
}
// If the first use isn't of the expected form, the answer is No.
MNode* node = iter->consumer();
if (!node->isDefinition()) {
return false;
}
MDefinition* use = node->toDefinition();
if (!use->isTest() && !use->isWasmSelect()) {
return false;
}
// Emission can be deferred to the first use point, but only if there are no
// other use points.
iter++;
return iter == ins->usesEnd();
}
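// Under these cutoffs, comparing against a short constant string (e.g.
// |s === "on"|) is lowered to inline character loads and compares, only
// falling back to a VM call on the slow path.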
static bool CanCompareCharactersInline(const JSLinearString* linear) {
size_t length = linear->length();
// Limit the number of inline instructions used for character comparisons. Use
// the same instruction limit for both encodings, i.e. two-byte uses half the
// limit of Latin-1 strings.
constexpr size_t Latin1StringCompareCutoff = 32;
constexpr size_t TwoByteStringCompareCutoff = 16;
return length > 0 &&
(linear->hasLatin1Chars() ? length <= Latin1StringCompareCutoff
: length <= TwoByteStringCompareCutoff);
}
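// Lower a compare whose boolean result is materialized. If the result
// instead feeds a single MTest or MWasmSelect, emitAtUses defers emission
// so that the compare can be fused with its use (see CanEmitCompareAtUses
// above).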
void LIRGenerator::visitCompare(MCompare* comp) {
MDefinition* left = comp->lhs();
MDefinition* right = comp->rhs();
// Try to fold the comparison so that we don't have to handle all cases.
bool result;
if (comp->tryFold(&result)) {
define(new (alloc()) LInteger(result), comp);
return;
}
  // Move this below the emitAtUses call if we ever implement
  // LCompareSAndBranch. Moving it now wouldn't be wrong, but it would
  // serve no purpose and only invite confusion.
if (comp->compareType() == MCompare::Compare_String) {
if (IsEqualityOp(comp->jsop())) {
MConstant* constant = nullptr;
if (left->isConstant()) {
constant = left->toConstant();
} else if (right->isConstant()) {
constant = right->toConstant();
}
if (constant) {
JSLinearString* linear = &constant->toString()->asLinear();
if (CanCompareCharactersInline(linear)) {
MDefinition* input = left->isConstant() ? right : left;
auto* lir = new (alloc()) LCompareSInline(useRegister(input), linear);
define(lir, comp);
assignSafepoint(lir, comp);
return;
}
}
}
LCompareS* lir =
new (alloc()) LCompareS(useRegister(left), useRegister(right));
define(lir, comp);
assignSafepoint(lir, comp);
return;
}
// Compare two BigInts.
if (comp->compareType() == MCompare::Compare_BigInt) {
auto* lir = new (alloc()) LCompareBigInt(
useRegister(left), useRegister(right), temp(), temp(), temp());
define(lir, comp);
return;
}
// Compare BigInt with Int32.
if (comp->compareType() == MCompare::Compare_BigInt_Int32) {
auto* lir = new (alloc()) LCompareBigIntInt32(
useRegister(left), useRegister(right), temp(), temp());
define(lir, comp);
return;
}
// Compare BigInt with Double.
if (comp->compareType() == MCompare::Compare_BigInt_Double) {
auto* lir = new (alloc()) LCompareBigIntDouble(useRegisterAtStart(left),
useRegisterAtStart(right));
defineReturn(lir, comp);
return;
}
// Compare BigInt with String.
if (comp->compareType() == MCompare::Compare_BigInt_String) {
auto* lir = new (alloc()) LCompareBigIntString(useRegisterAtStart(left),
useRegisterAtStart(right));
defineReturn(lir, comp);
assignSafepoint(lir, comp);
return;
}
  // Sniff out if the output of this compare is used only for branching.
  // If it is, then we will emit an LCompare*AndBranch instruction in place
  // of this compare and any test that uses this compare. Thus, we can
  // ignore this Compare.
if (CanEmitCompareAtUses(comp)) {
emitAtUses(comp);
return;
}
// Compare Null and Undefined.
if (comp->compareType() == MCompare::Compare_Null ||
comp->compareType() == MCompare::Compare_Undefined) {
if (left->type() == MIRType::Object) {
define(new (alloc()) LIsNullOrLikeUndefinedT(useRegister(left)), comp);
return;
}
if (IsLooseEqualityOp(comp->jsop())) {
auto* lir =
new (alloc()) LIsNullOrLikeUndefinedV(useBox(left), tempToUnbox());
define(lir, comp);
return;
}
if (comp->compareType() == MCompare::Compare_Null) {
auto* lir = new (alloc()) LIsNull(useBox(left));
define(lir, comp);
return;
}
auto* lir = new (alloc()) LIsUndefined(useBox(left));
define(lir, comp);
return;
}
  // Compare Int32, Symbol, Object, or WasmAnyRef pointers.
if (comp->isInt32Comparison() ||
comp->compareType() == MCompare::Compare_UInt32 ||
comp->compareType() == MCompare::Compare_UIntPtr ||
comp->compareType() == MCompare::Compare_Object ||
comp->compareType() == MCompare::Compare_Symbol ||
comp->compareType() == MCompare::Compare_WasmAnyRef) {
JSOp op = ReorderComparison(comp->jsop(), &left, &right);
LAllocation lhs = useRegister(left);
LAllocation rhs;
if (comp->isInt32Comparison() ||
comp->compareType() == MCompare::Compare_UInt32 ||
comp->compareType() == MCompare::Compare_UIntPtr) {
rhs = useAnyOrInt32Constant(right);
} else {
rhs = useAny(right);
}
define(new (alloc()) LCompare(op, lhs, rhs), comp);
return;
}
// Compare Int64.
if (comp->compareType() == MCompare::Compare_Int64 ||
comp->compareType() == MCompare::Compare_UInt64) {
JSOp op = ReorderComparison(comp->jsop(), &left, &right);
define(new (alloc()) LCompareI64(op, useInt64Register(left),
useInt64OrConstant(right)),
comp);
return;
}
// Compare doubles.
if (comp->isDoubleComparison()) {
define(new (alloc()) LCompareD(useRegister(left), useRegister(right)),
comp);
return;
}
// Compare float32.
if (comp->isFloat32Comparison()) {
define(new (alloc()) LCompareF(useRegister(left), useRegister(right)),
comp);
return;
}
MOZ_CRASH("Unrecognized compare type.");
}
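// SameValue differs from === in that it distinguishes +0 from -0 and
// treats NaN as equal to itself; the double variant uses a floating-point
// temp for these checks.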
void LIRGenerator::visitSameValueDouble(MSameValueDouble* ins) {
MDefinition* lhs = ins->lhs();
MDefinition* rhs = ins->rhs();
MOZ_ASSERT(lhs->type() == MIRType::Double);
MOZ_ASSERT(rhs->type() == MIRType::Double);
auto* lir = new (alloc())
LSameValueDouble(useRegister(lhs), useRegister(rhs), tempDouble());
define(lir, ins);
}
void LIRGenerator::visitSameValue(MSameValue* ins) {
MDefinition* lhs = ins->lhs();
MDefinition* rhs = ins->rhs();
MOZ_ASSERT(lhs->type() == MIRType::Value);
MOZ_ASSERT(rhs->type() == MIRType::Value);
auto* lir = new (alloc()) LSameValue(useBox(lhs), useBox(rhs));
define(lir, ins);
assignSafepoint(lir, ins);
}
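// Shared lowering for the commutative bitwise ops on Int32 and Int64
// operands. ReorderCommutative canonicalizes the operands so that a
// constant, if present, ends up on the right-hand side, where it can
// typically be encoded as an immediate.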
void LIRGenerator::lowerBitOp(JSOp op, MBinaryInstruction* ins) {
MDefinition* lhs = ins->getOperand(0);
MDefinition* rhs = ins->getOperand(1);
MOZ_ASSERT(IsIntType(ins->type()));
if (ins->type() == MIRType::Int32) {
MOZ_ASSERT(lhs->type() == MIRType::Int32);
MOZ_ASSERT(rhs->type() == MIRType::Int32);
ReorderCommutative(&lhs, &rhs, ins);
lowerForALU(new (alloc()) LBitOpI(op), ins, lhs, rhs);
return;
}
if (ins->type() == MIRType::Int64) {
MOZ_ASSERT(lhs->type() == MIRType::Int64);
MOZ_ASSERT(rhs->type() == MIRType::Int64);
ReorderCommutative(&lhs, &rhs, ins);
lowerForALUInt64(new (alloc()) LBitOpI64(op), ins, lhs, rhs);
return;