Source code

Revision control

Copy as Markdown

Other Tools

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/MIR.h"
#include "mozilla/EndianUtils.h"
#include "mozilla/FloatingPoint.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/Maybe.h"
#include <array>
#include <utility>
#include "jslibmath.h"
#include "jsmath.h"
#include "jsnum.h"
#include "builtin/RegExp.h"
#include "jit/AtomicOperations.h"
#include "jit/CompileInfo.h"
#include "jit/KnownClass.h"
#include "jit/MIR-wasm.h"
#include "jit/MIRGraph.h"
#include "jit/RangeAnalysis.h"
#include "jit/VMFunctions.h"
#include "jit/WarpBuilderShared.h"
#include "js/Conversions.h"
#include "js/experimental/JitInfo.h" // JSJitInfo, JSTypedMethodJitInfo
#include "js/ScalarType.h" // js::Scalar::Type
#include "util/Text.h"
#include "util/Unicode.h"
#include "vm/BigIntType.h"
#include "vm/Float16.h"
#include "vm/Iteration.h" // js::NativeIterator
#include "vm/PlainObject.h" // js::PlainObject
#include "vm/Uint8Clamped.h"
#include "vm/BytecodeUtil-inl.h"
#include "vm/JSAtomUtils-inl.h" // TypeName
using namespace js;
using namespace js::jit;
using JS::ToInt32;
using mozilla::IsFloat32Representable;
using mozilla::IsPowerOfTwo;
using mozilla::NumbersAreIdentical;
NON_GC_POINTER_TYPE_ASSERTIONS_GENERATED
#ifdef DEBUG
// Debug-only: the position of this use in its consumer's operand list.
size_t MUse::index() const { return consumer()->indexOf(this); }
#endif
// Replaces operand |Op| of |consumer| with an MToDouble conversion of |def|,
// inserting the new conversion instruction immediately before |consumer|.
template <size_t Op>
static void ConvertDefinitionToDouble(TempAllocator& alloc, MDefinition* def,
                                      MInstruction* consumer) {
  MInstruction* replace = MToDouble::New(alloc, def);
  consumer->replaceOperand(Op, replace);
  consumer->block()->insertBefore(consumer, replace);
}
// If operand |Index| of |def| has Float32 type, widen it to Double.
template <size_t Arity, size_t Index>
static void ConvertOperandToDouble(MAryInstruction<Arity>* def,
                                   TempAllocator& alloc) {
  static_assert(Index < Arity);
  auto* operand = def->getOperand(Index);
  if (operand->type() == MIRType::Float32) {
    ConvertDefinitionToDouble<Index>(alloc, operand, def);
  }
}
// Widens every Float32 operand of |def| to Double. The index_sequence
// overload fold-expands into one ConvertOperandToDouble call per operand.
template <size_t Arity, size_t... ISeq>
static void ConvertOperandsToDouble(MAryInstruction<Arity>* def,
                                    TempAllocator& alloc,
                                    std::index_sequence<ISeq...>) {
  (ConvertOperandToDouble<Arity, ISeq>(def, alloc), ...);
}

// Convenience entry point: converts all |Arity| operands.
template <size_t Arity>
static void ConvertOperandsToDouble(MAryInstruction<Arity>* def,
                                    TempAllocator& alloc) {
  ConvertOperandsToDouble<Arity>(def, alloc, std::make_index_sequence<Arity>{});
}
// Returns true iff every operand of |def| can produce a Float32 value
// (checked via a short-circuiting fold over all operand indices).
template <size_t Arity, size_t... ISeq>
static bool AllOperandsCanProduceFloat32(MAryInstruction<Arity>* def,
                                         std::index_sequence<ISeq...>) {
  return (def->getOperand(ISeq)->canProduceFloat32() && ...);
}

// Convenience entry point checking all |Arity| operands.
template <size_t Arity>
static bool AllOperandsCanProduceFloat32(MAryInstruction<Arity>* def) {
  return AllOperandsCanProduceFloat32<Arity>(def,
                                             std::make_index_sequence<Arity>{});
}
// Returns true iff every definition-use of |ins| can consume a Float32 value
// and |ins| has no implicit (non-inspectable) uses.
static bool CheckUsesAreFloat32Consumers(const MInstruction* ins) {
  if (ins->isImplicitlyUsed()) {
    return false;
  }
  for (MUseDefIterator use(ins); use; use++) {
    if (!use.def()->canConsumeFloat32(use.use())) {
      return false;
    }
  }
  return true;
}
#ifdef JS_JITSPEW
// Returns the mnemonic for |op|, via a name table generated from
// MIR_OPCODE_LIST and indexed by the numeric opcode value.
static const char* OpcodeName(MDefinition::Opcode op) {
  static const char* const names[] = {
# define NAME(x) #x,
      MIR_OPCODE_LIST(NAME)
# undef NAME
  };
  return names[unsigned(op)];
}

// Prints the opcode mnemonic (spew builds only).
void MDefinition::PrintOpcodeName(GenericPrinter& out, Opcode op) {
  out.printf("%s", OpcodeName(op));
}

// Spew-only helper returning a basic block's id.
uint32_t js::jit::GetMBasicBlockId(const MBasicBlock* block) {
  return block->id();
}
#endif
// Reads the integer payload of |cst| as the C++ type matching MIR type
// |Type|: int32_t, int64_t, or intptr_t. Only meaningful for those three
// types; any other |Type| would make the deduced return type void.
template <MIRType Type>
static auto ToIntConstant(MConstant* cst) {
  MOZ_ASSERT(cst->type() == Type);
  if constexpr (Type == MIRType::Int32) {
    return cst->toInt32();
  } else if constexpr (Type == MIRType::Int64) {
    return cst->toInt64();
  } else if constexpr (Type == MIRType::IntPtr) {
    return cst->toIntPtr();
  }
}
// Allocates a new integer MConstant of MIR type |Type| holding |i|. The
// static_asserts pin the C++ payload type to the matching MIR type.
template <MIRType Type, typename IntT>
static MConstant* NewIntConstant(TempAllocator& alloc, IntT i) {
  if constexpr (Type == MIRType::Int32) {
    static_assert(std::is_same_v<IntT, int32_t>);
    return MConstant::NewInt32(alloc, i);
  } else if constexpr (Type == MIRType::Int64) {
    static_assert(std::is_same_v<IntT, int64_t>);
    return MConstant::NewInt64(alloc, i);
  } else if constexpr (Type == MIRType::IntPtr) {
    static_assert(std::is_same_v<IntT, intptr_t>);
    return MConstant::NewIntPtr(alloc, i);
  }
}
// Constant-folds a binary integer or BigIntPtr instruction whose operands are
// both constants of MIR type |Type| (Int32, Int64, or IntPtr).
//
// Returns the folded constant, or nullptr when folding must be declined:
// non-constant operands, overflow, division by zero, inexact (non-truncating)
// int32 division, negative-zero int32 remainder, or excess BigInt shifts.
template <MIRType Type>
static MConstant* EvaluateIntConstantOperands(TempAllocator& alloc,
                                              MBinaryInstruction* ins) {
  MDefinition* left = ins->lhs();
  MDefinition* right = ins->rhs();
  if (!left->isConstant() || !right->isConstant()) {
    return nullptr;
  }

  using IntT = decltype(ToIntConstant<Type>(nullptr));
  // Fixed: the alias was previously misspelled "UnsigedInt".
  using UnsignedInt = std::make_unsigned_t<IntT>;

  // Right-hand side operand of shift must be non-negative and be less-than the
  // number of bits in the left-hand side operand. Otherwise the behavior is
  // undefined.
  static constexpr IntT shiftMask = (sizeof(IntT) * CHAR_BIT) - 1;

  IntT lhs = ToIntConstant<Type>(left->toConstant());
  IntT rhs = ToIntConstant<Type>(right->toConstant());
  IntT ret;
  switch (ins->op()) {
    case MDefinition::Opcode::BitAnd:
    case MDefinition::Opcode::BigIntPtrBitAnd:
      ret = lhs & rhs;
      break;
    case MDefinition::Opcode::BitOr:
    case MDefinition::Opcode::BigIntPtrBitOr:
      ret = lhs | rhs;
      break;
    case MDefinition::Opcode::BitXor:
    case MDefinition::Opcode::BigIntPtrBitXor:
      ret = lhs ^ rhs;
      break;
    case MDefinition::Opcode::Lsh:
      // Left-hand side operand must be non-negative, otherwise the behavior is
      // undefined. Cast to unsigned to ensure the behavior is always defined.
      //
      // Note: Cast to unsigned is no longer needed when compiling to C++20.
      ret = UnsignedInt(lhs) << (rhs & shiftMask);
      break;
    case MDefinition::Opcode::Rsh:
      // The result is implementation-defined if the left-hand side operand is
      // negative. Most implementations perform an arithmetic right-shift, which
      // we rely on here.
      //
      // Note: Guaranteed to be an arithmetic right-shift in C++20.
      ret = lhs >> (rhs & shiftMask);
      break;
    case MDefinition::Opcode::Ursh:
      // Decline folding if the output doesn't fit into a signed result, unless
      // bailouts are disabled. (Wasm has bailouts disabled.)
      //
      // (Comment fixed: the condition declines when bailouts are *enabled*,
      // i.e. when !bailoutsDisabled().)
      if (lhs < 0 && rhs == 0 && !ins->toUrsh()->bailoutsDisabled()) {
        return nullptr;
      }
      ret = UnsignedInt(lhs) >> (UnsignedInt(rhs) & shiftMask);
      break;
    case MDefinition::Opcode::BigIntPtrLsh:
    case MDefinition::Opcode::BigIntPtrRsh: {
      // BigIntPtr shifts are special:
      // 1. Excess shift amounts produce BigInt larger than IntPtr.
      // 2. Negative shifts reverse the shift direction.
      // Decline folding for excess shift amounts.
      UnsignedInt shift = mozilla::Abs(rhs);
      if ((shift & shiftMask) != shift) {
        return nullptr;
      }
      bool isLsh = (ins->isBigIntPtrLsh() && rhs >= 0) ||
                   (ins->isBigIntPtrRsh() && rhs < 0);
      if (isLsh) {
        ret = UnsignedInt(lhs) << shift;
      } else {
        ret = lhs >> shift;
      }
      break;
    }
    case MDefinition::Opcode::Add:
    case MDefinition::Opcode::BigIntPtrAdd: {
      // CheckedInt detects overflow; decline folding on overflow.
      auto checked = mozilla::CheckedInt<IntT>(lhs) + rhs;
      if (!checked.isValid()) {
        return nullptr;
      }
      ret = checked.value();
      break;
    }
    case MDefinition::Opcode::Sub:
    case MDefinition::Opcode::BigIntPtrSub: {
      auto checked = mozilla::CheckedInt<IntT>(lhs) - rhs;
      if (!checked.isValid()) {
        return nullptr;
      }
      ret = checked.value();
      break;
    }
    case MDefinition::Opcode::Mul:
    case MDefinition::Opcode::BigIntPtrMul: {
      auto checked = mozilla::CheckedInt<IntT>(lhs) * rhs;
      if (!checked.isValid()) {
        return nullptr;
      }
      ret = checked.value();
      break;
    }
    case MDefinition::Opcode::Div: {
      if (ins->toDiv()->isUnsigned()) {
        // CheckedInt rejects division by zero.
        auto checked =
            mozilla::CheckedInt<UnsignedInt>(UnsignedInt(lhs)) / UnsignedInt(rhs);
        if (!checked.isValid()) {
          return nullptr;
        }
        ret = IntT(checked.value());
        break;
      }
      // Signed division shares the code below with BigIntPtrDiv.
      [[fallthrough]];
    }
    case MDefinition::Opcode::BigIntPtrDiv: {
      auto checked = mozilla::CheckedInt<IntT>(lhs) / rhs;
      if (!checked.isValid()) {
        return nullptr;
      }
      ret = checked.value();
      // Decline folding if the numerator isn't evenly divisible by the
      // denominator. Only applies for non-truncating int32 division.
      if constexpr (Type == MIRType::Int32) {
        if (ret * rhs != lhs && !ins->toDiv()->isTruncated()) {
          return nullptr;
        }
      }
      break;
    }
    case MDefinition::Opcode::Mod: {
      if (ins->toMod()->isUnsigned()) {
        auto checked =
            mozilla::CheckedInt<UnsignedInt>(UnsignedInt(lhs)) % UnsignedInt(rhs);
        if (!checked.isValid()) {
          return nullptr;
        }
        ret = IntT(checked.value());
        break;
      }
      // Signed remainder shares the code below with BigIntPtrMod.
      [[fallthrough]];
    }
    case MDefinition::Opcode::BigIntPtrMod: {
      auto checked = mozilla::CheckedInt<IntT>(lhs) % rhs;
      if (!checked.isValid()) {
        return nullptr;
      }
      ret = checked.value();
      // Decline folding if the result is negative zero. Only applies for
      // non-truncating int32 remainder.
      if constexpr (Type == MIRType::Int32) {
        if (ret == 0 && lhs < 0 && !ins->toMod()->isTruncated()) {
          return nullptr;
        }
      }
      break;
    }
    default:
      MOZ_CRASH("NYI");
  }

  return NewIntConstant<Type>(alloc, ret);
}
// Type-specific wrappers over EvaluateIntConstantOperands.
static MConstant* EvaluateInt32ConstantOperands(TempAllocator& alloc,
                                                MBinaryInstruction* ins) {
  return EvaluateIntConstantOperands<MIRType::Int32>(alloc, ins);
}

static MConstant* EvaluateInt64ConstantOperands(TempAllocator& alloc,
                                                MBinaryInstruction* ins) {
  return EvaluateIntConstantOperands<MIRType::Int64>(alloc, ins);
}

static MConstant* EvaluateIntPtrConstantOperands(TempAllocator& alloc,
                                                 MBinaryInstruction* ins) {
  return EvaluateIntConstantOperands<MIRType::IntPtr>(alloc, ins);
}
// Constant-folds a binary arithmetic instruction with two constant operands.
// Int32 instructions are delegated to the integer folder; the remaining types
// are floating point and are folded in double precision, narrowed to float
// for Float32 instructions. Returns nullptr if either operand is not a
// constant.
static MConstant* EvaluateConstantOperands(TempAllocator& alloc,
                                           MBinaryInstruction* ins) {
  MOZ_ASSERT(IsTypeRepresentableAsDouble(ins->type()));

  if (ins->type() == MIRType::Int32) {
    return EvaluateInt32ConstantOperands(alloc, ins);
  }

  MDefinition* left = ins->lhs();
  MDefinition* right = ins->rhs();
  MOZ_ASSERT(IsFloatingPointType(left->type()));
  MOZ_ASSERT(IsFloatingPointType(right->type()));
  if (!left->isConstant() || !right->isConstant()) {
    return nullptr;
  }

  double lhs = left->toConstant()->numberToDouble();
  double rhs = right->toConstant()->numberToDouble();
  double ret;
  switch (ins->op()) {
    case MDefinition::Opcode::Add:
      ret = lhs + rhs;
      break;
    case MDefinition::Opcode::Sub:
      ret = lhs - rhs;
      break;
    case MDefinition::Opcode::Mul:
      ret = lhs * rhs;
      break;
    case MDefinition::Opcode::Div:
      // NumberDiv/NumberMod implement the JS semantics (e.g. division by
      // zero) rather than raw C++ behavior.
      ret = NumberDiv(lhs, rhs);
      break;
    case MDefinition::Opcode::Mod:
      ret = NumberMod(lhs, rhs);
      break;
    default:
      MOZ_CRASH("NYI");
  }

  if (ins->type() == MIRType::Float32) {
    return MConstant::NewFloat32(alloc, float(ret));
  }
  MOZ_ASSERT(ins->type() == MIRType::Double);
  return MConstant::New(alloc, DoubleValue(ret));
}
// If exactly one operand of a floating-point binary instruction is a constant
// NaN, folds the whole instruction to that NaN constant (the existing
// constant is reused, not reallocated). Returns nullptr when the fold does
// not apply.
//
// NOTE(review): NaN-propagation is assumed valid for every opcode this is
// called on; the function itself does not check the opcode.
static MConstant* EvaluateConstantNaNOperand(MBinaryInstruction* ins) {
  auto* left = ins->lhs();
  auto* right = ins->rhs();
  MOZ_ASSERT(IsTypeRepresentableAsDouble(left->type()));
  MOZ_ASSERT(IsTypeRepresentableAsDouble(right->type()));
  MOZ_ASSERT(left->type() == ins->type());
  MOZ_ASSERT(right->type() == ins->type());

  // Don't fold NaN if we can't return a floating point type.
  if (!IsFloatingPointType(ins->type())) {
    return nullptr;
  }

  MOZ_ASSERT(!left->isConstant() || !right->isConstant(),
             "EvaluateConstantOperands should have handled this case");

  // One operand must be a constant NaN.
  MConstant* cst;
  if (left->isConstant()) {
    cst = left->toConstant();
  } else if (right->isConstant()) {
    cst = right->toConstant();
  } else {
    return nullptr;
  }
  if (!std::isnan(cst->numberToDouble())) {
    return nullptr;
  }

  // Fold to constant NaN.
  return cst;
}
// Strength-reduces a floating-point division by a constant to a
// multiplication by the exact reciprocal. Only applies when the divisor is an
// int32-valued constant that is zero or a power of two, so 1/num is exactly
// representable. Returns the replacement MMul, or nullptr if the rewrite does
// not apply. The folded reciprocal constant is inserted before |ins|; the
// multiply itself is returned to the caller un-inserted.
static MMul* EvaluateExactReciprocal(TempAllocator& alloc, MDiv* ins) {
  // we should fold only when it is a floating point operation
  if (!IsFloatingPointType(ins->type())) {
    return nullptr;
  }

  MDefinition* left = ins->getOperand(0);
  MDefinition* right = ins->getOperand(1);

  if (!right->isConstant()) {
    return nullptr;
  }

  int32_t num;
  if (!mozilla::NumberIsInt32(right->toConstant()->numberToDouble(), &num)) {
    return nullptr;
  }

  // check if rhs is a power of two or zero
  if (num != 0 && !mozilla::IsPowerOfTwo(mozilla::Abs(num))) {
    return nullptr;
  }

  // Exact for powers of two; num == 0 yields an infinity here.
  double ret = 1.0 / double(num);

  MConstant* foldedRhs;
  if (ins->type() == MIRType::Float32) {
    foldedRhs = MConstant::NewFloat32(alloc, ret);
  } else {
    foldedRhs = MConstant::NewDouble(alloc, ret);
  }

  MOZ_ASSERT(foldedRhs->type() == ins->type());
  ins->block()->insertBefore(ins, foldedRhs);

  MMul* mul = MMul::New(alloc, left, foldedRhs, ins->type());
  mul->setMustPreserveNaN(ins->mustPreserveNaN());
  return mul;
}
#ifdef JS_JITSPEW
// Spew-only: mnemonic of this definition's opcode.
const char* MDefinition::opName() const { return OpcodeName(op()); }

// Spew-only: prints "<Opcode>#<id>".
void MDefinition::printName(GenericPrinter& out) const {
  PrintOpcodeName(out, op());
  out.printf("#%u", id());
}
#endif
// Hash of the opcode, all operand ids, and the alias dependency (if any),
// used for value numbering. The arity-specialized overrides below compute the
// identical hash without going through the virtual numOperands()/getOperand()
// interface; each one asserts agreement with this generic version.
HashNumber MDefinition::valueHash() const {
  HashNumber out = HashNumber(op());
  for (size_t i = 0, e = numOperands(); i < e; i++) {
    out = addU32ToHash(out, getOperand(i)->id());
  }
  if (MDefinition* dep = dependency()) {
    out = addU32ToHash(out, dep->id());
  }
  return out;
}

// Zero-operand specialization.
HashNumber MNullaryInstruction::valueHash() const {
  HashNumber hash = HashNumber(op());
  if (MDefinition* dep = dependency()) {
    hash = addU32ToHash(hash, dep->id());
  }
  MOZ_ASSERT(hash == MDefinition::valueHash());
  return hash;
}

// One-operand specialization.
HashNumber MUnaryInstruction::valueHash() const {
  HashNumber hash = HashNumber(op());
  hash = addU32ToHash(hash, getOperand(0)->id());
  if (MDefinition* dep = dependency()) {
    hash = addU32ToHash(hash, dep->id());
  }
  MOZ_ASSERT(hash == MDefinition::valueHash());
  return hash;
}

// Two-operand specialization.
HashNumber MBinaryInstruction::valueHash() const {
  HashNumber hash = HashNumber(op());
  hash = addU32ToHash(hash, getOperand(0)->id());
  hash = addU32ToHash(hash, getOperand(1)->id());
  if (MDefinition* dep = dependency()) {
    hash = addU32ToHash(hash, dep->id());
  }
  MOZ_ASSERT(hash == MDefinition::valueHash());
  return hash;
}

// Three-operand specialization.
HashNumber MTernaryInstruction::valueHash() const {
  HashNumber hash = HashNumber(op());
  hash = addU32ToHash(hash, getOperand(0)->id());
  hash = addU32ToHash(hash, getOperand(1)->id());
  hash = addU32ToHash(hash, getOperand(2)->id());
  if (MDefinition* dep = dependency()) {
    hash = addU32ToHash(hash, dep->id());
  }
  MOZ_ASSERT(hash == MDefinition::valueHash());
  return hash;
}

// Four-operand specialization.
HashNumber MQuaternaryInstruction::valueHash() const {
  HashNumber hash = HashNumber(op());
  hash = addU32ToHash(hash, getOperand(0)->id());
  hash = addU32ToHash(hash, getOperand(1)->id());
  hash = addU32ToHash(hash, getOperand(2)->id());
  hash = addU32ToHash(hash, getOperand(3)->id());
  if (MDefinition* dep = dependency()) {
    hash = addU32ToHash(hash, dep->id());
  }
  MOZ_ASSERT(hash == MDefinition::valueHash());
  return hash;
}

// Five-operand specialization.
HashNumber MQuinaryInstruction::valueHash() const {
  HashNumber hash = HashNumber(op());
  hash = addU32ToHash(hash, getOperand(0)->id());
  hash = addU32ToHash(hash, getOperand(1)->id());
  hash = addU32ToHash(hash, getOperand(2)->id());
  hash = addU32ToHash(hash, getOperand(3)->id());
  hash = addU32ToHash(hash, getOperand(4)->id());
  if (MDefinition* dep = dependency()) {
    hash = addU32ToHash(hash, dep->id());
  }
  MOZ_ASSERT(hash == MDefinition::valueHash());
  return hash;
}
// Walks backwards through chained object guards (shape / null-proto / proto)
// and returns the underlying object definition. These guard instructions
// don't modify the object; they only check specific properties of it.
const MDefinition* MDefinition::skipObjectGuards() const {
  const MDefinition* cur = this;
  for (;;) {
    if (cur->isGuardShape()) {
      cur = cur->toGuardShape()->object();
    } else if (cur->isGuardNullProto()) {
      cur = cur->toGuardNullProto()->object();
    } else if (cur->isGuardProto()) {
      cur = cur->toGuardProto()->object();
    } else {
      return cur;
    }
  }
}
bool MDefinition::congruentIfOperandsEqual(const MDefinition* ins) const {
if (op() != ins->op()) {
return false;
}
if (type() != ins->type()) {
return false;
}
if (isEffectful() || ins->isEffectful()) {
return false;
}
if (numOperands() != ins->numOperands()) {
return false;
}
for (size_t i = 0, e = numOperands(); i < e; i++) {
if (getOperand(i) != ins->getOperand(i)) {
return false;
}
}
return true;
}
// Default folding hook: a definition with no specialized fold returns itself.
MDefinition* MDefinition::foldsTo(TempAllocator& alloc) {
  // In the default case, there are no constants to fold.
  return this;
}
// Tries to fold a load to the value written by the store it depends on.
// Succeeds only when the alias dependency is one of the known store kinds,
// must-aliases this load, and its block dominates this one. Returns the
// stored value (boxed if this load produces a Value but the store was typed),
// or nullptr when the fold does not apply.
MDefinition* MInstruction::foldsToStore(TempAllocator& alloc) {
  if (!dependency()) {
    return nullptr;
  }

  MDefinition* store = dependency();
  if (mightAlias(store) != AliasType::MustAlias) {
    return nullptr;
  }

  if (!store->block()->dominates(block())) {
    return nullptr;
  }

  MDefinition* value;
  switch (store->op()) {
    case Opcode::StoreFixedSlot:
      value = store->toStoreFixedSlot()->value();
      break;
    case Opcode::StoreDynamicSlot:
      value = store->toStoreDynamicSlot()->value();
      break;
    case Opcode::StoreElement:
      value = store->toStoreElement()->value();
      break;
    default:
      MOZ_CRASH("unknown store");
  }

  // If the type are matching then we return the value which is used as
  // argument of the store.
  if (value->type() != type()) {
    // If we expect to read a type which is more generic than the type seen
    // by the store, then we box the value used by the store.
    if (type() != MIRType::Value) {
      return nullptr;
    }

    MOZ_ASSERT(value->type() < MIRType::Value);
    // NOTE(review): the MBox is created but not inserted here — presumably
    // the foldsTo machinery inserts the replacement; confirm against callers.
    MBox* box = MBox::New(alloc, value);
    value = box;
  }

  return value;
}
// Default no-op edge-case analysis hooks; instructions that need forward or
// backward edge-case analysis override these.
void MDefinition::analyzeEdgeCasesForward() {}

void MDefinition::analyzeEdgeCasesBackward() {}
// Attaches |resumePoint| to this instruction (which must not already have
// one) and links the resume point back to this instruction.
void MInstruction::setResumePoint(MResumePoint* resumePoint) {
  MOZ_ASSERT(!resumePoint_);
  resumePoint_ = resumePoint;
  resumePoint_->setInstruction(this);
}

// Transfers |other|'s resume point to this instruction.
void MInstruction::stealResumePoint(MInstruction* other) {
  MResumePoint* resumePoint = other->resumePoint_;
  other->resumePoint_ = nullptr;

  resumePoint->resetInstruction();
  setResumePoint(resumePoint);
}

// Repurposes this (nop) instruction's resume point as the entry resume point
// of its block.
void MInstruction::moveResumePointAsEntry() {
  MOZ_ASSERT(isNop());
  block()->clearEntryResumePoint();
  block()->setEntryResumePoint(resumePoint_);
  resumePoint_->resetInstruction();
  resumePoint_ = nullptr;
}

// Detaches and discards this instruction's resume point.
void MInstruction::clearResumePoint() {
  resumePoint_->resetInstruction();
  block()->discardPreAllocatedResumePoint(resumePoint_);
  resumePoint_ = nullptr;
}
// Folds Test(Not(x)) into Test(x) with swapped branch targets, and
// Test(Not(Not(x))) into Test(x) with the original targets. Returns nullptr
// if the operand is not a Not.
MDefinition* MTest::foldsDoubleNegation(TempAllocator& alloc) {
  MDefinition* op = getOperand(0);

  if (op->isNot()) {
    // If the operand of the Not is itself a Not, they cancel out.
    MDefinition* opop = op->getOperand(0);
    if (opop->isNot()) {
      return MTest::New(alloc, opop->toNot()->input(), ifTrue(), ifFalse());
    }
    return MTest::New(alloc, op->toNot()->input(), ifFalse(), ifTrue());
  }
  return nullptr;
}
// If the test input is a constant with a known boolean value, folds the test
// into an unconditional goto to the taken branch. Returns nullptr otherwise.
MDefinition* MTest::foldsConstant(TempAllocator& alloc) {
  MDefinition* op = getOperand(0);
  if (MConstant* opConst = op->maybeConstantValue()) {
    bool b;
    if (opConst->valueToBoolean(&b)) {
      return MGoto::New(alloc, b ? ifTrue() : ifFalse());
    }
  }
  return nullptr;
}
// Folds tests whose truthiness is fixed by the input's MIR type:
// undefined/null are always falsy, symbols are always truthy. Other types
// are left alone.
MDefinition* MTest::foldsTypes(TempAllocator& alloc) {
  MDefinition* op = getOperand(0);

  switch (op->type()) {
    case MIRType::Undefined:
    case MIRType::Null:
      return MGoto::New(alloc, ifFalse());
    case MIRType::Symbol:
      return MGoto::New(alloc, ifTrue());
    default:
      break;
  }
  return nullptr;
}
// Adapter exposing a definition's use list as a range (begin()/end()) so it
// can be traversed with a range-based for loop.
class UsesIterator {
  MDefinition* def_;

 public:
  explicit UsesIterator(MDefinition* def) : def_(def) {}
  auto begin() const { return def_->usesBegin(); }
  auto end() const { return def_->usesEnd(); }
};
// Returns true iff every non-trivial instruction in |block| is removable when
// unused and has no uses outside the block, i.e. the whole block can be
// discarded if nothing branches into it.
static bool AllInstructionsDeadIfUnused(MBasicBlock* block) {
  for (auto* ins : *block) {
    // Skip trivial instructions.
    if (ins->isNop() || ins->isGoto()) {
      continue;
    }

    // All uses must be within the current block.
    for (auto* use : UsesIterator(ins)) {
      if (use->consumer()->block() != block) {
        return false;
      }
    }

    // All instructions within this block must be dead if unused.
    if (!DeadIfUnused(ins)) {
      return false;
    }
  }
  return true;
}
// Folds a diamond whose two arms are side-effect-free and converge on the
// same phi-less successor into an unconditional goto through the true arm.
MDefinition* MTest::foldsNeedlessControlFlow(TempAllocator& alloc) {
  // All instructions within both successors need be dead if unused.
  if (!AllInstructionsDeadIfUnused(ifTrue()) ||
      !AllInstructionsDeadIfUnused(ifFalse())) {
    return nullptr;
  }

  // Both successors must have the same target successor.
  if (ifTrue()->numSuccessors() != 1 || ifFalse()->numSuccessors() != 1) {
    return nullptr;
  }
  if (ifTrue()->getSuccessor(0) != ifFalse()->getSuccessor(0)) {
    return nullptr;
  }

  // The target successor's phis must be redundant. Redundant phis should have
  // been removed in an earlier pass, so only check if any phis are present,
  // which is a stronger condition.
  if (ifTrue()->successorWithPhis()) {
    return nullptr;
  }

  return MGoto::New(alloc, ifTrue());
}
// If a test is dominated by either the true or false path of a previous test of
// the same condition, then the test is redundant and can be converted into a
// goto true or goto false, respectively.
MDefinition* MTest::foldsRedundantTest(TempAllocator& alloc) {
  MBasicBlock* myBlock = this->block();
  MDefinition* originalInput = getOperand(0);

  // Handle single and double negatives. This ensures that we do not miss a
  // folding opportunity due to a condition being inverted.
  MDefinition* newInput = input();
  bool inverted = false;
  if (originalInput->isNot()) {
    newInput = originalInput->toNot()->input();
    inverted = true;
    if (originalInput->toNot()->input()->isNot()) {
      newInput = originalInput->toNot()->input()->toNot()->input();
      inverted = false;
    }
  }

  // The specific order of traversal does not matter. If there are multiple
  // dominating redundant tests, they will either agree on direction (in which
  // case we will prune the same way regardless of order), or they will
  // disagree, in which case we will eventually be marked entirely dead by the
  // folding of the redundant parent.
  //
  // Scan all other tests of the (possibly un-negated) condition to find one
  // whose taken arm dominates this test.
  for (MUseIterator i(newInput->usesBegin()), e(newInput->usesEnd()); i != e;
       ++i) {
    if (!i->consumer()->isDefinition()) {
      continue;
    }
    if (!i->consumer()->toDefinition()->isTest()) {
      continue;
    }
    MTest* otherTest = i->consumer()->toDefinition()->toTest();
    if (otherTest == this) {
      continue;
    }

    if (otherTest->ifFalse()->dominates(myBlock)) {
      // This test cannot be true, so fold to a goto false.
      return MGoto::New(alloc, inverted ? ifTrue() : ifFalse());
    }
    if (otherTest->ifTrue()->dominates(myBlock)) {
      // This test cannot be false, so fold to a goto true.
      return MGoto::New(alloc, inverted ? ifFalse() : ifTrue());
    }
  }
  return nullptr;
}
// Master folding entry point for MTest: tries each folding strategy in
// priority order and returns the first successful replacement, or |this| if
// no fold applies.
MDefinition* MTest::foldsTo(TempAllocator& alloc) {
  using FoldFn = MDefinition* (MTest::*)(TempAllocator&);
  static constexpr FoldFn strategies[] = {
      &MTest::foldsRedundantTest, &MTest::foldsDoubleNegation,
      &MTest::foldsConstant, &MTest::foldsTypes,
      &MTest::foldsNeedlessControlFlow};

  for (FoldFn strategy : strategies) {
    if (MDefinition* folded = (this->*strategy)(alloc)) {
      return folded;
    }
  }
  return this;
}
// These operations are modelled as stores to the exception state.
AliasSet MThrow::getAliasSet() const {
  return AliasSet::Store(AliasSet::ExceptionState);
}

AliasSet MThrowWithStack::getAliasSet() const {
  return AliasSet::Store(AliasSet::ExceptionState);
}

AliasSet MNewArrayDynamicLength::getAliasSet() const {
  return AliasSet::Store(AliasSet::ExceptionState);
}

AliasSet MNewTypedArrayDynamicLength::getAliasSet() const {
  return AliasSet::Store(AliasSet::ExceptionState);
}
#ifdef JS_JITSPEW
// Spew-only: prints "<Opcode> <- op0, op1, ..."; operands without a producer
// print as "(null)".
void MDefinition::printOpcode(GenericPrinter& out) const {
  PrintOpcodeName(out, op());
  if (numOperands() > 0) {
    out.printf(" <- ");
  }
  for (size_t j = 0, e = numOperands(); j < e; j++) {
    if (j > 0) {
      out.printf(", ");
    }
    if (getUseFor(j)->hasProducer()) {
      getOperand(j)->printName(out);
    } else {
      out.printf("(null)");
    }
  }
}
// Spew-only: prints "name:type = opcode <- operands" plus the attached
// resume point, if any.
void MDefinition::dump(GenericPrinter& out) const {
  printName(out);
  out.printf(":%s", StringFromMIRType(type()));
  out.printf(" = ");
  printOpcode(out);
  out.printf("\n");

  if (isInstruction()) {
    if (MResumePoint* resume = toInstruction()->resumePoint()) {
      resume->dump(out);
    }
  }
}

// Convenience overload dumping to stderr.
void MDefinition::dump() const {
  Fprinter out(stderr);
  dump(out);
  out.finish();
}

// Spew-only: prints the script/line location chain for this definition,
// walking the resume point's caller chain for inlined frames.
void MDefinition::dumpLocation(GenericPrinter& out) const {
  MResumePoint* rp = nullptr;
  const char* linkWord = nullptr;
  // Prefer this instruction's own resume point; fall back to the block entry.
  if (isInstruction() && toInstruction()->resumePoint()) {
    rp = toInstruction()->resumePoint();
    linkWord = "at";
  } else {
    rp = block()->entryResumePoint();
    linkWord = "after";
  }

  while (rp) {
    JSScript* script = rp->block()->info().script();
    uint32_t lineno = PCToLineNumber(rp->block()->info().script(), rp->pc());
    out.printf("  %s %s:%u\n", linkWord, script->filename(), lineno);
    rp = rp->caller();
    linkWord = "in";
  }
}

// Convenience overload dumping to stderr.
void MDefinition::dumpLocation() const {
  Fprinter out(stderr);
  dumpLocation(out);
  out.finish();
}
#endif
#if defined(DEBUG) || defined(JS_JITSPEW)
// Debug/spew-only: counts all uses of this definition (O(n) list walk).
size_t MDefinition::useCount() const {
  size_t count = 0;
  for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
    count++;
  }
  return count;
}

// Debug/spew-only: counts only uses whose consumer is a definition (i.e.
// excludes resume-point uses).
size_t MDefinition::defUseCount() const {
  size_t count = 0;
  for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
    if ((*i)->consumer()->isDefinition()) {
      count++;
    }
  }
  return count;
}
#endif
bool MDefinition::hasOneUse() const {
MUseIterator i(uses_.begin());
if (i == uses_.end()) {
return false;
}
i++;
return i == uses_.end();
}
// Returns true iff exactly one use of this definition is a definition
// (resume-point uses are ignored).
bool MDefinition::hasOneDefUse() const {
  bool hasOneDefUse = false;
  for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
    if (!(*i)->consumer()->isDefinition()) {
      continue;
    }

    // We already have a definition use. So 1+
    if (hasOneDefUse) {
      return false;
    }

    // We saw one definition. Loop to test if there is another.
    hasOneDefUse = true;
  }

  return hasOneDefUse;
}

// Like hasOneDefUse, but additionally ignores consumers that are recovered on
// bailout (they don't produce code).
bool MDefinition::hasOneLiveDefUse() const {
  bool hasOneDefUse = false;
  for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
    if (!(*i)->consumer()->isDefinition()) {
      continue;
    }

    MDefinition* def = (*i)->consumer()->toDefinition();
    if (def->isRecoveredOnBailout()) {
      continue;
    }

    // We already have a definition use. So 1+
    if (hasOneDefUse) {
      return false;
    }

    // We saw one definition. Loop to test if there is another.
    hasOneDefUse = true;
  }

  return hasOneDefUse;
}
// Returns true iff at least one use of this definition is a definition.
bool MDefinition::hasDefUses() const {
  for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
    if ((*i)->consumer()->isDefinition()) {
      return true;
    }
  }

  return false;
}

// Returns true iff this definition has at least one "live" use: a definition
// use that isn't recovered on bailout, or a resume-point operand that isn't
// recoverable.
bool MDefinition::hasLiveDefUses() const {
  for (MUseIterator i(uses_.begin()); i != uses_.end(); i++) {
    MNode* ins = (*i)->consumer();
    if (ins->isDefinition()) {
      if (!ins->toDefinition()->isRecoveredOnBailout()) {
        return true;
      }
    } else {
      MOZ_ASSERT(ins->isResumePoint());
      if (!ins->toResumePoint()->isRecoverableOperand(*i)) {
        return true;
      }
    }
  }

  return false;
}
// Returns the unique definition-use of this definition, or nullptr when
// there are zero or multiple definition-uses.
MDefinition* MDefinition::maybeSingleDefUse() const {
  MUseDefIterator use(this);
  if (!use) {
    // No def-uses.
    return nullptr;
  }

  MDefinition* useDef = use.def();
  use++;
  if (use) {
    // More than one def-use.
    return nullptr;
  }
  return useDef;
}
// Returns the most recently added definition-use of this definition (the
// head of the use list), or nullptr if there is none.
MDefinition* MDefinition::maybeMostRecentlyAddedDefUse() const {
  MUseDefIterator use(this);
  if (!use) {
    // No def-uses.
    return nullptr;
  }

  MDefinition* mostRecentUse = use.def();

#ifdef DEBUG
  // This function relies on addUse adding new uses to the front of the list.
  // Check this invariant by asserting the next few uses are 'older'. Skip this
  // for phis because setBackedge can add a new use for a loop phi even if the
  // loop body has a use with an id greater than the loop phi's id.
  if (!mostRecentUse->isPhi()) {
    static constexpr size_t NumUsesToCheck = 3;
    use++;
    for (size_t i = 0; use && i < NumUsesToCheck; i++, use++) {
      MOZ_ASSERT(use.def()->id() <= mostRecentUse->id());
    }
  }
#endif

  return mostRecentUse;
}
// Redirects every use of this definition to |dom|, first flagging this
// definition's own operands as implicitly used.
void MDefinition::replaceAllUsesWith(MDefinition* dom) {
  for (size_t i = 0, e = numOperands(); i < e; ++i) {
    getOperand(i)->setImplicitlyUsedUnchecked();
  }

  justReplaceAllUsesWith(dom);
}

// Redirects every use of this definition to |dom| and transfers the whole use
// list, without touching this definition's operands.
void MDefinition::justReplaceAllUsesWith(MDefinition* dom) {
  MOZ_ASSERT(dom != nullptr);
  MOZ_ASSERT(dom != this);

  // Carry over the fact the value has uses which are no longer inspectable
  // with the graph.
  if (isImplicitlyUsed()) {
    dom->setImplicitlyUsedUnchecked();
  }

  for (MUseIterator i(usesBegin()), e(usesEnd()); i != e; ++i) {
    i->setProducerUnchecked(dom);
  }
  dom->uses_.takeElements(uses_);
}
// Replaces every use of this definition with the block-local "optimized out"
// magic constant, then empties this definition's use list. Returns false on
// allocation failure (ballast exhaustion).
bool MDefinition::optimizeOutAllUses(TempAllocator& alloc) {
  for (MUseIterator i(usesBegin()), e(usesEnd()); i != e;) {
    MUse* use = *i++;
    MConstant* constant = use->consumer()->block()->optimizedOutConstant(alloc);
    if (!alloc.ensureBallast()) {
      return false;
    }

    // Update the resume point operand to use the optimized-out constant.
    use->setProducerUnchecked(constant);
    constant->addUseUnchecked(use);
  }

  // Remove dangling pointers.
  this->uses_.clear();
  return true;
}
// Redirects only the "live" uses of this definition to |dom|: resume-point
// uses and recovered-on-bailout consumers keep their original producer.
void MDefinition::replaceAllLiveUsesWith(MDefinition* dom) {
  for (MUseIterator i(usesBegin()), e(usesEnd()); i != e;) {
    MUse* use = *i++;
    MNode* consumer = use->consumer();
    if (consumer->isResumePoint()) {
      continue;
    }
    if (consumer->isDefinition() &&
        consumer->toDefinition()->isRecoveredOnBailout()) {
      continue;
    }

    // Update the operand to use the dominating definition.
    use->replaceProducer(dom);
  }
}
// Factory functions for MConstant, one per payload kind. The Value-based
// factories dispatch on the value's type in the MConstant constructor.
MConstant* MConstant::New(TempAllocator& alloc, const Value& v) {
  return new (alloc) MConstant(alloc, v);
}

// Fallible-allocation variant of New.
MConstant* MConstant::New(TempAllocator::Fallible alloc, const Value& v) {
  return new (alloc) MConstant(alloc.alloc, v);
}

MConstant* MConstant::NewBoolean(TempAllocator& alloc, bool b) {
  return new (alloc) MConstant(b);
}

MConstant* MConstant::NewDouble(TempAllocator& alloc, double d) {
  return new (alloc) MConstant(d);
}

// |d| must be exactly representable as a float (asserted).
MConstant* MConstant::NewFloat32(TempAllocator& alloc, double d) {
  MOZ_ASSERT(mozilla::IsFloat32Representable(d));
  return new (alloc) MConstant(float(d));
}

MConstant* MConstant::NewInt32(TempAllocator& alloc, int32_t i) {
  return new (alloc) MConstant(i);
}

MConstant* MConstant::NewInt64(TempAllocator& alloc, int64_t i) {
  return new (alloc) MConstant(MIRType::Int64, i);
}

MConstant* MConstant::NewIntPtr(TempAllocator& alloc, intptr_t i) {
  return new (alloc) MConstant(MIRType::IntPtr, i);
}

MConstant* MConstant::NewMagic(TempAllocator& alloc, JSWhyMagic m) {
  return new (alloc) MConstant(alloc, MagicValue(m));
}

MConstant* MConstant::NewNull(TempAllocator& alloc) {
  return new (alloc) MConstant(MIRType::Null);
}

MConstant* MConstant::NewObject(TempAllocator& alloc, JSObject* v) {
  return new (alloc) MConstant(v);
}

MConstant* MConstant::NewShape(TempAllocator& alloc, Shape* s) {
  return new (alloc) MConstant(s);
}

MConstant* MConstant::NewString(TempAllocator& alloc, JSString* s) {
  return new (alloc) MConstant(alloc, StringValue(s));
}

MConstant* MConstant::NewUndefined(TempAllocator& alloc) {
  return new (alloc) MConstant(MIRType::Undefined);
}
// Maps a JS Value to its MIR type. Magic values map to dedicated MIR magic
// types; everything else goes through MIRTypeFromValueType.
static MIRType MIRTypeFromValue(const js::Value& vp) {
  if (vp.isDouble()) {
    return MIRType::Double;
  }
  if (vp.isMagic()) {
    switch (vp.whyMagic()) {
      case JS_OPTIMIZED_OUT:
        return MIRType::MagicOptimizedOut;
      case JS_ELEMENTS_HOLE:
        return MIRType::MagicHole;
      case JS_IS_CONSTRUCTING:
        return MIRType::MagicIsConstructing;
      case JS_UNINITIALIZED_LEXICAL:
        return MIRType::MagicUninitializedLexical;
      default:
        MOZ_ASSERT_UNREACHABLE("Unexpected magic constant");
    }
  }
  return MIRTypeFromValueType(vp.extractNonDoubleType());
}
// Constructs an MConstant from a JS Value, copying the payload into the
// type-discriminated payload_ union. Unused payload bits must stay zero
// (asserted here and relied on by valueHash()/equals()). GC-pointer payloads
// are asserted to be tenured.
MConstant::MConstant(TempAllocator& alloc, const js::Value& vp)
    : MNullaryInstruction(classOpcode) {
  setResultType(MIRTypeFromValue(vp));

  MOZ_ASSERT(payload_.asBits == 0);

  switch (type()) {
    case MIRType::Undefined:
    case MIRType::Null:
      // No payload.
      break;
    case MIRType::Boolean:
      payload_.b = vp.toBoolean();
      break;
    case MIRType::Int32:
      payload_.i32 = vp.toInt32();
      break;
    case MIRType::Double:
      payload_.d = vp.toDouble();
      break;
    case MIRType::String: {
      JSString* str = vp.toString();
      MOZ_ASSERT(!IsInsideNursery(str));
      // Strings are stored as off-thread atoms.
      payload_.str = &str->asOffThreadAtom();
      break;
    }
    case MIRType::Symbol:
      payload_.sym = vp.toSymbol();
      break;
    case MIRType::BigInt:
      MOZ_ASSERT(!IsInsideNursery(vp.toBigInt()));
      payload_.bi = vp.toBigInt();
      break;
    case MIRType::Object:
      MOZ_ASSERT(!IsInsideNursery(&vp.toObject()));
      payload_.obj = &vp.toObject();
      break;
    case MIRType::MagicOptimizedOut:
    case MIRType::MagicHole:
    case MIRType::MagicIsConstructing:
    case MIRType::MagicUninitializedLexical:
      // Magic types carry no payload.
      break;
    default:
      MOZ_CRASH("Unexpected type");
  }

  setMovable();
}
// Object-payload constructor; the object must be tenured.
MConstant::MConstant(JSObject* obj) : MConstant(MIRType::Object) {
  MOZ_ASSERT(!IsInsideNursery(obj));
  payload_.obj = obj;
}

// Shape-payload constructor.
MConstant::MConstant(Shape* shape) : MConstant(MIRType::Shape) {
  payload_.shape = shape;
}
#ifdef DEBUG
void MConstant::assertInitializedPayload() const {
  // valueHash() and equals() expect the unused payload bits to be
  // initialized to zero. Assert this in debug builds.
  //
  // Which bits are "unused" depends on payload width, word size and
  // endianness, hence the per-type, per-endian cases below.
  switch (type()) {
    case MIRType::Int32:
    case MIRType::Float32:
      // 32-bit payload: the other half of the 64-bit union must be zero.
# if MOZ_LITTLE_ENDIAN()
      MOZ_ASSERT((payload_.asBits >> 32) == 0);
# else
      MOZ_ASSERT((payload_.asBits << 32) == 0);
# endif
      break;
    case MIRType::Boolean:
      // 1-bit payload; all other bits must be zero.
# if MOZ_LITTLE_ENDIAN()
      MOZ_ASSERT((payload_.asBits >> 1) == 0);
# else
      MOZ_ASSERT((payload_.asBits & ~(1ULL << 56)) == 0);
# endif
      break;
    case MIRType::Double:
    case MIRType::Int64:
      // Full 64-bit payload; nothing to check.
      break;
    case MIRType::String:
    case MIRType::Object:
    case MIRType::Symbol:
    case MIRType::BigInt:
    case MIRType::IntPtr:
    case MIRType::Shape:
      // Pointer-sized payload: on 32-bit targets the upper half must be zero.
# if MOZ_LITTLE_ENDIAN()
      MOZ_ASSERT_IF(JS_BITS_PER_WORD == 32, (payload_.asBits >> 32) == 0);
# else
      MOZ_ASSERT_IF(JS_BITS_PER_WORD == 32, (payload_.asBits << 32) == 0);
# endif
      break;
    default:
      // Null/undefined/magic types have no payload at all.
      MOZ_ASSERT(IsNullOrUndefined(type()) || IsMagicType(type()));
      MOZ_ASSERT(payload_.asBits == 0);
      break;
  }
}
#endif
// Hash a constant by combining its MIRType with the raw 64-bit payload.
// Correct only because unused payload bits are guaranteed to be zero
// (see assertInitializedPayload).
HashNumber MConstant::valueHash() const {
  static_assert(sizeof(Payload) == sizeof(uint64_t),
                "Code below assumes payload fits in 64 bits");
  assertInitializedPayload();
  return ConstantValueHash(type(), payload_.asBits);
}
// Hash the wrapped proto-object constant, mixing in the receiver object's
// definition id when one is tracked.
HashNumber MConstantProto::valueHash() const {
  HashNumber hash = protoObject()->valueHash();
  if (const MDefinition* receiver = getReceiverObject()) {
    hash = addU32ToHash(hash, receiver->id());
  }
  return hash;
}
// Two constants are congruent (GVN-mergeable) iff the other definition is
// also a constant with an identical type and payload.
bool MConstant::congruentTo(const MDefinition* ins) const {
  return ins->isConstant() && equals(ins->toConstant());
}
#ifdef JS_JITSPEW
// Pretty-print this constant for JIT spew output: the opcode name followed
// by a type-specific rendering of the payload.
void MConstant::printOpcode(GenericPrinter& out) const {
  PrintOpcodeName(out, op());
  out.printf(" ");
  switch (type()) {
    case MIRType::Undefined:
      out.printf("undefined");
      break;
    case MIRType::Null:
      out.printf("null");
      break;
    case MIRType::Boolean:
      out.printf(toBoolean() ? "true" : "false");
      break;
    case MIRType::Int32:
      out.printf("0x%x", uint32_t(toInt32()));
      break;
    case MIRType::Int64:
      out.printf("0x%" PRIx64, uint64_t(toInt64()));
      break;
    case MIRType::IntPtr:
      out.printf("0x%" PRIxPTR, uintptr_t(toIntPtr()));
      break;
    case MIRType::Double:
      out.printf("%.16g", toDouble());
      break;
    case MIRType::Float32: {
      float val = toFloat32();
      out.printf("%.16g", val);
      break;
    }
    case MIRType::Object:
      // Functions get a richer rendering: name (if any), script location,
      // and address. Other objects print address and class name.
      if (toObject().is<JSFunction>()) {
        JSFunction* fun = &toObject().as<JSFunction>();
        if (fun->maybePartialDisplayAtom()) {
          out.put("function ");
          EscapedStringPrinter(out, fun->maybePartialDisplayAtom(), 0);
        } else {
          out.put("unnamed function");
        }
        if (fun->hasBaseScript()) {
          BaseScript* script = fun->baseScript();
          out.printf(" (%s:%u)", script->filename() ? script->filename() : "",
                     script->lineno());
        }
        out.printf(" at %p", (void*)fun);
        break;
      }
      out.printf("object %p (%s)", (void*)&toObject(),
                 toObject().getClass()->name);
      break;
    case MIRType::Symbol:
      out.printf("symbol at %p", (void*)toSymbol());
      break;
    case MIRType::BigInt:
      out.printf("BigInt at %p", (void*)toBigInt());
      break;
    case MIRType::String:
      out.printf("string %p", (void*)toString());
      break;
    case MIRType::Shape:
      out.printf("shape at %p", (void*)toShape());
      break;
    case MIRType::MagicHole:
      out.printf("magic hole");
      break;
    case MIRType::MagicIsConstructing:
      out.printf("magic is-constructing");
      break;
    case MIRType::MagicOptimizedOut:
      out.printf("magic optimized-out");
      break;
    case MIRType::MagicUninitializedLexical:
      out.printf("magic uninitialized-lexical");
      break;
    default:
      MOZ_CRASH("unexpected type");
  }
}
#endif
// A constant can be specialized to Float32 when its numeric value round-trips
// through a float without loss. Only Int32, Double, and Float32 constants
// (the double-representable types) are considered.
bool MConstant::canProduceFloat32() const {
  if (!isTypeRepresentableAsDouble()) {
    return false;
  }
  switch (type()) {
    case MIRType::Int32:
      return IsFloat32Representable(static_cast<double>(toInt32()));
    case MIRType::Double:
      return IsFloat32Representable(toDouble());
    default:
      MOZ_ASSERT(type() == MIRType::Float32);
      return true;
  }
}
// Convert this constant back to a js::Value. Only valid for JS compilations:
// wasm-only types (e.g. Int64) have no Value representation, and wasm does
// not want the NaN canonicalization js::Value performs.
Value MConstant::toJSValue() const {
  // Wasm has types like int64 that cannot be stored as js::Value. It also
  // doesn't want the NaN canonicalization enforced by js::Value.
  MOZ_ASSERT(!IsCompilingWasm());
  switch (type()) {
    case MIRType::Undefined:
      return UndefinedValue();
    case MIRType::Null:
      return NullValue();
    case MIRType::Boolean:
      return BooleanValue(toBoolean());
    case MIRType::Int32:
      return Int32Value(toInt32());
    case MIRType::Double:
      return DoubleValue(toDouble());
    case MIRType::Float32:
      return Float32Value(toFloat32());
    case MIRType::String:
      // The payload is an off-thread atom; unwrap to the underlying JSString.
      return StringValue(toString()->unwrap());
    case MIRType::Symbol:
      return SymbolValue(toSymbol());
    case MIRType::BigInt:
      return BigIntValue(toBigInt());
    case MIRType::Object:
      return ObjectValue(toObject());
    case MIRType::Shape:
      return PrivateGCThingValue(toShape());
    case MIRType::MagicOptimizedOut:
      return MagicValue(JS_OPTIMIZED_OUT);
    case MIRType::MagicHole:
      return MagicValue(JS_ELEMENTS_HOLE);
    case MIRType::MagicIsConstructing:
      return MagicValue(JS_IS_CONSTRUCTING);
    case MIRType::MagicUninitializedLexical:
      return MagicValue(JS_UNINITIALIZED_LEXICAL);
    default:
      MOZ_CRASH("Unexpected type");
  }
}
// Compute ToBoolean() of this constant at compile time. Returns true and
// sets |*res| when the result is statically known; returns false when the
// answer cannot be determined here (objects and magic values).
bool MConstant::valueToBoolean(bool* res) const {
  switch (type()) {
    case MIRType::Boolean:
      *res = toBoolean();
      return true;
    case MIRType::Int32:
      *res = toInt32() != 0;
      return true;
    case MIRType::Int64:
      *res = toInt64() != 0;
      return true;
    case MIRType::IntPtr:
      *res = toIntPtr() != 0;
      return true;
    case MIRType::Double:
      // NaN and ±0 are the only falsy doubles.
      *res = !std::isnan(toDouble()) && toDouble() != 0.0;
      return true;
    case MIRType::Float32:
      *res = !std::isnan(toFloat32()) && toFloat32() != 0.0f;
      return true;
    case MIRType::Null:
    case MIRType::Undefined:
      *res = false;
      return true;
    case MIRType::Symbol:
      *res = true;
      return true;
    case MIRType::BigInt:
      *res = !toBigInt()->isZero();
      return true;
    case MIRType::String:
      // Only the empty string is falsy.
      *res = toString()->length() != 0;
      return true;
    case MIRType::Object:
      // TODO(Warp): Lazy groups have been removed.
      // We have to call EmulatesUndefined but that reads obj->group->clasp
      // and so it's racy when the object has a lazy group. The main callers
      // of this (MTest, MNot) already know how to fold the object case, so
      // just give up.
      return false;
    default:
      MOZ_ASSERT(IsMagicType(type()));
      return false;
  }
}
#ifdef JS_JITSPEW
// Spew this control instruction followed by its successor blocks, e.g.
// " -> block 3, block 7". Successors not yet patched in print a placeholder.
void MControlInstruction::printOpcode(GenericPrinter& out) const {
  MDefinition::printOpcode(out);
  const char* sep = " -> ";
  for (size_t i = 0; i < numSuccessors(); i++) {
    out.printf("%s", sep);
    sep = ", ";
    auto* succ = getSuccessor(i);
    if (succ) {
      out.printf("block %u", succ->id());
    } else {
      out.printf("(null-to-be-patched)");
    }
  }
}
// Spew the compare instruction together with the JS comparison op it
// implements (e.g. "<", "===").
void MCompare::printOpcode(GenericPrinter& out) const {
  MDefinition::printOpcode(out);
  out.printf(" %s", CodeName(jsop()));
}
// Spew the typeof-comparison instruction: the JS op, followed by the
// typeof-string being compared against (e.g. === 'number').
void MTypeOfIs::printOpcode(GenericPrinter& out) const {
  MDefinition::printOpcode(out);
  out.printf(" %s", CodeName(jsop()));
  const char* name = "";
  switch (jstype()) {
    case JSTYPE_UNDEFINED:
      name = "undefined";
      break;
    case JSTYPE_OBJECT:
      name = "object";
      break;
    case JSTYPE_FUNCTION:
      name = "function";
      break;
    case JSTYPE_STRING:
      name = "string";
      break;
    case JSTYPE_NUMBER:
      name = "number";
      break;
    case JSTYPE_BOOLEAN:
      name = "boolean";
      break;
    case JSTYPE_SYMBOL:
      name = "symbol";
      break;
    case JSTYPE_BIGINT:
      name = "bigint";
      break;
    case JSTYPE_LIMIT:
      // JSTYPE_LIMIT is a sentinel, never a real operand.
      MOZ_CRASH("Unexpected type");
  }
  out.printf(" '%s'", name);
}
// Spew the typed-array load together with its scalar storage type
// (e.g. "Int32", "Float64").
void MLoadUnboxedScalar::printOpcode(GenericPrinter& out) const {
  MDefinition::printOpcode(out);
  out.printf(" %s", Scalar::name(storageType()));
}
// Same rendering for DataView element loads.
void MLoadDataViewElement::printOpcode(GenericPrinter& out) const {
  MDefinition::printOpcode(out);
  out.printf(" %s", Scalar::name(storageType()));
}
// Spew the range-assertion instruction followed by the asserted range.
void MAssertRange::printOpcode(GenericPrinter& out) const {
  MDefinition::printOpcode(out);
  out.put(" ");
  assertedRange()->dump(out);
}
// Spew the nearbyint instruction annotated with its rounding mode. The
// switch covers every RoundingMode, so |roundingModeStr| is always set.
void MNearbyInt::printOpcode(GenericPrinter& out) const {
  MDefinition::printOpcode(out);
  const char* roundingModeStr = nullptr;
  switch (roundingMode_) {
    case RoundingMode::Up:
      roundingModeStr = "(up)";
      break;
    case RoundingMode::Down:
      roundingModeStr = "(down)";
      break;
    case RoundingMode::NearestTiesToEven:
      roundingModeStr = "(nearest ties even)";
      break;
    case RoundingMode::TowardsZero:
      roundingModeStr = "(towards zero)";
      break;
  }
  out.printf(" %s", roundingModeStr);
}
#endif
// Math.random() mutates the RNG state, so model it as a store to the RNG
// alias set; this keeps two MRandom calls from being merged or reordered.
AliasSet MRandom::getAliasSet() const { return AliasSet::Store(AliasSet::RNG); }
// Constant-fold Math.sign: when the input is a number constant, compute the
// result now. An Int32-typed MSign only folds when the result is itself
// representable as int32 (e.g. sign(NaN) is NaN and must stay a double).
MDefinition* MSign::foldsTo(TempAllocator& alloc) {
  MDefinition* input = getOperand(0);
  if (!input->isConstant() ||
      !input->toConstant()->isTypeRepresentableAsDouble()) {
    return this;
  }
  double in = input->toConstant()->numberToDouble();
  double out = js::math_sign_impl(in);
  if (type() == MIRType::Int32) {
    // Decline folding if this is an int32 operation, but the result type
    // isn't an int32.
    int32_t i;
    if (!mozilla::NumberIsInt32(out, &i)) {
      return this;
    }
    return MConstant::NewInt32(alloc, i);
  }
  return MConstant::NewDouble(alloc, out);
}
// Human-readable name for a unary math function, used by the spew printer.
const char* MMathFunction::FunctionName(UnaryMathFunction function) {
  return GetUnaryMathFunctionName(function);
}
#ifdef JS_JITSPEW
// Spew the math instruction annotated with the specific function (sin, log,
// etc.) it computes.
void MMathFunction::printOpcode(GenericPrinter& out) const {
  MDefinition::printOpcode(out);
  out.printf(" %s", FunctionName(function()));
}
#endif
// Constant-fold a unary math function (sin, log, ...) when the input is a
// number constant, by calling the same native implementation the JIT would
// call at runtime. A Float32 input folds to a Float32 constant.
MDefinition* MMathFunction::foldsTo(TempAllocator& alloc) {
  MDefinition* input = getOperand(0);
  if (!input->isConstant() ||
      !input->toConstant()->isTypeRepresentableAsDouble()) {
    return this;
  }
  UnaryMathFunctionType funPtr = GetUnaryMathFunctionPtr(function());
  double in = input->toConstant()->numberToDouble();
  // The function pointer call can't GC.
  JS::AutoSuppressGCAnalysis nogc;
  double out = funPtr(in);
  if (input->type() == MIRType::Float32) {
    return MConstant::NewFloat32(alloc, out);
  }
  return MConstant::NewDouble(alloc, out);
}
// Constant-fold Atomics.isLockFree when the requested byte size is a known
// int32 constant; the answer is platform-determined and fixed at compile
// time.
MDefinition* MAtomicIsLockFree::foldsTo(TempAllocator& alloc) {
  MDefinition* in = getOperand(0);
  if (in->isConstant() && in->type() == MIRType::Int32) {
    int32_t size = in->toConstant()->toInt32();
    return MConstant::NewBoolean(alloc, AtomicOperations::isLockfreeJS(size));
  }
  return this;
}
// Define |THIS_SLOT| as part of this translation unit, as it is used to
// specialize the parameterized |New| function calls introduced by
// TRIVIAL_NEW_WRAPPERS.
const int32_t MParameter::THIS_SLOT;
#ifdef JS_JITSPEW
// Spew a parameter: either the symbolic THIS_SLOT or its argument index.
void MParameter::printOpcode(GenericPrinter& out) const {
  PrintOpcodeName(out, op());
  if (index() == THIS_SLOT) {
    out.printf(" THIS_SLOT");
  } else {
    out.printf(" %d", index());
  }
}
#endif
// Hash a parameter by its slot index on top of the base instruction hash,
// so parameters in different slots never collide in GVN.
HashNumber MParameter::valueHash() const {
  return addU32ToHash(MNullaryInstruction::valueHash(), index_);
}
// Two parameters are congruent exactly when they refer to the same slot.
bool MParameter::congruentTo(const MDefinition* ins) const {
  return ins->isParameter() && ins->toParameter()->index() == index_;
}
// Wrap a native JSFunction together with a snapshot of its nargs/flags so
// the compiler can query them without touching the (possibly racy)
// JSFunction itself. Only natives without a JIT entry may be wrapped here.
WrappedFunction::WrappedFunction(JSFunction* nativeFun, uint16_t nargs,
                                 FunctionFlags flags)
    : nativeFun_(nativeFun), nargs_(nargs), flags_(flags) {
  MOZ_ASSERT_IF(nativeFun, isNativeWithoutJitEntry());
#ifdef DEBUG
  // If we are not running off-main thread we can assert that the
  // metadata is consistent.
  if (!CanUseExtraThreads() && nativeFun) {
    MOZ_ASSERT(nativeFun->nargs() == nargs);
    MOZ_ASSERT(nativeFun->isNativeWithoutJitEntry() ==
               isNativeWithoutJitEntry());
    MOZ_ASSERT(nativeFun->hasJitEntry() == hasJitEntry());
    MOZ_ASSERT(nativeFun->isConstructor() == isConstructor());
    MOZ_ASSERT(nativeFun->isClassConstructor() == isClassConstructor());
  }
#endif
}
// Create a call instruction. DOM calls use the MCallDOMNative subclass and
// must supply an object kind and initial heap; non-DOM calls must not.
// |maxArgc| (>= numActualArgs) determines the operand capacity so missing
// arguments can be padded. Returns nullptr on OOM.
MCall* MCall::New(TempAllocator& alloc, WrappedFunction* target, size_t maxArgc,
                  size_t numActualArgs, bool construct, bool ignoresReturnValue,
                  bool isDOMCall, mozilla::Maybe<DOMObjectKind> objectKind,
                  mozilla::Maybe<gc::Heap> initialHeap) {
  MOZ_ASSERT(isDOMCall == objectKind.isSome());
  MOZ_ASSERT(isDOMCall == initialHeap.isSome());
  MOZ_ASSERT(maxArgc >= numActualArgs);
  MCall* ins;
  if (isDOMCall) {
    // DOM natives are never invoked as constructors.
    MOZ_ASSERT(!construct);
    ins = new (alloc)
        MCallDOMNative(target, numActualArgs, *objectKind, *initialHeap);
  } else {
    ins =
        new (alloc) MCall(target, numActualArgs, construct, ignoresReturnValue);
  }
  // Reserve room for callee/|this| (and friends) plus the argument slots.
  if (!ins->init(alloc, maxArgc + NumNonArgumentOperands)) {
    return nullptr;
  }
  return ins;
}
// Compute the alias set of a DOM native call from its JSJitInfo. The result
// is conservative: unless the jitinfo provides typed argument information
// proving that argument coercions are side-effect free, the call is treated
// as aliasing everything.
AliasSet MCallDOMNative::getAliasSet() const {
  const JSJitInfo* jitInfo = getJitInfo();
  // If we don't know anything about the types of our arguments, we have to
  // assume that type-coercions can have side-effects, so we need to alias
  // everything.
  if (jitInfo->aliasSet() == JSJitInfo::AliasEverything ||
      !jitInfo->isTypedMethodJitInfo()) {
    return AliasSet::Store(AliasSet::Any);
  }
  uint32_t argIndex = 0;
  const JSTypedMethodJitInfo* methodInfo =
      reinterpret_cast<const JSTypedMethodJitInfo*>(jitInfo);
  for (const JSJitInfo::ArgType* argType = methodInfo->argTypes;
       *argType != JSJitInfo::ArgTypeListEnd; ++argType, ++argIndex) {
    if (argIndex >= numActualArgs()) {
      // Passing through undefined can't have side-effects
      continue;
    }
    // getArg(0) is "this", so skip it
    MDefinition* arg = getArg(argIndex + 1);
    MIRType actualType = arg->type();
    // The only way to reliably avoid side-effects given the information we
    // have here is if we're passing in a known primitive value to an
    // argument that expects a primitive value.
    //
    // XXXbz maybe we need to communicate better information.  For example,
    // a sequence argument will sort of unavoidably have side effects, while
    // a typed array argument won't have any, but both are claimed to be
    // JSJitInfo::Object.  But if we do that, we need to watch out for our
    // movability/DCE-ability bits: if we have an arg type that can reliably
    // throw an exception on conversion, that might not affect our alias set
    // per se, but it should prevent us being moved or DCE-ed, unless we
    // know the incoming things match that arg type and won't throw.
    //
    if ((actualType == MIRType::Value || actualType == MIRType::Object) ||
        (*argType & JSJitInfo::Object)) {
      return AliasSet::Store(AliasSet::Any);
    }
  }
  // We checked all the args, and they check out.  So we only alias DOM
  // mutations or alias nothing, depending on the alias set in the jitinfo.
  if (jitInfo->aliasSet() == JSJitInfo::AliasNone) {
    return AliasSet::None();
  }
  MOZ_ASSERT(jitInfo->aliasSet() == JSJitInfo::AliasDOMSets);
  return AliasSet::Load(AliasSet::DOMProperty);
}
void MCallDOMNative::computeMovable() {
  // We are movable if the jitinfo says we can be and if we're also not
  // effectful.  The jitinfo can't check for the latter, since it depends
  // on the types of our arguments.
  const JSJitInfo* jitInfo = getJitInfo();
  // A movable call must not alias everything; anything else is a jitinfo bug.
  MOZ_ASSERT_IF(jitInfo->isMovable,
                jitInfo->aliasSet() != JSJitInfo::AliasEverything);
  if (jitInfo->isMovable && !isEffectful()) {
    setMovable();
  }
}
bool MCallDOMNative::congruentTo(const MDefinition* ins) const {
if (!isMovable()) {
return false;
}
if (!ins->isCall()) {
return false;
}
const MCall* call = ins->toCall();
if (!call->isCallDOMNative()) {
return false;
}
if (getSingleTarget() != call->getSingleTarget()) {
return false;
}
if (isConstructing() != call->isConstructing()) {
return false;
}
if (numActualArgs() != call->numActualArgs()) {
return false;
}
if (!congruentIfOperandsEqual(call)) {
return false;
}
// The other call had better be movable at this point!
MOZ_ASSERT(call->isMovable());
return true;
}
// Fetch the JSJitInfo of the single call target; DOM natives always have one.
const JSJitInfo* MCallDOMNative::getJitInfo() const {
  MOZ_ASSERT(getSingleTarget()->hasJitInfo());
  return getSingleTarget()->jitInfo();
}
// Create a class-hook call (JSClass call/construct hook) with room for
// |argc| arguments plus the implicit operands. Returns nullptr on OOM.
MCallClassHook* MCallClassHook::New(TempAllocator& alloc, JSNative target,
                                    uint32_t argc, bool constructing) {
  auto* ins = new (alloc) MCallClassHook(target, constructing);
  // Add callee + |this| + (if constructing) newTarget.
  uint32_t numOperands = 2 + argc + constructing;
  if (!ins->init(alloc, numOperands)) {
    return nullptr;
  }
  return ins;
}
// Fold the length of a constant string to an Int32 constant, and the length
// of a String.fromCharCode result to 1.
MDefinition* MStringLength::foldsTo(TempAllocator& alloc) {
  if (string()->isConstant()) {
    JSOffThreadAtom* str = string()->toConstant()->toString();
    return MConstant::NewInt32(alloc, str->length());
  }
  // MFromCharCode returns a one-element string.
  if (string()->isFromCharCode()) {
    return MConstant::NewInt32(alloc, 1);
  }
  return this;
}
// Fold string concatenation with a constant empty string on either side:
// "" + s == s + "" == s.
MDefinition* MConcat::foldsTo(TempAllocator& alloc) {
  auto isEmptyStringConstant = [](MDefinition* def) {
    return def->isConstant() && def->toConstant()->toString()->empty();
  };
  if (isEmptyStringConstant(lhs())) {
    return rhs();
  }
  if (isEmptyStringConstant(rhs())) {
    return lhs();
  }
  return this;
}
// Fold case conversions that can be done on a cheaper representation:
// per-character conversion for single-char strings, and case selection
// baked into Int32-to-string conversion.
MDefinition* MStringConvertCase::foldsTo(TempAllocator& alloc) {
  MDefinition* string = this->string();
  // Handle the pattern |str[idx].toUpperCase()| and simplify it from
  // |StringConvertCase(FromCharCode(CharCodeAt(str, idx)))| to just
  // |CharCodeConvertCase(CharCodeAt(str, idx))|.
  if (string->isFromCharCode()) {
    auto* charCode = string->toFromCharCode()->code();
    auto mode = mode_ == Mode::LowerCase ? MCharCodeConvertCase::LowerCase
                                         : MCharCodeConvertCase::UpperCase;
    return MCharCodeConvertCase::New(alloc, charCode, mode);
  }
  // Handle the pattern |num.toString(base).toUpperCase()| and simplify it to
  // directly return the string representation in the correct case.
  if (string->isInt32ToStringWithBase()) {
    auto* toString = string->toInt32ToStringWithBase();
    bool lowerCase = mode_ == Mode::LowerCase;
    if (toString->lowerCase() == lowerCase) {
      // Already in the requested case; the conversion is a no-op.
      return toString;
    }
    // Re-create the conversion with the opposite case flag.
    return MInt32ToStringWithBase::New(alloc, toString->input(),
                                       toString->base(), lowerCase);
  }
  return this;
}
// Return true if |def| is `MConstant(Int32(0))`.
static bool IsConstantZeroInt32(MDefinition* def) {
  return def->isConstant() && def->toConstant()->isInt32(0);
}
// If |def| is `MBitOr` and one operand is `MConstant(Int32(0))`, then return
// the other operand (x | 0 == x). Otherwise return |def| unchanged.
static MDefinition* RemoveUnnecessaryBitOps(MDefinition* def) {
  if (!def->isBitOr()) {
    return def;
  }
  auto* bitOr = def->toBitOr();
  if (IsConstantZeroInt32(bitOr->lhs())) {
    return bitOr->rhs();
  }
  if (IsConstantZeroInt32(bitOr->rhs())) {
    return bitOr->lhs();
  }
  return def;
}
// Return a match if both operands of |binary| have the requested types. If
// |binary| is commutative, the operands may appear in any order. The pair is
// always returned as (Lhs*, Rhs*) regardless of the original order.
template <typename Lhs, typename Rhs>
static mozilla::Maybe<std::pair<Lhs*, Rhs*>> MatchOperands(
    MBinaryInstruction* binary) {
  auto* lhs = binary->lhs();
  auto* rhs = binary->rhs();
  if (lhs->is<Lhs>() && rhs->is<Rhs>()) {
    return mozilla::Some(std::pair{lhs->to<Lhs>(), rhs->to<Rhs>()});
  }
  // For commutative ops, also try the swapped assignment.
  if (binary->isCommutative() && rhs->is<Lhs>() && lhs->is<Rhs>()) {
    return mozilla::Some(std::pair{rhs->to<Lhs>(), lhs->to<Rhs>()});
  }
  return mozilla::Nothing();
}
// Return true if |substr| takes the first |len| characters of its string,
// i.e. is a substring starting at 0 whose length is min(len, str.length).
static bool IsSubstrTo(MSubstr* substr, int32_t len) {
  // We want to match this pattern:
  //
  // Substr(string, Constant(0), Min(Constant(length), StringLength(string)))
  //
  // which is generated for the self-hosted `String.p.{substring,slice,substr}`
  // functions when called with constants `start` and `end` parameters.
  if (!IsConstantZeroInt32(substr->begin())) {
    return false;
  }
  // Unnecessary bit-ops haven't yet been removed.
  auto* length = RemoveUnnecessaryBitOps(substr->length());
  // The length must be a Min (not Max) of a constant and the string length.
  if (!length->isMinMax() || length->toMinMax()->isMax()) {
    return false;
  }
  auto match = MatchOperands<MConstant, MStringLength>(length->toMinMax());
  if (!match) {
    return false;
  }
  // Ensure |len| matches the substring's length and the StringLength reads
  // the same string the Substr operates on.
  auto [cst, strLength] = *match;
  return cst->isInt32(len) && strLength->string() == substr->string();
}
// Return true if |substr| takes the last |-start| characters of its string,
// i.e. matches the MIR emitted for `str.slice(start)` / `str.substr(start)`
// with a negative constant |start| and no end argument.
static bool IsSubstrLast(MSubstr* substr, int32_t start) {
  MOZ_ASSERT(start < 0, "start from end is negative");
  // We want to match either this pattern:
  //
  // begin = Max(StringLength(string) + start, 0)
  // length = Max(StringLength(string) - begin, 0)
  // Substr(string, begin, length)
  //
  // or this pattern:
  //
  // begin = Max(StringLength(string) + start, 0)
  // length = Min(StringLength(string), StringLength(string) - begin)
  // Substr(string, begin, length)
  //
  // which is generated for the self-hosted `String.p.{slice,substr}`
  // functions when called with parameters `start < 0` and `end = undefined`.
  auto* string = substr->string();
  // Unnecessary bit-ops haven't yet been removed.
  auto* begin = RemoveUnnecessaryBitOps(substr->begin());
  auto* length = RemoveUnnecessaryBitOps(substr->length());
  // Matches: Max(StringLength(string) + start, 0)
  auto matchesBegin = [&]() {
    if (!begin->isMinMax() || !begin->toMinMax()->isMax()) {
      return false;
    }
    auto maxOperands = MatchOperands<MAdd, MConstant>(begin->toMinMax());
    if (!maxOperands) {
      return false;
    }
    auto [add, cst] = *maxOperands;
    if (!cst->isInt32(0)) {
      return false;
    }
    // The Add must be StringLength(string) + start for the same string.
    auto addOperands = MatchOperands<MStringLength, MConstant>(add);
    if (!addOperands) {
      return false;
    }
    auto [strLength, cstAdd] = *addOperands;
    return strLength->string() == string && cstAdd->isInt32(start);
  };
  // Matches: Max(StringLength(string) - begin, 0)
  auto matchesSliceLength = [&]() {
    if (!length->isMinMax() || !length->toMinMax()->isMax()) {
      return false;
    }
    auto maxOperands = MatchOperands<MSub, MConstant>(length->toMinMax());
    if (!maxOperands) {
      return false;
    }
    auto [sub, cst] = *maxOperands;
    if (!cst->isInt32(0)) {
      return false;
    }
    // The subtrahend must be the same Min/Max node used as |begin|.
    auto subOperands = MatchOperands<MStringLength, MMinMax>(sub);
    if (!subOperands) {
      return false;
    }
    auto [strLength, minmax] = *subOperands;
    return strLength->string() == string && minmax == begin;
  };
  // Matches: Min(StringLength(string), StringLength(string) - begin)
  auto matchesSubstrLength = [&]() {
    if (!length->isMinMax() || length->toMinMax()->isMax()) {
      return false;
    }
    auto minOperands = MatchOperands<MStringLength, MSub>(length->toMinMax());
    if (!minOperands) {
      return false;
    }
    auto [strLength1, sub] = *minOperands;
    if (strLength1->string() != string) {
      return false;
    }
    auto subOperands = MatchOperands<MStringLength, MMinMax>(sub);
    if (!subOperands) {
      return false;
    }
    auto [strLength2, minmax] = *subOperands;
    return strLength2->string() == string && minmax == begin;
  };
  // The begin pattern is mandatory; the length may match either form.
  return matchesBegin() && (matchesSliceLength() || matchesSubstrLength());
}
// Fold substring extraction of a single character into the cheaper
// CharCodeAt/FromCharCode pairing, for both "first char" and "last char"
// patterns.
MDefinition* MSubstr::foldsTo(TempAllocator& alloc) {
  // Fold |str.substring(0, 1)| to |str.charAt(0)|.
  if (IsSubstrTo(this, 1)) {
    MOZ_ASSERT(IsConstantZeroInt32(begin()));
    auto* charCode = MCharCodeAtOrNegative::New(alloc, string(), begin());
    block()->insertBefore(this, charCode);
    // An out-of-range (negative) char code yields the empty string, matching
    // charAt semantics on an empty input.
    return MFromCharCodeEmptyIfNegative::New(alloc, charCode);
  }
  // Fold |str.slice(-1)| and |str.substr(-1)| to |str.charAt(str.length + -1)|.
  if (IsSubstrLast(this, -1)) {
    auto* length = MStringLength::New(alloc, string());
    block()->insertBefore(this, length);
    auto* index = MConstant::NewInt32(alloc, -1);
    block()->insertBefore(this, index);
    // Folded MToRelativeStringIndex, see MToRelativeStringIndex::foldsTo.
    //
    // Safe to truncate because |length| is never negative.
    auto* add = MAdd::New(alloc, index, length, TruncateKind::Truncate);
    block()->insertBefore(this, add);
    auto* charCode = MCharCodeAtOrNegative::New(alloc, string(), add);
    block()->insertBefore(this, charCode);
    return MFromCharCodeEmptyIfNegative::New(alloc, charCode);
  }
  return this;
}
// Constant-fold charCodeAt: either read the character from a constant string
// at a constant in-range index, or peel off a FromCharCode wrapper when
// reading back its only character.
MDefinition* MCharCodeAt::foldsTo(TempAllocator& alloc) {
  MDefinition* string = this->string();
  if (!string->isConstant() && !string->isFromCharCode()) {
    return this;
  }
  // Look through Spectre index masking to find the underlying index.
  MDefinition* index = this->index();
  if (index->isSpectreMaskIndex()) {
    index = index->toSpectreMaskIndex()->index();
  }
  if (!index->isConstant()) {
    return this;
  }
  int32_t idx = index->toConstant()->toInt32();
  // Handle the pattern |s[idx].charCodeAt(0)|.
  if (string->isFromCharCode()) {
    if (idx != 0) {
      // FromCharCode strings have length 1; any other index is out of range.
      return this;
    }
    // Simplify |CharCodeAt(FromCharCode(CharCodeAt(s, idx)), 0)| to just
    // |CharCodeAt(s, idx)|.
    auto* charCode = string->toFromCharCode()->code();
    if (!charCode->isCharCodeAt()) {
      return this;
    }
    return charCode;
  }
  JSOffThreadAtom* str = string->toConstant()->toString();
  if (idx < 0 || uint32_t(idx) >= str->length()) {
    // Out-of-range access; leave runtime semantics to the instruction.
    return this;
  }
  char16_t ch = str->latin1OrTwoByteChar(idx);
  return MConstant::NewInt32(alloc, ch);
}
// Constant-fold codePointAt, mirroring MCharCodeAt::foldsTo but combining a
// lead/trail surrogate pair into a single code point when folding a constant
// string.
MDefinition* MCodePointAt::foldsTo(TempAllocator& alloc) {
  MDefinition* string = this->string();
  if (!string->isConstant() && !string->isFromCharCode()) {
    return this;
  }
  // Look through Spectre index masking to find the underlying index.
  MDefinition* index = this->index();
  if (index->isSpectreMaskIndex()) {
    index = index->toSpectreMaskIndex()->index();
  }
  if (!index->isConstant()) {
    return this;
  }
  int32_t idx = index->toConstant()->toInt32();
  // Handle the pattern |s[idx].codePointAt(0)|.
  if (string->isFromCharCode()) {
    if (idx != 0) {
      return this;
    }
    // Simplify |CodePointAt(FromCharCode(CharCodeAt(s, idx)), 0)| to just
    // |CharCodeAt(s, idx)|. Valid because FromCharCode yields a single code
    // unit, which can never start a surrogate pair.
    auto* charCode = string->toFromCharCode()->code();
    if (!charCode->isCharCodeAt()) {
      return this;
    }
    return charCode;
  }
  JSOffThreadAtom* str = string->toConstant()->toString();
  if (idx < 0 || uint32_t(idx) >= str->length()) {
    return this;
  }
  char32_t first = str->latin1OrTwoByteChar(idx);
  // If the unit at |idx| is a lead surrogate followed by a trail surrogate,
  // decode the full code point; otherwise return the unit unchanged (lone
  // surrogates are returned as-is, per spec).
  if (unicode::IsLeadSurrogate(first) && uint32_t(idx) + 1 < str->length()) {
    char32_t second = str->latin1OrTwoByteChar(idx + 1);
    if (unicode::IsTrailSurrogate(second)) {
      first = unicode::UTF16Decode(first, second);
    }
  }
  return MConstant::NewInt32(alloc, first);
}
// Fold relative-index conversion for a constant index: a non-negative index
// passes through unchanged; a negative one becomes |index + length|.
MDefinition* MToRelativeStringIndex::foldsTo(TempAllocator& alloc) {
  MDefinition* index = this->index();
  MDefinition* length = this->length();
  if (!index->isConstant()) {
    return this;
  }
  // Only fold when the length is known to be a valid string length (a
  // StringLength node or a non-negative constant).
  if (!length->isStringLength() && !length->isConstant()) {
    return this;
  }
  MOZ_ASSERT_IF(length->isConstant(), length->toConstant()->toInt32() >= 0);
  int32_t relativeIndex = index->toConstant()->toInt32();
  if (relativeIndex >= 0) {
    return index;
  }
  // Safe to truncate because |length| is never negative.
  return MAdd::New(alloc, index, length, TruncateKind::Truncate);
}
// Helper for Float32 specialization of instructions with a non-float result
// type: returns true when all operands can produce Float32; otherwise
// converts the operands to double and returns false.
template <size_t Arity>
[[nodiscard]] static bool EnsureFloatInputOrConvert(
    MAryInstruction<Arity>* owner, TempAllocator& alloc) {
  MOZ_ASSERT(!IsFloatingPointType(owner->type()),
             "Floating point types must check consumers");
  if (AllOperandsCanProduceFloat32(owner)) {
    return true;
  }
  ConvertOperandsToDouble(owner, alloc);
  return false;
}
// Same as above, but for instructions whose result is floating point: the
// consumers must also all accept Float32, otherwise the specialization would
// change observable precision.
template <size_t Arity>
[[nodiscard]] static bool EnsureFloatConsumersAndInputOrConvert(
    MAryInstruction<Arity>* owner, TempAllocator& alloc) {
  MOZ_ASSERT(IsFloatingPointType(owner->type()),
             "Integer types don't need to check consumers");
  if (AllOperandsCanProduceFloat32(owner) &&
      CheckUsesAreFloat32Consumers(owner)) {
    return true;
  }
  ConvertOperandsToDouble(owner, alloc);
  return false;
}
// Float32 specialization for the int32-producing rounding instructions
// (floor/ceil/round/trunc): the result stays Int32, so only the input needs
// to be Float32-capable; otherwise the input is converted to double.
void MFloor::trySpecializeFloat32(TempAllocator& alloc) {
  MOZ_ASSERT(type() == MIRType::Int32);
  if (EnsureFloatInputOrConvert(this, alloc)) {
    specialization_ = MIRType::Float32;
  }
}
void MCeil::trySpecializeFloat32(TempAllocator& alloc) {
  MOZ_ASSERT(type() == MIRType::Int32);
  if (EnsureFloatInputOrConvert(this, alloc)) {
    specialization_ = MIRType::Float32;
  }
}
void MRound::trySpecializeFloat32(TempAllocator& alloc) {
  MOZ_ASSERT(type() == MIRType::Int32);
  if (EnsureFloatInputOrConvert(this, alloc)) {
    specialization_ = MIRType::Float32;
  }
}
void MTrunc::trySpecializeFloat32(TempAllocator& alloc) {
  MOZ_ASSERT(type() == MIRType::Int32);
  if (EnsureFloatInputOrConvert(this, alloc)) {
    specialization_ = MIRType::Float32;
  }
}
// Float32 specialization for float-producing rounding instructions: both the
// inputs and all consumers must accept Float32, and on success the result
// type changes to Float32 as well.
void MNearbyInt::trySpecializeFloat32(TempAllocator& alloc) {
  if (EnsureFloatConsumersAndInputOrConvert(this, alloc)) {
    specialization_ = MIRType::Float32;
    setResultType(MIRType::Float32);
  }
}
void MRoundToDouble::trySpecializeFloat32(TempAllocator& alloc) {
  if (EnsureFloatConsumersAndInputOrConvert(this, alloc)) {
    specialization_ = MIRType::Float32;
    setResultType(MIRType::Float32);
  }
}
// Create an unconditional branch to |target| (infallible allocation).
MGoto* MGoto::New(TempAllocator& alloc, MBasicBlock* target) {
  return new (alloc) MGoto(target);
}
// Fallible variant; the target must be known.
MGoto* MGoto::New(TempAllocator::Fallible alloc, MBasicBlock* target) {
  MOZ_ASSERT(target);
  return new (alloc) MGoto(target);
}
// Create a goto whose successor will be patched in later.
MGoto* MGoto::New(TempAllocator& alloc) { return new (alloc) MGoto(nullptr); }
// Fold Box(Unbox(v)) to v: re-boxing an unboxed value yields the original
// boxed value.
MDefinition* MBox::foldsTo(TempAllocator& alloc) {
  MDefinition* in = input();
  if (!in->isUnbox()) {
    return this;
  }
  return in->toUnbox()->input();
}
#ifdef JS_JITSPEW
// Spew the unbox instruction: its operand, the target type, and whether the
// unbox can bail out (fallible) or is statically known to succeed.
void MUnbox::printOpcode(GenericPrinter& out) const {
  PrintOpcodeName(out, op());
  out.printf(" ");
  getOperand(0)->printName(out);
  out.printf(" ");
  switch (type()) {
    case MIRType::Int32:
      out.printf("to Int32");
      break;
    case MIRType::Double:
      out.printf("to Double");
      break;
    case MIRType::Boolean:
      out.printf("to Boolean");
      break;
    case MIRType::String:
      out.printf("to String");
      break;
    case MIRType::Symbol:
      out.printf("to Symbol");
      break;
    case MIRType::BigInt:
      out.printf("to BigInt");
      break;
    case MIRType::Object:
      out.printf("to Object");
      break;
    default:
      break;
  }
  switch (mode()) {
    case Fallible:
      out.printf(" (fallible)");
      break;
    case Infallible:
      out.printf(" (infallible)");
      break;
    default:
      break;
  }
}
#endif
// Fold Unbox(Box(x)) according to the relationship between |x|'s type and
// the unbox target type: pass-through on exact match, numeric conversion for
// Double targets, and an always-failing guard for Int32-of-Double.
MDefinition* MUnbox::foldsTo(TempAllocator& alloc) {
  if (input()->isBox()) {
    MDefinition* unboxed = input()->toBox()->input();
    // Fold MUnbox(MBox(x)) => x if types match.
    if (unboxed->type() == type()) {
      if (fallible()) {
        // Keep bailout bookkeeping honest: the guard disappears but its
        // input counts as implicitly used.
        unboxed->setImplicitlyUsedUnchecked();
      }
      return unboxed;
    }
    // Fold MUnbox(MBox(x)) => MToDouble(x) if possible.
    if (type() == MIRType::Double &&
        IsTypeRepresentableAsDouble(unboxed->type())) {
      if (unboxed->isConstant()) {
        return MConstant::NewDouble(alloc,
                                    unboxed->toConstant()->numberToDouble());
      }
      return MToDouble::New(alloc, unboxed);
    }
    // MUnbox<Int32>(MBox<Double>(x)) will always fail, even if x can be
    // represented as an Int32. Fold to avoid unnecessary bailouts.
    if (type() == MIRType::Int32 && unboxed->type() == MIRType::Double) {
      auto* folded = MToNumberInt32::New(alloc, unboxed,
                                         IntConversionInputKind::NumbersOnly);
      // The conversion keeps the same bailout behavior, so it must not be
      // eliminated.
      folded->setGuard();
      return folded;
    }
  }
  return this;
}
#ifdef DEBUG
// Debug-only check that this phi lives in a loop header with its
// predecessors arranged in the canonical order.
void MPhi::assertLoopPhi() const {
  // getLoopPredecessorOperand and getLoopBackedgeOperand rely on these
  // predecessors being at known indices.
  if (block()->numPredecessors() == 2) {
    // Canonical shape: predecessor 0 is the loop entry, predecessor 1 the
    // backedge.
    MBasicBlock* pred = block()->getPredecessor(0);
    MBasicBlock* back = block()->getPredecessor(1);
    MOZ_ASSERT(pred == block()->loopPredecessor());
    MOZ_ASSERT(pred->successorWithPhis() == block());
    MOZ_ASSERT(pred->positionInPhiSuccessor() == 0);
    MOZ_ASSERT(back == block()->backedge());
    MOZ_ASSERT(back->successorWithPhis() == block());
    MOZ_ASSERT(back->positionInPhiSuccessor() == 1);
  } else {
    // After we remove fake loop predecessors for loop headers that
    // are only reachable via OSR, the only predecessor is the
    // loop backedge.
    MOZ_ASSERT(block()->numPredecessors() == 1);
    MOZ_ASSERT(block()->graph().osrBlock());
    MOZ_ASSERT(!block()->graph().canBuildDominators());
    MBasicBlock* back = block()->getPredecessor(0);
    MOZ_ASSERT(back == block()->backedge());
    MOZ_ASSERT(back->successorWithPhis() == block());
    MOZ_ASSERT(back->positionInPhiSuccessor() == 0);
  }
}
#endif
// Operand flowing in from the loop entry edge (predecessor 0).
MDefinition* MPhi::getLoopPredecessorOperand() const {
  // This should not be called after removing fake loop predecessors.
  MOZ_ASSERT(block()->numPredecessors() == 2);
  assertLoopPhi();
  return getOperand(0);
}
// Operand flowing in from the backedge. After fake loop predecessors are
// removed the backedge is the sole predecessor, at index 0 instead of 1.
MDefinition* MPhi::getLoopBackedgeOperand() const {
  assertLoopPhi();
  uint32_t idx = block()->numPredecessors() == 2 ? 1 : 0;
  return getOperand(idx);
}
// Remove the operand at |index|, shifting later operands down one slot and
// re-linking their use entries so each producer's use list stays valid.
void MPhi::removeOperand(size_t index) {
  MOZ_ASSERT(index < numOperands());
  MOZ_ASSERT(getUseFor(index)->index() == index);
  MOZ_ASSERT(getUseFor(index)->consumer() == this);
  // If we have phi(..., a, b, c, d, ..., z) and we plan
  // on removing a, then first shift downward so that we have
  // phi(..., b, c, d, ..., z, z):
  MUse* p = inputs_.begin() + index;
  MUse* e = inputs_.end();
  // Detach the removed operand from its producer's use list first.
  p->producer()->removeUse(p);
  for (; p < e - 1; ++p) {
    // Move each following MUse down one slot, updating the producer's use
    // list to point at the new location.
    MDefinition* producer = (p + 1)->producer();
    p->setProducerUnchecked(producer);
    producer->replaceUse(p + 1, p);
  }
  // truncate the inputs_ list:
  inputs_.popBack();
}
void MPhi::removeAllOperands() {
for (MUse& p : inputs_) {
p.producer()->removeUse(&p);
}
inputs_.clear();
}
MDefinition* MPhi::foldsTernary(TempAllocator& alloc) {
  /* Look if this MPhi is a ternary construct.
   * This is a very loose term as it actually only checks for
   *
   *      MTest X
   *       /  \
   *     ...  ...
   *       \  /
   *     MPhi X Y
   *
   * Which we will simply call:
   * x ? x : y or x ? y : x
   */
  if (numOperands() != 2) {
    return nullptr;
  }
  MOZ_ASSERT(block()->numPredecessors() == 2);
  // The immediate dominator must end in the MTest that selects between the
  // two predecessors of this phi.
  MBasicBlock* pred = block()->immediateDominator();
  if (!pred || !pred->lastIns()->isTest()) {
    return nullptr;
  }
  MTest* test = pred->lastIns()->toTest();
  // True branch may only dominate one edge of MPhi.
  if (test->ifTrue()->dominates(block()->getPredecessor(0)) ==
      test->ifTrue()->dominates(block()->getPredecessor(1))) {
    return nullptr;
  }
  // False branch may only dominate one edge of MPhi.
  if (test->ifFalse()->dominates(block()->getPredecessor(0)) ==
      test->ifFalse()->dominates(block()->getPredecessor(1))) {
    return nullptr;
  }
  // True and false branch must dominate different edges of MPhi.
  if (test->ifTrue()->dominates(block()->getPredecessor(0)) ==
      test->ifFalse()->dominates(block()->getPredecessor(0))) {
    return nullptr;
  }
  // We found a ternary construct.
  bool firstIsTrueBranch =
      test->ifTrue()->dominates(block()->getPredecessor(0));
  MDefinition* trueDef = firstIsTrueBranch ? getOperand(0) : getOperand(1);
  MDefinition* falseDef = firstIsTrueBranch ? getOperand(1) : getOperand(0);
  // Accept either
  // testArg ? testArg : constant or
  // testArg ? constant : testArg
  if (!trueDef->isConstant() && !falseDef->isConstant()) {
    return nullptr;
  }
  MConstant* c =
      trueDef->isConstant() ? trueDef->toConstant() : falseDef->toConstant();
  MDefinition* testArg = (trueDef == c) ? falseDef : trueDef;
  // The non-constant operand must be the very value the MTest branches on.
  if (testArg != test->input()) {
    return nullptr;
  }
  // This check should be a tautology, except that the constant might be the
  // result of the removal of a branch. In such case the domination scope of
  // the block which is holding the constant might be incomplete. This
  // condition is used to prevent doing this optimization based on incomplete
  // information.
  //
  // As GVN removed a branch, it will update the dominations rules before
  // trying to fold this MPhi again. Thus, this condition does not inhibit
  // this optimization.
  MBasicBlock* truePred = block()->getPredecessor(firstIsTrueBranch ? 0 : 1);
  MBasicBlock* falsePred = block()->getPredecessor(firstIsTrueBranch ? 1 : 0);
  if (!trueDef->block()->dominates(truePred) ||
      !falseDef->block()->dominates(falsePred)) {
    return nullptr;
  }
  // If testArg is an int32 type we can:
  // - fold testArg ? testArg : 0 to testArg
  // - fold testArg ? 0 : testArg to 0
  // (both cases return trueDef: it is testArg in the first and the hoisted
  // constant 0 in the second).
  if (testArg->type() == MIRType::Int32 && c->numberToDouble() == 0) {
    testArg->setGuardRangeBailoutsUnchecked();
    // When folding to the constant we need to hoist it.
    if (trueDef == c && !c->block()->dominates(block())) {
      c->block()->moveBefore(pred->lastIns(), c);
    }
    return trueDef;
  }
  // If testArg is an double type we can:
  // - fold testArg ? testArg : 0.0 to MNaNToZero(testArg)
  // (NaN is falsy, so the false arm's +0.0 is exactly what MNaNToZero
  // produces for NaN inputs).
  if (testArg->type() == MIRType::Double &&
      mozilla::IsPositiveZero(c->numberToDouble()) && c != trueDef) {
    MNaNToZero* replace = MNaNToZero::New(alloc, testArg);
    test->block()->insertBefore(test, replace);
    return replace;
  }
  // If testArg is a string type we can:
  // - fold testArg ? testArg : "" to testArg
  // - fold testArg ? "" : testArg to ""
  if (testArg->type() == MIRType::String && c->toString()->empty()) {
    // When folding to the constant we need to hoist it.
    if (trueDef == c && !c->block()->dominates(block())) {
      c->block()->moveBefore(pred->lastIns(), c);
    }
    return trueDef;
  }
  return nullptr;
}
MDefinition* MPhi::operandIfRedundant() {
  // If this phi is redundant (e.g., phi(a,a) or b=phi(a,this)), return the
  // operand it is always equal to (a in both examples); otherwise nullptr.
  if (inputs_.length() == 0) {
    return nullptr;
  }
  MDefinition* candidate = getOperand(0);
  for (size_t i = 1; i < numOperands(); i++) {
    MDefinition* op = getOperand(i);
    bool matches = op == candidate || op == this;
    if (!matches) {
      return nullptr;
    }
  }
  return candidate;
}
MDefinition* MPhi::foldsTo(TempAllocator& alloc) {
  // A phi folds either to an operand it is always equal to, or to a
  // recognized ternary pattern; otherwise it stays as-is.
  MDefinition* redundant = operandIfRedundant();
  if (redundant) {
    return redundant;
  }
  MDefinition* ternary = foldsTernary(alloc);
  if (ternary) {
    return ternary;
  }
  return this;
}
bool MPhi::congruentTo(const MDefinition* ins) const {
  if (!ins->isPhi()) {
    return false;
  }
  // Two phis with identical operands are still not interchangeable when
  // they live in different blocks, because each phi also encodes the
  // control condition selecting among its operands. For example:
  //
  //   if (p)
  //     goto a
  //   a:
  //     t = phi(x, y)
  //
  //   if (q)
  //     goto b
  //   b:
  //     s = phi(x, y)
  //
  // t is effectively p?x:y while s is effectively q?x:y, so despite the
  // identical operands they are not equivalent.
  //
  // For now, conservatively treat phis in different blocks as incongruent.
  if (ins->block() != block()) {
    return false;
  }
  return congruentIfOperandsEqual(ins);
}
void MPhi::updateForReplacement(MPhi* other) {
  // Merge |other|'s usage-analysis state into this phi, which is replacing
  // it.
  //
  // Used dominates every other state. Two differing non-Used states (one
  // Unused, one Unknown) merge to Unknown. Identical states stay put.
  PhiUsage theirs = other->usageAnalysis_;
  if (usageAnalysis_ == PhiUsage::Used || theirs == PhiUsage::Used) {
    usageAnalysis_ = PhiUsage::Used;
  } else if (usageAnalysis_ != theirs) {
    // One side is Unused and the other Unknown.
    usageAnalysis_ = PhiUsage::Unknown;
  } else {
    // Both Unused or both Unknown: nothing to change.
    MOZ_ASSERT(usageAnalysis_ == PhiUsage::Unused ||
               usageAnalysis_ == PhiUsage::Unknown);
    MOZ_ASSERT(usageAnalysis_ == theirs);
  }
}
/* static */
bool MPhi::markIteratorPhis(const PhiVector& iterators) {
  // Find and mark phis that must transitively hold an iterator live.
  // Returns false only on OOM while growing the worklist.
  Vector<MPhi*, 8, SystemAllocPolicy> worklist;
  // Seed the worklist with the known iterator phis; the in-worklist flag
  // prevents duplicate entries.
  for (MPhi* iter : iterators) {
    if (!iter->isInWorklist()) {
      if (!worklist.append(iter)) {
        return false;
      }
      iter->setInWorklist();
    }
  }
  // Propagate: any phi consuming a marked phi must itself be marked as
  // holding the iterator, and kept alive via setImplicitlyUsedUnchecked.
  while (!worklist.empty()) {
    MPhi* phi = worklist.popCopy();
    phi->setNotInWorklist();
    phi->setIterator();
    phi->setImplicitlyUsedUnchecked();
    for (MUseDefIterator iter(phi); iter; iter++) {
      MDefinition* use = iter.def();
      // Only enqueue phi consumers that are not already marked or queued.
      if (!use->isInWorklist() && use->isPhi() && !use->toPhi()->isIterator()) {
        if (!worklist.append(use->toPhi())) {
          return false;
        }
        use->setInWorklist();
      }
    }
  }
  return true;
}
bool MPhi::typeIncludes(MDefinition* def) {
  MOZ_ASSERT(!IsMagicType(def->type()));
  MIRType defType = def->type();
  MIRType phiType = this->type();
  // An exact type match always fits.
  if (defType == phiType) {
    return true;
  }
  // A Value-typed phi can hold any value.
  if (phiType == MIRType::Value) {
    return true;
  }
  // Int32 values are exactly representable as doubles.
  return defType == MIRType::Int32 && phiType == MIRType::Double;
}
void MCallBase::addArg(size_t argnum, MDefinition* arg) {
  // Store call argument |argnum| into its operand slot, after the fixed
  // non-argument operands.
  // The operand vector is initialized in reverse order by WarpBuilder.
  // It cannot be checked for consistency until all arguments are added.
  // FixedList doesn't initialize its elements, so do an unchecked init.
  initOperand(argnum + NumNonArgumentOperands, arg);
}
static inline bool IsConstant(MDefinition* def, double v) {
  // True iff |def| is a constant whose numeric value is identical to |v|.
  // NumbersAreIdentical distinguishes +0.0 from -0.0 and matches NaN.
  return def->isConstant() &&
         NumbersAreIdentical(def->toConstant()->numberToDouble(), v);
}
static inline bool IsConstantInt64(MDefinition* def, int64_t v) {
  // True iff |def| is an Int64 constant equal to |v|.
  return def->isConstant() && def->toConstant()->toInt64() == v;
}
static inline bool IsConstantIntPtr(MDefinition* def, intptr_t v) {
  // True iff |def| is an IntPtr constant equal to |v|.
  return def->isConstant() && def->toConstant()->toIntPtr() == v;
}
MDefinition* MBinaryBitwiseInstruction::foldsTo(TempAllocator& alloc) {
  // Constant-fold when both operands are constants of the result type.
  // Identity operations are removed (for int32 only) in
  // foldUnnecessaryBitop.
  MDefinition* folded = nullptr;
  switch (type()) {
    case MIRType::Int32:
      folded = EvaluateInt32ConstantOperands(alloc, this);
      break;
    case MIRType::Int64:
      folded = EvaluateInt64ConstantOperands(alloc, this);
      break;
    case MIRType::IntPtr:
      folded = EvaluateIntPtrConstantOperands(alloc, this);
      break;
    default:
      break;
  }
  return folded ? folded : this;
}
MDefinition* MBinaryBitwiseInstruction::foldUnnecessaryBitop() {
  // Remove bitwise operations that are identities on their int32 inputs,
  // e.g. (x | 0), (x & -1), (x ^ x).
  //
  // It's probably OK to perform this optimization only for int32, as it will
  // have the greatest effect for asm.js code that is compiled with the JS
  // pipeline, and that code will not see int64 values.
  if (type() != MIRType::Int32) {
    return this;
  }
  // Fold unsigned shift right operator when the second operand is zero and
  // the only use is an unsigned modulo. Thus, the expression
  // |(x >>> 0) % y| becomes |x % y|.
  if (isUrsh() && IsUint32Type(this)) {
    MDefinition* defUse = maybeSingleDefUse();
    if (defUse && defUse->isMod() && defUse->toMod()->isUnsigned()) {
      return getOperand(0);
    }
  }
  // Eliminate bitwise operations that are no-ops when used on integer
  // inputs, such as (x | 0).
  MDefinition* lhs = getOperand(0);
  MDefinition* rhs = getOperand(1);
  if (IsConstant(lhs, 0)) {
    return foldIfZero(0);
  }
  if (IsConstant(rhs, 0)) {
    return foldIfZero(1);
  }
  if (IsConstant(lhs, -1)) {
    return foldIfNegOne(0);
  }
  if (IsConstant(rhs, -1)) {
    return foldIfNegOne(1);
  }
  if (lhs == rhs) {
    return foldIfEqual();
  }
  // maskMatchesRightRange/maskMatchesLeftRange are member flags indicating
  // that one side is a constant mask already covering the other side's
  // value range, so the masking is a no-op.
  if (maskMatchesRightRange) {
    MOZ_ASSERT(lhs->isConstant());
    MOZ_ASSERT(lhs->type() == MIRType::Int32);
    return foldIfAllBitsSet(0);
  }
  if (maskMatchesLeftRange) {
    MOZ_ASSERT(rhs->isConstant());
    MOZ_ASSERT(rhs->type() == MIRType::Int32);
    return foldIfAllBitsSet(1);
  }
  return this;
}
static inline bool CanProduceNegativeZero(MDefinition* def) {
  // Test if this instruction can produce negative zero even when bailing out
  // and changing types.
  switch (def->op()) {
    case MDefinition::Opcode::Constant:
      // Only a double constant equal to -0.0 is negative zero; every other
      // constant is not.
      return def->type() == MIRType::Double &&
             def->toConstant()->toDouble() == -0.0;
    case MDefinition::Opcode::BitAnd:
    case MDefinition::Opcode::BitOr:
    case MDefinition::Opcode::BitXor:
    case MDefinition::Opcode::BitNot:
    case MDefinition::Opcode::Lsh:
    case MDefinition::Opcode::Rsh:
      // Bitwise operations always yield an int32, never -0.
      return false;
    default:
      // Conservatively assume every other instruction may produce -0.
      return true;
  }
}
static inline bool NeedNegativeZeroCheck(MDefinition* def) {
  // Determine whether |def| must keep its negative-zero check: returns true
  // if any consumer can observe a difference between -0 and +0.
  if (def->isGuard() || def->isGuardRangeBailouts()) {
    return true;
  }
  // Test if all uses have the same semantics for -0 and 0
  for (MUseIterator use = def->usesBegin(); use != def->usesEnd(); use++) {
    // A resume point may rematerialize the value, so -0 is observable.
    if (use->consumer()->isResumePoint()) {
      return true;
    }
    MDefinition* use_def = use->consumer()->toDefinition();
    switch (use_def->op()) {
      case MDefinition::Opcode::Add: {
        // If add is truncating -0 and 0 are observed as the same.
        if (use_def->toAdd()->isTruncated()) {
          break;
        }
        // x + y gives -0, when both x and y are -0
        // Figure out the order in which the addition's operands will
        // execute. EdgeCaseAnalysis::analyzeLate has renumbered the MIR
        // definitions for us so that this just requires comparing ids.
        MDefinition* first = use_def->toAdd()->lhs();
        MDefinition* second = use_def->toAdd()->rhs();
        if (first->id() > second->id()) {
          std::swap(first, second);
        }
        // Negative zero checks can be removed on the first executed
        // operand only if it is guaranteed the second executed operand
        // will produce a value other than -0. While the second is
        // typed as an int32, a bailout taken between execution of the
        // operands may change that type and cause a -0 to flow to the
        // second.
        //
        // There is no way to test whether there are any bailouts
        // between execution of the operands, so remove negative
        // zero checks from the first only if the second's type is
        // independent from type changes that may occur after bailing.
        if (def == first && CanProduceNegativeZero(second)) {
          return true;
        }
        // The negative zero check can always be removed on the second
        // executed operand; by the time this executes the first will have
        // been evaluated as int32 and the addition's result cannot be -0.
        break;
      }
      case MDefinition::Opcode::Sub: {
        // If sub is truncating -0 and 0 are observed as the same
        if (use_def->toSub()->isTruncated()) {
          break;
        }
        // x - y gives -0, when x is -0 and y is 0
        // We can remove the negative zero check on the rhs, only if we
        // are sure the lhs isn't negative zero.
        // The lhs is typed as integer (i.e. not -0.0), but it can bailout
        // and change type. This should be fine if the lhs is executed
        // first. However if the rhs is executed first, the lhs can bail,
        // change type and become -0.0 while the rhs has already been
        // optimized to not make a difference between zero and negative zero.
        MDefinition* lhs = use_def->toSub()->lhs();
        MDefinition* rhs = use_def->toSub()->rhs();
        if (rhs->id() < lhs->id() && CanProduceNegativeZero(lhs)) {
          return true;
        }
        // Fall through to the operand-position checks below: as the rhs of
        // a sub, -0 behaves like 0, but as the lhs it is observable.
        [[fallthrough]];
      }
      case MDefinition::Opcode::StoreElement:
      case MDefinition::Opcode::StoreHoleValueElement:
      case MDefinition::Opcode::LoadElement:
      case MDefinition::Opcode::LoadElementHole:
      case MDefinition::Opcode::LoadUnboxedScalar:
      case MDefinition::Opcode::LoadDataViewElement:
      case MDefinition::Opcode::LoadTypedArrayElementHole:
      case MDefinition::Opcode::CharCodeAt:
      case MDefinition::Opcode::Mod:
      case MDefinition::Opcode::InArray:
        // Only allowed to remove check when definition is the second operand
        if (use_def->getOperand(0) == def) {
          return true;
        }
        for (size_t i = 2, e = use_def->numOperands(); i < e; i++) {
          if (use_def->getOperand(i) == def) {
            return true;
          }
        }
        break;
      case MDefinition::Opcode::BoundsCheck:
        // Only allowed to remove check when definition is the first operand
        if (use_def->toBoundsCheck()->getOperand(1) == def) {
          return true;
        }
        break;
      case MDefinition::Opcode::ToString:
      case MDefinition::Opcode::FromCharCode:
      case MDefinition::Opcode::FromCodePoint:
      case MDefinition::Opcode::TableSwitch:
      case MDefinition::Opcode::Compare:
      case MDefinition::Opcode::BitAnd:
      case MDefinition::Opcode::BitOr:
      case MDefinition::Opcode::BitXor:
      case MDefinition::Opcode::Abs:
      case MDefinition::Opcode::TruncateToInt32:
        // Always allowed to remove check. No matter which operand.
        break;
      case MDefinition::Opcode::StoreElementHole:
      case MDefinition::Opcode::StoreTypedArrayElementHole:
      case MDefinition::Opcode::PostWriteElementBarrier:
        // Only allowed to remove check when definition is the third operand.
        for (size_t i = 0, e = use_def->numOperands(); i < e; i++) {
          if (i == 2) {
            continue;
          }
          if (use_def->getOperand(i) == def) {
            return true;
          }
        }
        break;
      default:
        // Unknown consumer: conservatively keep the check.
        return true;
    }
  }
  return false;
}
#ifdef JS_JITSPEW
void MBinaryArithInstruction::printOpcode(GenericPrinter& out) const {
  MDefinition::printOpcode(out);
  // Append a bracketed suffix naming the numeric type; div/mod also report
  // signedness.
  const char* suffix = nullptr;
  switch (type()) {
    case MIRType::Int32:
      if (isDiv()) {
        suffix = toDiv()->isUnsigned() ? "uint32" : "int32";
      } else if (isMod()) {
        suffix = toMod()->isUnsigned() ? "uint32" : "int32";
      } else {
        suffix = "int32";
      }
      break;
    case MIRType::Int64:
      if (isDiv()) {
        suffix = toDiv()->isUnsigned() ? "uint64" : "int64";
      } else if (isMod()) {
        suffix = toMod()->isUnsigned() ? "uint64" : "int64";
      } else {
        suffix = "int64";
      }
      break;
    case MIRType::Float32:
      suffix = "float";
      break;
    case MIRType::Double:
      suffix = "double";
      break;
    default:
      break;
  }
  if (suffix) {
    out.printf(" [%s]", suffix);
  }
}
#endif
MDefinition* MRsh::foldsTo(TempAllocator& alloc) {
  // First apply the generic bitwise folds.
  MDefinition* folded = MBinaryBitwiseInstruction::foldsTo(alloc);
  if (folded != this) {
    return folded;
  }
  // Recognize (x << N) >> N with matching constant N, which is a
  // sign-extension of x's low bits. It's probably OK to perform this
  // optimization only for int32, as it will have the greatest effect for
  // asm.js code that is compiled with the JS pipeline, and that code will
  // not see int64 values.
  MDefinition* lhs = getOperand(0);
  MDefinition* rhs = getOperand(1);
  if (!lhs->isLsh() || !rhs->isConstant() || rhs->type() != MIRType::Int32) {
    return this;
  }
  MDefinition* lshAmount = lhs->getOperand(1);
  if (!lshAmount->isConstant() || lshAmount->type() != MIRType::Int32) {
    return this;
  }
  uint32_t rshShift = rhs->toConstant()->toInt32();
  uint32_t lshShift = lshAmount->toConstant()->toInt32();
  if (rshShift != lshShift) {
    return this;
  }
  // Shifting by 16 keeps the low half-word; shifting by 24 keeps the low
  // byte. Other shift amounts are left alone.
  if (rshShift == 16) {
    return MSignExtendInt32::New(alloc, lhs->getOperand(0),
                                 MSignExtendInt32::Half);
  }
  if (rshShift == 24) {
    return MSignExtendInt32::New(alloc, lhs->getOperand(0),
                                 MSignExtendInt32::Byte);
  }
  return this;
}
MDefinition* MBinaryArithInstruction::foldsTo(TempAllocator& alloc) {
  // Fold binary add/sub/mul: constant-fold when both operands are
  // constants, and strip identity operations (x op id => x).
  MOZ_ASSERT(IsNumberType(type()));
  MOZ_ASSERT(!isDiv() && !isMod(), "Div and Mod don't call this method");
  MDefinition* lhs = getOperand(0);
  MDefinition* rhs = getOperand(1);
  if (type() == MIRType::Int64) {
    MOZ_ASSERT(!isTruncated());
    if (MConstant* folded = EvaluateInt64ConstantOperands(alloc, this)) {
      return folded;
    }
    if (IsConstantInt64(rhs, int64_t(getIdentity()))) {
      return lhs;  // x op id => x
    }
    if (isCommutative() && IsConstantInt64(lhs, int64_t(getIdentity()))) {
      return rhs;  // id op x => x
    }
    return this;
  }
  if (type() == MIRType::IntPtr) {
    MOZ_ASSERT(!isTruncated());
    if (MConstant* folded = EvaluateIntPtrConstantOperands(alloc, this)) {
      return folded;
    }
    if (IsConstantIntPtr(rhs, intptr_t(getIdentity()))) {
      return lhs;  // x op id => x
    }
    if (isCommutative() && IsConstantIntPtr(lhs, intptr_t(getIdentity()))) {
      return rhs;  // id op x => x
    }
    return this;
  }
  // The remaining operations expect types representable as doubles.
  MOZ_ASSERT(IsTypeRepresentableAsDouble(type()));
  if (MConstant* folded = EvaluateConstantOperands(alloc, this)) {
    if (isTruncated()) {
      // A truncated result must still be an Int32; wrap non-int32 folds in
      // an explicit truncation (inserting the constant first if it is not
      // yet attached to a block).
      if (folded->type() != MIRType::Int32) {
        if (!folded->block()) {
          block()->insertBefore(this, folded);
        }
        return MTruncateToInt32::New(alloc, folded);
      }
    }
    return folded;
  }
  if (MConstant* folded = EvaluateConstantNaNOperand(this)) {
    MOZ_ASSERT(!isTruncated());
    return folded;
  }
  // NaN-preserving ops (e.g. for wasm) must not have identities stripped.
  if (mustPreserveNaN_) {
    return this;
  }
  // 0 + -0 = 0. So we can't remove addition
  if (isAdd() && type() != MIRType::Int32) {
    return this;
  }
  if (IsConstant(rhs, getIdentity())) {
    if (isTruncated()) {
      return MTruncateToInt32::New(alloc, lhs);
    }
    return lhs;
  }
  // subtraction isn't commutative. So we can't remove subtraction when lhs
  // equals 0
  if (isSub()) {
    return this;
  }
  if (IsConstant(lhs, getIdentity())) {
    if (isTruncated()) {
      return MTruncateToInt32::New(alloc, rhs);
    }
    return rhs;  // id op x => x
  }
  return this;
}
void MBinaryArithInstruction::trySpecializeFloat32(TempAllocator& alloc) {
  MOZ_ASSERT(IsNumberType(type()));
  // Only floating-point results are candidates: integer types are already
  // preferable to Float32.
  if (!IsFloatingPointType(type())) {
    return;
  }
  bool canUseFloat32 = EnsureFloatConsumersAndInputOrConvert(this, alloc);
  if (canUseFloat32) {
    setResultType(MIRType::Float32);
  }
}
void MMinMax::trySpecializeFloat32(TempAllocator& alloc) {
  if (!IsFloatingPointType(type())) {
    return;
  }
  // Specialize to Float32 only when both operands can produce a Float32 or
  // are themselves Float32 min/max nodes; otherwise convert both operands
  // to double.
  auto suitableForFloat32 = [](MDefinition* op) {
    return op->canProduceFloat32() ||
           (op->isMinMax() && op->type() == MIRType::Float32);
  };
  if (suitableForFloat32(lhs()) && suitableForFloat32(rhs())) {
    setResultType(MIRType::Float32);
  } else {
    ConvertOperandsToDouble(this, alloc);
  }
}
template <MIRType Type>
static MConstant* EvaluateMinMaxInt(TempAllocator& alloc, MConstant* lhs,
                                    MConstant* rhs, bool isMax) {
  // Compare the two integer constants and wrap the winner in a fresh
  // constant of the same MIRType.
  auto left = ToIntConstant<Type>(lhs);
  auto right = ToIntConstant<Type>(rhs);
  if (isMax) {
    return NewIntConstant<Type>(alloc, std::max(left, right));
  }
  return NewIntConstant<Type>(alloc, std::min(left, right));
}
static MConstant* EvaluateMinMax(TempAllocator& alloc, MConstant* lhs,
                                 MConstant* rhs, bool isMax) {
  MOZ_ASSERT(lhs->type() == rhs->type());
  MOZ_ASSERT(IsNumberType(lhs->type()));
  // The folded MConstant must keep the same MIRType as the original inputs.
  MIRType type = lhs->type();
  if (type == MIRType::Int32) {
    return EvaluateMinMaxInt<MIRType::Int32>(alloc, lhs, rhs, isMax);
  }
  if (type == MIRType::Int64) {
    return EvaluateMinMaxInt<MIRType::Int64>(alloc, lhs, rhs, isMax);
  }
  if (type == MIRType::IntPtr) {
    return EvaluateMinMaxInt<MIRType::IntPtr>(alloc, lhs, rhs, isMax);
  }
  if (type != MIRType::Float32 && type != MIRType::Double) {
    MOZ_CRASH("not a number type");
  }
  // Floating-point: use the JS math helpers so NaN and -0 are handled
  // exactly like Math.min/Math.max.
  double lnum = lhs->numberToDouble();
  double rnum = rhs->numberToDouble();
  double result =
      isMax ? js::math_max_impl(lnum, rnum) : js::math_min_impl(lnum, rnum);
  if (type == MIRType::Float32) {
    return MConstant::NewFloat32(alloc, result);
  }
  return MConstant::NewDouble(alloc, result);
}
MDefinition* MMinMax::foldsTo(TempAllocator& alloc) {
  // Fold min/max: identical operands, constant operands, nested min/max
  // chains, and min/max against always-non-negative length values.
  MOZ_ASSERT(lhs()->type() == type());
  MOZ_ASSERT(rhs()->type() == type());
  // min(x, x) == max(x, x) == x.
  if (lhs() == rhs()) {
    return lhs();
  }
  // Evaluate min/max of two constants; both arguments must be constants.
  auto foldConstants = [&alloc](MDefinition* lhs, MDefinition* rhs,
                                bool isMax) -> MConstant* {
    return EvaluateMinMax(alloc, lhs->toConstant(), rhs->toConstant(), isMax);
  };
  // Fold min/max of a known non-negative length value against a constant
  // <= 0; returns nullptr when the pattern doesn't apply.
  auto foldLength = [](MDefinition* operand, MConstant* constant,
                       bool isMax) -> MDefinition* {
    if (operand->isArrayLength() || operand->isArrayBufferViewLength() ||
        operand->isArgumentsLength() || operand->isStringLength() ||
        operand->isNonNegativeIntPtrToInt32()) {
      bool isZeroOrNegative;
      switch (constant->type()) {
        case MIRType::Int32:
          isZeroOrNegative = constant->toInt32() <= 0;
          break;
        case MIRType::IntPtr:
          isZeroOrNegative = constant->toIntPtr() <= 0;
          break;
        default:
          isZeroOrNegative = false;
          break;
      }
      // (Array|ArrayBufferView|Arguments|String)Length is always >= 0.
      // max(array.length, cte <= 0) = array.length
      // min(array.length, cte <= 0) = cte
      if (isZeroOrNegative) {
        return isMax ? operand : constant;
      }
    }
    return nullptr;
  };
  // Try to fold the following patterns when |x| and |y| are constants.
  //
  // min(min(x, z), min(y, z)) = min(min(x, y), z)
  // max(max(x, z), max(y, z)) = max(max(x, y), z)
  // max(min(x, z), min(y, z)) = min(max(x, y), z)
  // min(max(x, z), max(y, z)) = max(min(x, y), z)
  if (lhs()->isMinMax() && rhs()->isMinMax()) {
    do {
      auto* left = lhs()->toMinMax();
      auto* right = rhs()->toMinMax();
      // Inner operations must agree on min vs max.
      if (left->isMax() != right->isMax()) {
        break;
      }
      // Identify the shared operand |z| and the two others |x|, |y|.
      MDefinition* x;
      MDefinition* y;
      MDefinition* z;
      if (left->lhs() == right->lhs()) {
        std::tie(x, y, z) = std::tuple{left->rhs(), right->rhs(), left->lhs()};
      } else if (left->lhs() == right->rhs()) {
        std::tie(x, y, z) = std::tuple{left->rhs(), right->lhs(), left->lhs()};
      } else if (left->rhs() == right->lhs()) {
        std::tie(x, y, z) = std::tuple{left->lhs(), right->rhs(), left->rhs()};
      } else if (left->rhs() == right->rhs()) {
        std::tie(x, y, z) = std::tuple{left->lhs(), right->lhs(), left->rhs()};
      } else {
        break;
      }
      if (!x->isConstant() || !y->isConstant()) {
        break;
      }
      if (auto* foldedCst = foldConstants(x, y, isMax())) {
        if (auto* folded = foldLength(z, foldedCst, left->isMax())) {
          return folded;
        }
        block()->insertBefore(this, foldedCst);
        return MMinMax::New(alloc, foldedCst, z, type(), left->isMax());
      }
    } while (false);
  }
  // Fold min/max operations with same inputs.
  if (lhs()->isMinMax() || rhs()->isMinMax()) {
    auto* other = lhs()->isMinMax() ? lhs()->toMinMax() : rhs()->toMinMax();
    auto* operand = lhs()->isMinMax() ? rhs() : lhs();
    if (operand == other->lhs() || operand == other->rhs()) {
      if (isMax() == other->isMax()) {
        // min(x, min(x, y)) = min(x, y)
        // max(x, max(x, y)) = max(x, y)
        return other;
      }
      if (!IsFloatingPointType(type())) {
        // When neither value is NaN:
        // max(x, min(x, y)) = x
        // min(x, max(x, y)) = x
        // Ensure that any bailouts that we depend on to guarantee that |y| is
        // Int32 are not removed.
        auto* otherOp = operand == other->lhs() ? other->rhs() : other->lhs();
        otherOp->setGuardRangeBailoutsUnchecked();
        return operand;
      }
    }
  }
  // The remaining folds need at least one constant operand.
  if (!lhs()->isConstant() && !rhs()->isConstant()) {
    return this;
  }
  // Directly apply math utility to compare the rhs() and lhs() when
  // they are both constants.
  if (lhs()->isConstant() && rhs()->isConstant()) {
    if (auto* folded = foldConstants(lhs(), rhs(), isMax())) {
      return folded;
    }
  }
  MDefinition* operand = lhs()->isConstant() ? rhs() : lhs();
  MConstant* constant =
      lhs()->isConstant() ? lhs()->toConstant() : rhs()->toConstant();
  // min/max of ToDouble(int32) against a constant beyond the int32 range
  // is just the int32 value converted to double.
  if (operand->isToDouble() &&
      operand->getOperand(0)->type() == MIRType::Int32) {
    MOZ_ASSERT(constant->type() == MIRType::Double);
    // min(int32, cte >= INT32_MAX) = int32
    if (!isMax() && constant->toDouble() >= INT32_MAX) {
      MLimitedTruncate* limit = MLimitedTruncate::New(
          alloc, operand->getOperand(0), TruncateKind::NoTruncate);
      block()->insertBefore(this, limit);
      MToDouble* toDouble = MToDouble::New(alloc, limit);
      return toDouble;
    }
    // max(int32, cte <= INT32_MIN) = int32
    if (isMax() && constant->toDouble() <= INT32_MIN) {
      MLimitedTruncate* limit = MLimitedTruncate::New(
          alloc, operand->getOperand(0), TruncateKind::NoTruncate);
      block()->insertBefore(this, limit);
      MToDouble* toDouble = MToDouble::New(alloc, limit);
      return toDouble;
    }
  }
  if (auto* folded = foldLength(operand, constant, isMax())) {
    return folded;
  }
  // Attempt to fold nested min/max operations which are produced by
  // self-hosted built-in functions.
  if (operand->isMinMax()) {
    auto* other = operand->toMinMax();
    MOZ_ASSERT(other->lhs()->type() == type());
    MOZ_ASSERT(other->rhs()->type() == type());
    MConstant* otherConstant = nullptr;
    MDefinition* otherOperand = nullptr;
    if (other->lhs()->isConstant()) {
      otherConstant = other->lhs()->toConstant();
      otherOperand = other->rhs();
    } else if (other->rhs()->isConstant()) {
      otherConstant = other->rhs()->toConstant();
      otherOperand = other->lhs();
    }
    if (otherConstant) {
      if (isMax() == other->isMax()) {
        // Fold min(x, min(y, z)) to min(min(x, y), z) with constant min(x, y).
        // Fold max(x, max(y, z)) to max(max(x, y), z) with constant max(x, y).
        if (auto* left = foldConstants(constant, otherConstant, isMax())) {
          if (auto* folded = foldLength(otherOperand, left, isMax())) {
            return folded;
          }
          block()->insertBefore(this, left);
          return MMinMax::New(alloc, left, otherOperand, type(), isMax());
        }
      } else {
        // Fold min(x, max(y, z)) to max(min(x, y), min(x, z)).
        // Fold max(x, min(y, z)) to min(max(x, y), max(x, z)).
        //
        // But only do this when min(x, z) can also be simplified.
        if (auto* right = foldLength(otherOperand, constant, isMax())) {
          if (auto* left = foldConstants(constant, otherConstant, isMax())) {
            block()->insertBefore(this, left);
            return MMinMax::New(alloc, left, right, type(), !isMax());
          }
        }
      }
    }
  }
  return this;
}
#ifdef JS_JITSPEW
void MMinMax::printOpcode(GenericPrinter& out) const {
  MDefinition::printOpcode(out);
  // Annotate whether this node computes the maximum or the minimum.
  const char* kind = isMax() ? "max" : "min";
  out.printf(" (%s)", kind);
}
void MMinMaxArray::printOpcode(GenericPrinter& out) const {
  MDefinition::printOpcode(out);
  // Annotate whether this node computes the maximum or the minimum.
  const char* kind = isMax() ? "max" : "min";
  out.printf(" (%s)", kind);
}
#endif
MDefinition* MPow::foldsConstant(TempAllocator& alloc) {
  // Precompute x**p when both the base and the exponent are number
  // constants; returns nullptr when folding is not possible.
  if (!input()->isConstant() || !power()->isConstant()) {
    return nullptr;
  }
  MConstant* baseConst = input()->toConstant();
  MConstant* powerConst = power()->toConstant();
  if (!baseConst->isTypeRepresentableAsDouble() ||
      !powerConst->isTypeRepresentableAsDouble()) {
    return nullptr;
  }
  // Evaluate with the same helper the runtime uses, so results agree.
  double result = js::ecmaPow(baseConst->numberToDouble(),
                              powerConst->numberToDouble());
  if (type() == MIRType::Int32) {
    // Reject folding if the result isn't an int32, because we'll bail anyway.
    int32_t intResult;
    if (!mozilla::NumberIsInt32(result, &intResult)) {
      return nullptr;
    }
    return MConstant::NewInt32(alloc, intResult);
  }
  return MConstant::NewDouble(alloc, result);
}
MDefinition* MPow::foldsConstantPower(TempAllocator& alloc) {
  // Strength-reduce x**p for a handful of constant exponents; returns
  // nullptr when no reduction applies.
  // If `p` in `x^p` isn't constant, we can't apply these folds.
  if (!power()->isConstant()) {
    return nullptr;
  }
  if (!power()->toConstant()->isTypeRepresentableAsDouble()) {
    return nullptr;
  }
  MOZ_ASSERT(type() == MIRType::Double || type() == MIRType::Int32);
  // NOTE: The optimizations must match the optimizations used in |js::ecmaPow|
  // resp. |js::powi| to avoid differential testing issues.
  double pow = power()->toConstant()->numberToDouble();
  // Math.pow(x, 0.5) is a sqrt with edge-case detection.
  if (pow == 0.5) {
    MOZ_ASSERT(type() == MIRType::Double);
    return MPowHalf::New(alloc, input());
  }
  // Math.pow(x, -0.5) == 1 / Math.pow(x, 0.5), even for edge cases.
  if (pow == -0.5) {
    MOZ_ASSERT(type() == MIRType::Double);
    MPowHalf* half = MPowHalf::New(alloc, input());
    block()->insertBefore(this, half);
    MConstant* one = MConstant::NewDouble(alloc, 1.0);
    block()->insertBefore(this, one);
    return MDiv::New(alloc, one, half, MIRType::Double);
  }
  // Math.pow(x, 1) == x.
  if (pow == 1.0) {
    return input();
  }
  // Helper building x*y with this pow's bailout kind; multiplying a value
  // by itself can never produce negative zero.
  auto multiply = [this, &alloc](MDefinition* lhs, MDefinition* rhs) {
    MMul* mul = MMul::New(alloc, lhs, rhs, type());
    mul->setBailoutKind(bailoutKind());
    // Multiplying the same number can't yield negative zero.
    mul->setCanBeNegativeZero(lhs != rhs && canBeNegativeZero());
    return mul;
  };
  // Math.pow(x, 2) == x*x.
  if (pow == 2.0) {
    return multiply(input(), input());
  }
  // Math.pow(x, 3) == x*x*x.
  if (pow == 3.0) {
    MMul* mul1 = multiply(input(), input());
    block()->insertBefore(this, mul1);
    return multiply(input(), mul1);
  }
  // Math.pow(x, 4) == y*y, where y = x*x.
  if (pow == 4.0) {
    MMul* y = multiply(input(), input());
    block()->insertBefore(this, y);
    return multiply(y, y);
  }
  // Math.pow(x, NaN) == NaN.
  if (std::isnan(pow)) {
    return power();
  }
  // No optimization
  return nullptr;
}
MDefinition* MPow::foldsTo(TempAllocator& alloc) {
  // Try full constant folding first, then constant-exponent strength
  // reduction; otherwise leave the pow in place.
  MDefinition* folded = foldsConstant(alloc);
  if (!folded) {
    folded = foldsConstantPower(alloc);
  }
  return folded ? folded : this;
}
MDefinition* MBigIntPow::foldsTo(TempAllocator& alloc) {
  auto* base = lhs();
  MOZ_ASSERT(base->type() == MIRType::BigInt);
  auto* exponent = rhs();
  MOZ_ASSERT(exponent->type() == MIRType::BigInt);
  // Only constant exponents can be strength-reduced.
  if (!exponent->isConstant()) {
    return this;
  }
  int32_t pow;
  if (!BigInt::isInt32(exponent->toConstant()->toBigInt(), &pow)) {
    return this;
  }
  // x ** 1n == x.
  if (pow == 1) {
    return base;
  }
  // x ** 2n == x*x.
  if (pow == 2) {
    auto* mul = MBigIntMul::New(alloc, base, base);
    mul->setBailoutKind(bailoutKind());
    return mul;
  }
  // No optimization
  return this;
}
MDefinition* MBigIntAsIntN::foldsTo(TempAllocator& alloc) {
  // Fold BigInt.asIntN(bits, x) with a constant |bits| into machine-integer
  // sign-extension / masking over the unboxed IntPtr or Int64 payload.
  auto* bitsDef = bits();
  if (!bitsDef->isConstant()) {
    return this;
  }
  // Negative |bits| throw an error and too large |bits| don't fit into Int64.
  int32_t bitsInt = bitsDef->toConstant()->toInt32();
  if (bitsInt < 0 || bitsInt > 64) {
    return this;
  }
  // Prefer sign-extension if possible: only byte/half/word/full widths have
  // a dedicated sign-extend operation.
  bool canSignExtend = false;
  switch (bitsInt) {
    case 8:
    case 16:
    case 32:
    case 64:
      canSignExtend = true;
      break;
  }
  // Ensure the input is either IntPtr or Int64 typed.
  auto* inputDef = input();
  if (inputDef->isIntPtrToBigInt()) {
    inputDef = inputDef->toIntPtrToBigInt()->input();
    // Odd widths have no IntPtr sign-extend, so widen to Int64 and use the
    // shift-based path below.
    if (!canSignExtend) {
      auto* int64 = MIntPtrToInt64::New(alloc, inputDef);
      block()->insertBefore(this, int64);
      inputDef = int64;
    }
  } else if (inputDef->isInt64ToBigInt()) {
    inputDef = inputDef->toInt64ToBigInt()->input();
  } else {
    auto* truncate = MTruncateBigIntToInt64::New(alloc, inputDef);
    block()->insertBefore(this, truncate);
    inputDef = truncate;
  }
  if (inputDef->type() == MIRType::IntPtr) {
    MOZ_ASSERT(canSignExtend);
    // If |bits| is larger-or-equal to |BigInt::DigitBits|, return the input.
    if (size_t(bitsInt) >= BigInt::DigitBits) {
      auto* limited = MIntPtrLimitedTruncate::New(alloc, inputDef);
      block()->insertBefore(this, limited);
      inputDef = limited;
    } else {
      MOZ_ASSERT(bitsInt < 64);
      // Otherwise extension is the way to go.
      MSignExtendIntPtr::Mode mode;
      switch (bitsInt) {
        case 8:
          mode = MSignExtendIntPtr::Byte;
          break;
        case 16:
          mode = MSignExtendIntPtr::Half;
          break;
        case 32:
          mode = MSignExtendIntPtr::Word;
          break;
      }
      auto* extend = MSignExtendIntPtr::New(alloc, inputDef, mode);
      block()->insertBefore(this, extend);
      inputDef = extend;
    }
    return MIntPtrToBigInt::New(alloc, inputDef);
  }
  MOZ_ASSERT(inputDef->type() == MIRType::Int64);
  if (canSignExtend) {
    // If |bits| is equal to 64, return the input.
    if (bitsInt == 64) {
      auto* limited = MInt64LimitedTruncate::New(alloc, inputDef);
      block()->insertBefore(this, limited);
      inputDef = limited;
    } else {
      MOZ_ASSERT(bitsInt < 64);
      // Otherwise extension is the way to go.
      MSignExtendInt64::Mode mode;
      switch (bitsInt) {
        case 8:
          mode = MSignExtendInt64::Byte;
          break;
        case 16:
          mode = MSignExtendInt64::Half;
          break;
        case 32:
          mode = MSignExtendInt64::Word;
          break;
      }
      auto* extend = MSignExtendInt64::New(alloc, inputDef, mode);
      block()->insertBefore(this, extend);
      inputDef = extend;
    }
  } else {
    // Odd bit widths: sign-extend manually via mask + shift-left +
    // arithmetic shift-right.
    MOZ_ASSERT(bitsInt < 64);
    uint64_t mask = 0;
    if (bitsInt > 0) {
      mask = uint64_t(-1) >> (64 - bitsInt);
    }
    auto* cst = MConstant::NewInt64(alloc, int64_t(mask));
    block()->insertBefore(this, cst);
    // Mask off any excess bits.
    auto* bitAnd = MBitAnd::New(alloc, inputDef, cst, MIRType::Int64);
    block()->insertBefore(this, bitAnd);
    auto* shift = MConstant::NewInt64(alloc, int64_t(64 - bitsInt));
    block()->insertBefore(this, shift);
    // Left-shift to make the sign-bit the left-most bit.
    auto* lsh = MLsh::New(alloc, bitAnd, shift, MIRType::Int64);
    block()->insertBefore(this, lsh);
    // Right-shift to propagate the sign-bit.
    auto* rsh = MRsh::New(alloc, lsh, shift, MIRType::Int64);
    block()->insertBefore(this, rsh);
    inputDef = rsh;
  }
  return MInt64ToBigInt::New(alloc, inputDef, /* isSigned = */ true);
}
MDefinition* MBigIntAsUintN::foldsTo(TempAllocator& alloc) {
  // Fold BigInt.asUintN(bits, x) with a constant |bits| into a mask over
  // the unboxed Int64 payload.
  auto* bitsDef = bits();
  if (!bitsDef->isConstant()) {
    return this;
  }
  // Negative |bits| throw an error and too large |bits| don't fit into Int64.
  int32_t bitsInt = bitsDef->toConstant()->toInt32();
  if (bitsInt < 0 || bitsInt > 64) {
    return this;
  }
  // Ensure the input is Int64 typed.
  auto* inputDef = input();
  if (inputDef->isIntPtrToBigInt()) {
    // Widen an IntPtr payload to Int64 first.
    inputDef = inputDef->toIntPtrToBigInt()->input();
    auto* int64 = MIntPtrToInt64::New(alloc, inputDef);
    block()->insertBefore(this, int64);
    inputDef = int64;
  } else if (inputDef->isInt64ToBigInt()) {
    inputDef = inputDef->toInt64ToBigInt()->input();
  } else {
    auto* truncate = MTruncateBigIntToInt64::New(alloc, inputDef);
    block()->insertBefore(this, truncate);
    inputDef = truncate;
  }
  MOZ_ASSERT(inputDef->type() == MIRType::Int64);
  // For bits == 64 the full value is kept; otherwise keep the low |bits|
  // bits (mask is 0 when bits == 0).
  if (bitsInt < 64) {
    uint64_t mask = 0;
    if (bitsInt > 0) {
      mask = uint64_t(-1) >> (64 - bitsInt);
    }
    // Mask off any excess bits.
    auto* cst = MConstant::NewInt64(alloc, int64_t(mask));
    block()->insertBefore(this, cst);
    auto* bitAnd = MBitAnd::New(alloc, inputDef, cst, MIRType::Int64);
    block()->insertBefore(this, bitAnd);
    inputDef = bitAnd;
  }
  return MInt64ToBigInt::New(alloc, inputDef, /* isSigned = */ false);
}
// Conservatively determine whether |ins| may evaluate to zero. Only a
// constant operand can prove otherwise.
bool MBigIntPtrBinaryArithInstruction::isMaybeZero(MDefinition* ins) {
  MOZ_ASSERT(ins->type() == MIRType::IntPtr);

  // Look through a BigInt -> IntPtr conversion.
  if (ins->isBigIntToIntPtr()) {
    ins = ins->toBigIntToIntPtr()->input();
  }

  // Without a constant we have to assume zero is possible.
  if (!ins->isConstant()) {
    return true;
  }

  MConstant* cst = ins->toConstant();
  if (ins->type() == MIRType::IntPtr) {
    return cst->toIntPtr() == 0;
  }
  MOZ_ASSERT(ins->type() == MIRType::BigInt);
  return cst->toBigInt()->isZero();
}
// Conservatively determine whether |ins| may evaluate to a negative value.
// Only a constant operand can prove otherwise.
bool MBigIntPtrBinaryArithInstruction::isMaybeNegative(MDefinition* ins) {
  MOZ_ASSERT(ins->type() == MIRType::IntPtr);

  // Look through a BigInt -> IntPtr conversion.
  if (ins->isBigIntToIntPtr()) {
    ins = ins->toBigIntToIntPtr()->input();
  }

  // Without a constant we have to assume a negative value is possible.
  if (!ins->isConstant()) {
    return true;
  }

  MConstant* cst = ins->toConstant();
  if (ins->type() == MIRType::IntPtr) {
    return cst->toIntPtr() < 0;
  }
  MOZ_ASSERT(ins->type() == MIRType::BigInt);
  return cst->toBigInt()->isNegative();
}
// Constant-fold when both operands are IntPtr constants; otherwise keep the
// instruction unchanged.
MDefinition* MBigIntPtrBinaryArithInstruction::foldsTo(TempAllocator& alloc) {
  MDefinition* folded = EvaluateIntPtrConstantOperands(alloc, this);
  return folded ? folded : this;
}
// Constant-fold or strength-reduce BigInt exponentiation with IntPtr-typed
// operands.
MDefinition* MBigIntPtrPow::foldsTo(TempAllocator& alloc) {
  // Follow MPow::foldsTo and fold if:
  // 1. Both operands are constants.
  // 2. The power operand is ≤ 4 and the operation can be expressed as a series
  //    of multiplications.
  if (!rhs()->isConstant()) {
    return this;
  }
  intptr_t pow = rhs()->toConstant()->toIntPtr();

  if (lhs()->isConstant()) {
    intptr_t base = lhs()->toConstant()->toIntPtr();
    intptr_t result;
    // If the result can't be computed as an IntPtr constant, keep the
    // instruction and let the runtime handle it.
    if (!BigInt::powIntPtr(base, pow, &result)) {
      return this;
    }
    return MConstant::NewIntPtr(alloc, result);
  }

  // (x ** 1n) == x.
  if (pow == 1) {
    return lhs();
  }

  // Expanded multiplications inherit this instruction's bailout kind, so a
  // bailout in the expansion reports the same cause as the original pow.
  auto multiply = [this, &alloc](MDefinition* lhs, MDefinition* rhs) {
    auto* mul = MBigIntPtrMul::New(alloc, lhs, rhs);
    mul->setBailoutKind(bailoutKind());
    return mul;
  };

  // (x ** 2n) == x*x.
  if (pow == 2) {
    return multiply(lhs(), lhs());
  }

  // (x ** 3n) == x*x*x.
  if (pow == 3) {
    auto* mul1 = multiply(lhs(), lhs());
    block()->insertBefore(this, mul1);
    return multiply(lhs(), mul1);
  }

  // (x ** 4n) == y*y, where y = x*x.
  if (pow == 4) {
    auto* y = multiply(lhs(), lhs());
    block()->insertBefore(this, y);
    return multiply(y, y);
  }

  // No optimization
  return this;
}
// Constant-fold bitwise BigInt operations when both operands are IntPtr
// constants; otherwise keep the instruction unchanged.
MDefinition* MBigIntPtrBinaryBitwiseInstruction::foldsTo(TempAllocator& alloc) {
  MDefinition* folded = EvaluateIntPtrConstantOperands(alloc, this);
  return folded ? folded : this;
}
// Fold |~constant| into a fresh IntPtr constant.
MDefinition* MBigIntPtrBitNot::foldsTo(TempAllocator& alloc) {
  MDefinition* in = input();
  if (in->isConstant()) {
    intptr_t value = in->toConstant()->toIntPtr();
    return MConstant::NewIntPtr(alloc, ~value);
  }
  return this;
}
MDefinition* MInt32ToIntPtr::foldsTo(TempAllocator& alloc) {
  MDefinition* in = input();

  // Widen a constant directly at compile time.
  if (in->isConstant()) {
    return MConstant::NewIntPtr(alloc, intptr_t(in->toConstant()->toInt32()));
  }

  // Cancel out with the inverse (IntPtr -> Int32) conversion.
  if (in->isNonNegativeIntPtrToInt32()) {
    return in->toNonNegativeIntPtrToInt32()->input();
  }

  return this;
}
// Abs is infallible when truncation is implicit or when range analysis
// proved the result fits in Int32 bounds.
bool MAbs::fallible() const {
  if (implicitTruncate_) {
    return false;
  }
  return !(range() && range()->hasInt32Bounds());
}
void MAbs::trySpecializeFloat32(TempAllocator& alloc) {
  // Do not use Float32 if we can use int32.
  if (input()->type() == MIRType::Int32) {
    return;
  }
  if (!EnsureFloatConsumersAndInputOrConvert(this, alloc)) {
    return;
  }
  setResultType(MIRType::Float32);
}
MDefinition* MDiv::foldsTo(TempAllocator& alloc) {
  MOZ_ASSERT(IsNumberType(type()));
  MOZ_ASSERT(type() != MIRType::IntPtr, "not yet implemented");

  // Int64 division only folds when both operands are constant.
  if (type() == MIRType::Int64) {
    MDefinition* folded = EvaluateInt64ConstantOperands(alloc, this);
    return folded ? folded : this;
  }

  // Try constant folding first, then try turning a division by a constant
  // into a multiplication by its exact reciprocal.
  if (MDefinition* folded = EvaluateConstantOperands(alloc, this)) {
    return folded;
  }
  if (MDefinition* recip = EvaluateExactReciprocal(alloc, this)) {
    return recip;
  }
  return this;
}
void MDiv::analyzeEdgeCasesForward() {
// This is only meaningful when doing integer division.
if (type() != MIRType::Int32) {
return;
}
MOZ_ASSERT(lhs()->type() == MIRType::Int32);
MOZ_ASSERT(rhs()->type() == MIRType::Int32);
// Try removing divide by zero check
if (rhs()->isConstant() && !rhs()->toConstant()->isInt32(0)) {
canBeDivideByZero_ = false;
}
// If lhs is a constant int != INT32_MIN, then
// negative overflow check can be skipped.
if (lhs()->isConstant() && !lhs()->toConstant()->isInt32(INT32_MIN)) {
canBeNegativeOverflow_ = false;
}
// If rhs is a constant int != -1, likewise.
if (rhs()->isConstant() && !rhs()->toConstant()->isInt32(-1)) {
canBeNegativeOverflow_ = false;
}
// If lhs is != 0, then negative zero check can be skipped.
if (lhs()->isConstant() && !lhs()->toConstant()->isInt32(0)) {
setCanBeNegativeZero(false);
}
// If rhs is >= 0, likewise.
if (rhs()->isConstant() && rhs()->type() == MIRType::Int32) {
if (rhs()->toConstant()->toInt32() >= 0) {
setCanBeNegativeZero(false);
}
}
}
void MDiv::analyzeEdgeCasesBackward() {
  // In general, canBeNegativeZero_ is only valid for integer divides. It's
  // fine to read it here because it is only used to avoid recomputing
  // NeedNegativeZeroCheck for an already-cleared flag.
  if (!canBeNegativeZero_) {
    return;
  }
  if (!NeedNegativeZeroCheck(this)) {
    setCanBeNegativeZero(false);
  }
}
// A division can bail out unless truncation analysis proved that every use
// only observes the truncated result.
bool MDiv::fallible() const { return !isTruncated(); }
MDefinition* MMod::foldsTo(TempAllocator& alloc) {
  MOZ_ASSERT(IsNumberType(type()));
  MOZ_ASSERT(type() != MIRType::IntPtr, "not yet implemented");

  // Fold when both operands are constant, using the evaluator that matches
  // the result type.
  MDefinition* folded = type() == MIRType::Int64
                            ? EvaluateInt64ConstantOperands(alloc, this)
                            : EvaluateConstantOperands(alloc, this);
  return folded ? folded : this;
}
void MMod::analyzeEdgeCasesForward() {
// These optimizations make sense only for integer division
if (type() != MIRType::Int32) {
return;
}
if (rhs()->isConstant() && !rhs()->toConstant()->isInt32(0)) {
canBeDivideByZero_ = false;
}
if (rhs()->isConstant()) {
int32_t n = rhs()->toConstant()->toInt32();
if (n > 0 && !IsPowerOfTwo(uint32_t(n))) {
canBePowerOfTwoDivisor_ = false;
}
}
}
bool MMod::fallible() const {
  // A truncated modulus never bails out.
  if (isTruncated()) {
    return false;
  }
  // Otherwise it is fallible for unsigned operands, or when the divisor may
  // be zero or the dividend may be negative.
  return isUnsigned() || canBeDivideByZero() || canBeNegativeDividend();
}
void MMathFunction::trySpecializeFloat32(TempAllocator& alloc) {
  // Specialize to Float32 only when all consumers and the input allow it
  // (or can be converted).
  if (!EnsureFloatConsumersAndInputOrConvert(this, alloc)) {
    return;
  }
  setResultType(MIRType::Float32);
  specialization_ = MIRType::Float32;
}
bool MMathFunction::isFloat32Commutative() const {
  // Only the rounding functions are treated as float32-commutative.
  return function_ == UnaryMathFunction::Floor ||
         function_ == UnaryMathFunction::Ceil ||
         function_ == UnaryMathFunction::Round ||
         function_ == UnaryMathFunction::Trunc;
}
// Create an MHypot instruction whose operands are taken, in order, from
// |vector|. Returns nullptr on allocation failure.
MHypot* MHypot::New(TempAllocator& alloc, const MDefinitionVector& vector) {
  auto* hypot = new (alloc) MHypot;
  if (!hypot->init(alloc, vector.length())) {
    return nullptr;
  }
  uint32_t index = 0;
  for (MDefinition* def : vector) {
    hypot->initOperand(index++, def);
  }
  return hypot;
}
bool MAdd::fallible() const {
  // The add is infallible if truncation analysis showed that overflow can't
  // be observed (all uses truncate the result), or if range analysis proved
  // the result stays within Int32 bounds.
  if (truncateKind() >= TruncateKind::IndirectTruncate) {
    return false;
  }
  if (range() && range()->hasInt32Bounds()) {
    return false;
  }
  return true;
}
bool MSub::fallible() const {
// see comment in MAdd::fallible()
if (truncateKind() >= TruncateKind::IndirectTruncate) {
return false;
}
if (range() && range()->hasInt32Bounds()) {
return false;
}
return true;
}
MDefinition* MSub::foldsTo(TempAllocator& alloc) {
  MDefinition* folded = MBinaryArithInstruction::foldsTo(alloc);
  if (folded != this) {
    return folded;
  }

  // Optimize X - X to 0. This optimization is only valid for integer values:
  // subtracting a floating point value from itself returns NaN when the
  // operand is either Infinity or NaN.
  if (lhs() != rhs()) {
    return this;
  }

  switch (type()) {
    case MIRType::Int32:
      // Ensure that any bailouts that we depend on to guarantee that X
      // is Int32 are not removed.
      lhs()->setGuardRangeBailoutsUnchecked();
      return MConstant::NewInt32(alloc, 0);
    case MIRType::Int64:
      return MConstant::NewInt64(alloc, 0);
    case MIRType::IntPtr:
      return MConstant::NewIntPtr(alloc, 0);
    default:
      MOZ_ASSERT(IsFloatingPointType(type()));
      return this;
  }
}
MDefinition* MMul::foldsTo(TempAllocator& alloc) {
  MDefinition* folded = MBinaryArithInstruction::foldsTo(alloc);
  if (folded != this) {
    return folded;
  }

  // x * x can't produce -0, so drop the negative-zero check for squares.
  if (type() == MIRType::Int32 && lhs() == rhs()) {
    setCanBeNegativeZero(false);
  }
  return this;
}
void MMul::analyzeEdgeCasesForward() {
// Try to remove the check for negative zero
// This only makes sense when using the integer multiplication
if (type() != MIRType::Int32) {
return;
}
// If lhs is > 0, no need for negative zero check.
if (lhs()->isConstant() && lhs()->type() == MIRType::Int32) {
if (lhs()->toConstant()->toInt32() > 0) {
setCanBeNegativeZero(false);
}
}
// If rhs is > 0, likewise.
if (rhs()->isConstant() && rhs()->type() == MIRType::Int32) {
if (rhs()->toConstant()->toInt32() > 0) {
setCanBeNegativeZero(false);
}
}
}
void MMul::analyzeEdgeCasesBackward() {
  // Drop the negative-zero check when no use needs it.
  if (!canBeNegativeZero()) {
    return;
  }
  if (!NeedNegativeZeroCheck(this)) {
    setCanBeNegativeZero(false);
  }
}
bool MMul::canOverflow() const {
  // A truncated multiplication wraps, so overflow is unobservable.
  if (isTruncated()) {
    return false;
  }
  // Otherwise overflow is possible unless range analysis bounded the result.
  return !(range() && range()->hasInt32Bounds());
}
bool MUrsh::fallible() const {
  // With bailouts disabled the instruction can't fail.
  if (bailoutsDisabled()) {
    return false;
  }
  // Otherwise it's fallible unless range analysis proved Int32 bounds.
  return !(range() && range()->hasInt32Bounds());
}
// Returns true if |def| is known to produce a uint32 value. On success,
// |*pwrapped| is set to the definition the caller should inspect further
// (the shifted value for |x >>> 0|, or the constant itself); on failure its
// contents are meaningless.
static inline bool MustBeUInt32(MDefinition* def, MDefinition** pwrapped) {
  // |x >>> 0| produces a uint32, provided bailouts for the shift stay
  // disabled and the shift amount is the constant 0.
  if (def->isUrsh()) {
    *pwrapped = def->toUrsh()->lhs();
    MDefinition* rhs = def->toUrsh()->rhs();
    return def->toUrsh()->bailoutsDisabled() && rhs->maybeConstantValue() &&
           rhs->maybeConstantValue()->isInt32(0);
  }

  // A non-negative Int32 constant is a valid uint32 value.
  if (MConstant* defConst = def->maybeConstantValue()) {
    *pwrapped = defConst;
    return defConst->type() == MIRType::Int32 && defConst->toInt32() >= 0;
  }

  *pwrapped = nullptr;  // silence GCC warning
  return false;
}
/* static */
// Returns true when both operands are provably uint32 values whose unwrapped
// definitions are Int32-typed.
bool MBinaryInstruction::unsignedOperands(MDefinition* left,
                                          MDefinition* right) {
  auto isUnsignedInt32 = [](MDefinition* op) {
    MDefinition* unwrapped;
    return MustBeUInt32(op, &unwrapped) &&
           unwrapped->type() == MIRType::Int32;
  };
  return isUnsignedInt32(left) && isUnsignedInt32(right);
}
bool MBinaryInstruction::unsignedOperands() {
return unsignedOperands(getOperand(0), getOperand(1));
}
// Rewrites both operands to the unwrapped definitions identified by
// MustBeUInt32, stripping |>>> 0| wrappers. Must only be called after
// unsignedOperands() returned true.
void MBinaryInstruction::replaceWithUnsignedOperands() {
  MOZ_ASSERT(unsignedOperands());

  for (size_t i = 0; i < numOperands(); i++) {
    MDefinition* replace;
    MustBeUInt32(getOperand(i), &replace);
    // Constants are returned as-is by MustBeUInt32; nothing to replace then.
    if (replace == getOperand(i)) {
      continue;
    }

    // The old operand loses its visible use here; mark it implicitly used so
    // it isn't treated as dead.
    getOperand(i)->setImplicitlyUsedUnchecked();
    replaceOperand(i, replace);
  }
}
MDefinition* MBitNot::foldsTo(TempAllocator& alloc) {
if (type() == MIRType::Int64) {
return this;
}
MOZ_ASSERT(type() == MIRType::Int32);
MDefinition* input = getOperand(0);
if (input->isConstant()) {
int32_t v = ~(input->toConstant()->toInt32());
return MConstant::NewInt32(alloc, v);
}