/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/RangeAnalysis.h"
#include "mozilla/MathAlgorithms.h"
#include <algorithm>
#include "jsmath.h"
#include "jit/CompileInfo.h"
#include "jit/IonAnalysis.h"
#include "jit/JitSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "js/ScalarType.h" // js::Scalar::Type
#include "util/CheckedArithmetic.h"
#include "vm/ArgumentsObject.h"
#include "vm/TypedArrayObject.h"
#include "vm/Uint8Clamped.h"
#include "vm/BytecodeUtil-inl.h"
using namespace js;
using namespace js::jit;
using JS::GenericNaN;
using JS::ToInt32;
using mozilla::Abs;
using mozilla::CountLeadingZeroes32;
using mozilla::ExponentComponent;
using mozilla::FloorLog2;
using mozilla::IsNegativeZero;
using mozilla::NegativeInfinity;
using mozilla::NumberEqualsInt32;
using mozilla::PositiveInfinity;
// [SMDOC] IonMonkey Range Analysis
//
// This algorithm is based on the paper "Eliminating Range Checks Using
// Static Single Assignment Form" by Gough and Klaeren.
//
// We associate a range object with each SSA name, and the ranges are consulted
// in order to determine whether overflow is possible for arithmetic
// computations.
//
// An important source of range information, one that requires care to
// exploit, is conditional control flow. Consider the code below:
//
// if (x < 0) {
// y = x + 2000000000;
// } else {
// if (x < 1000000000) {
// y = x * 2;
// } else {
// y = x - 3000000000;
// }
// }
//
// The arithmetic operations in this code cannot overflow, but it is not
// sufficient to simply associate each name with a range, since the information
// differs between basic blocks. The traditional dataflow approach would be
// to associate ranges with (name, basic block) pairs. This solution is not
// satisfying, since we lose the benefit of SSA form: in SSA form, each
// definition has a unique name, so there is no need to track information about
// the control flow of the program.
//
// The approach used here is to add a new form of pseudo operation called a
// beta node, which associates range information with a value. These beta
// instructions take one argument and additionally have an auxiliary constant
// range associated with them. Operationally, beta nodes are just copies, but
// the invariant expressed by beta node copies is that the output will fall
// inside the range given by the beta node. Gough and Klaeren refer to SSA
// extended with these beta nodes as XSA form. The following shows the example
// code transformed into XSA form:
//
// if (x < 0) {
// x1 = Beta(x, [INT_MIN, -1]);
// y1 = x1 + 2000000000;
// } else {
// x2 = Beta(x, [0, INT_MAX]);
// if (x2 < 1000000000) {
// x3 = Beta(x2, [INT_MIN, 999999999]);
// y2 = x3*2;
// } else {
// x4 = Beta(x2, [1000000000, INT_MAX]);
// y3 = x4 - 3000000000;
// }
// y4 = Phi(y2, y3);
// }
// y = Phi(y1, y4);
//
// We insert beta nodes for the purposes of range analysis (they might also be
// usefully used for other forms of bounds check elimination) and remove them
// after range analysis is performed. The remaining compiler phases do not ever
// encounter beta nodes.
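//
// A Range conservatively describes a value with optional int32 lower and
// upper bounds, a flag for whether the value can have a fractional part, a
// flag for whether it can be negative zero, and a maximum exponent (with
// sentinel values admitting infinities and NaN). In spew output (see
// Range::dump below), I[l, u] denotes an integer range, F[l, u] a range
// that may include fractional values, and ? a missing int32 bound.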
static bool IsDominatedUse(MBasicBlock* block, MUse* use) {
MNode* n = use->consumer();
bool isPhi = n->isDefinition() && n->toDefinition()->isPhi();
if (isPhi) {
MPhi* phi = n->toDefinition()->toPhi();
return block->dominates(phi->block()->getPredecessor(phi->indexOf(use)));
}
return block->dominates(n->block());
}
static inline void SpewRange(MDefinition* def) {
#ifdef JS_JITSPEW
if (JitSpewEnabled(JitSpew_Range) && def->type() != MIRType::None &&
def->range()) {
JitSpewHeader(JitSpew_Range);
Fprinter& out = JitSpewPrinter();
out.printf(" ");
def->printName(out);
out.printf(" has range ");
def->range()->dump(out);
out.printf("\n");
}
#endif
}
#ifdef JS_JITSPEW
static const char* TruncateKindString(TruncateKind kind) {
switch (kind) {
case TruncateKind::NoTruncate:
return "NoTruncate";
case TruncateKind::TruncateAfterBailouts:
return "TruncateAfterBailouts";
case TruncateKind::IndirectTruncate:
return "IndirectTruncate";
case TruncateKind::Truncate:
return "Truncate";
default:
MOZ_CRASH("Unknown truncate kind.");
}
}
static inline void SpewTruncate(MDefinition* def, TruncateKind kind,
bool shouldClone) {
if (JitSpewEnabled(JitSpew_Range)) {
JitSpewHeader(JitSpew_Range);
Fprinter& out = JitSpewPrinter();
out.printf(" ");
out.printf("truncating ");
def->printName(out);
out.printf(" (kind: %s, clone: %d)\n", TruncateKindString(kind),
shouldClone);
}
}
#else
static inline void SpewTruncate(MDefinition* def, TruncateKind kind,
bool shouldClone) {}
#endif
TempAllocator& RangeAnalysis::alloc() const { return graph_.alloc(); }
void RangeAnalysis::replaceDominatedUsesWith(MDefinition* orig,
MDefinition* dom,
MBasicBlock* block) {
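// Note that uses whose consumer is |dom| itself are skipped below: when
// |dom| is a beta node for |orig|, it consumes |orig|, and rewriting that
// use would make the beta node its own input.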
for (MUseIterator i(orig->usesBegin()); i != orig->usesEnd();) {
MUse* use = *i++;
if (use->consumer() != dom && IsDominatedUse(block, use)) {
use->replaceProducer(dom);
}
}
}
bool RangeAnalysis::addBetaNodes() {
JitSpew(JitSpew_Range, "Adding beta nodes");
for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
MBasicBlock* block = *i;
JitSpew(JitSpew_Range, "Looking at block %u", block->id());
BranchDirection branch_dir;
MTest* test = block->immediateDominatorBranch(&branch_dir);
if (!test || !test->getOperand(0)->isCompare()) {
continue;
}
MCompare* compare = test->getOperand(0)->toCompare();
if (!compare->isNumericComparison()) {
continue;
}
// TODO: support unsigned comparisons
if (compare->compareType() == MCompare::Compare_UInt32) {
continue;
}
// isNumericComparison should return false for UIntPtr.
MOZ_ASSERT(compare->compareType() != MCompare::Compare_UIntPtr);
MDefinition* left = compare->getOperand(0);
MDefinition* right = compare->getOperand(1);
double bound;
double conservativeLower = NegativeInfinity<double>();
double conservativeUpper = PositiveInfinity<double>();
MDefinition* val = nullptr;
JSOp jsop = compare->jsop();
if (branch_dir == FALSE_BRANCH) {
jsop = NegateCompareOp(jsop);
conservativeLower = GenericNaN();
conservativeUpper = GenericNaN();
}
MConstant* leftConst = left->maybeConstantValue();
MConstant* rightConst = right->maybeConstantValue();
if (leftConst && leftConst->isTypeRepresentableAsDouble()) {
bound = leftConst->numberToDouble();
val = right;
jsop = ReverseCompareOp(jsop);
} else if (rightConst && rightConst->isTypeRepresentableAsDouble()) {
bound = rightConst->numberToDouble();
val = left;
} else if (left->type() == MIRType::Int32 &&
right->type() == MIRType::Int32) {
MDefinition* smaller = nullptr;
MDefinition* greater = nullptr;
if (jsop == JSOp::Lt) {
smaller = left;
greater = right;
} else if (jsop == JSOp::Gt) {
smaller = right;
greater = left;
}
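// Even without a constant operand, a strict inequality between two int32
// values is informative: if smaller < greater holds, then smaller is at
// most INT32_MAX - 1 and greater is at least INT32_MIN + 1.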
if (smaller && greater) {
if (!alloc().ensureBallast()) {
return false;
}
MBeta* beta;
beta = MBeta::New(
alloc(), smaller,
Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX - 1));
block->insertBefore(*block->begin(), beta);
replaceDominatedUsesWith(smaller, beta, block);
JitSpew(JitSpew_Range, " Adding beta node for smaller %u",
smaller->id());
beta = MBeta::New(
alloc(), greater,
Range::NewInt32Range(alloc(), JSVAL_INT_MIN + 1, JSVAL_INT_MAX));
block->insertBefore(*block->begin(), beta);
replaceDominatedUsesWith(greater, beta, block);
JitSpew(JitSpew_Range, " Adding beta node for greater %u",
greater->id());
}
continue;
} else {
continue;
}
// At this point, one of the operands of the compare is a constant, and
// val is the other operand.
MOZ_ASSERT(val);
Range comp;
switch (jsop) {
case JSOp::Le:
comp.setDouble(conservativeLower, bound);
break;
case JSOp::Lt:
// For integers, if x < c, the upper bound of x is c-1.
if (val->type() == MIRType::Int32) {
int32_t intbound;
if (NumberEqualsInt32(bound, &intbound) &&
SafeSub(intbound, 1, &intbound)) {
bound = intbound;
}
}
comp.setDouble(conservativeLower, bound);
// Negative zero is not less than zero.
if (bound == 0) {
comp.refineToExcludeNegativeZero();
}
break;
case JSOp::Ge:
comp.setDouble(bound, conservativeUpper);
break;
case JSOp::Gt:
// For integers, if x > c, the lower bound of x is c+1.
if (val->type() == MIRType::Int32) {
int32_t intbound;
if (NumberEqualsInt32(bound, &intbound) &&
SafeAdd(intbound, 1, &intbound)) {
bound = intbound;
}
}
comp.setDouble(bound, conservativeUpper);
// Negative zero is not greater than zero.
if (bound == 0) {
comp.refineToExcludeNegativeZero();
}
break;
case JSOp::StrictEq:
case JSOp::Eq:
comp.setDouble(bound, bound);
break;
case JSOp::StrictNe:
case JSOp::Ne:
// Negative zero is not not-equal to zero.
if (bound == 0) {
comp.refineToExcludeNegativeZero();
break;
}
continue; // well, we could have
// [-\inf, bound-1] U [bound+1, \inf] but we only use
// contiguous ranges.
default:
continue;
}
if (JitSpewEnabled(JitSpew_Range)) {
JitSpewHeader(JitSpew_Range);
Fprinter& out = JitSpewPrinter();
out.printf(" Adding beta node for %u with range ", val->id());
comp.dump(out);
out.printf("\n");
}
if (!alloc().ensureBallast()) {
return false;
}
MBeta* beta = MBeta::New(alloc(), val, new (alloc()) Range(comp));
block->insertBefore(*block->begin(), beta);
replaceDominatedUsesWith(val, beta, block);
}
return true;
}
bool RangeAnalysis::removeBetaNodes() {
JitSpew(JitSpew_Range, "Removing beta nodes");
for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
MBasicBlock* block = *i;
for (MDefinitionIterator iter(*i); iter;) {
MDefinition* def = *iter++;
if (def->isBeta()) {
auto* beta = def->toBeta();
MDefinition* op = beta->input();
JitSpew(JitSpew_Range, " Removing beta node %u for %u", beta->id(),
op->id());
beta->justReplaceAllUsesWith(op);
block->discard(beta);
} else {
// We only place Beta nodes at the beginning of basic
// blocks, so if we see something else, we can move on
// to the next block.
break;
}
}
}
return true;
}
void SymbolicBound::dump(GenericPrinter& out) const {
if (loop) {
out.printf("[loop] ");
}
sum.dump(out);
}
void SymbolicBound::dump() const {
Fprinter out(stderr);
dump(out);
out.printf("\n");
out.finish();
}
// Test whether the given range's exponent tells us anything that its lower
// and upper bound values don't.
static bool IsExponentInteresting(const Range* r) {
// If it lacks either a lower or upper bound, the exponent is interesting.
if (!r->hasInt32Bounds()) {
return true;
}
// Otherwise if there's no fractional part, the lower and upper bounds,
// which are integers, are perfectly precise.
if (!r->canHaveFractionalPart()) {
return false;
}
// Otherwise, if the bounds are conservatively rounded across a power-of-two
// boundary, the exponent may imply a tighter range.
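// For example, the range F[0, 2] with max exponent 0 (an upper bound
// conservatively rounded up from a fractional value below 2) is
// interesting: FloorLog2(2) == 1 > 0, and the exponent implies the value
// is less than pow(2, 0+1) == 2, tighter than the integer upper bound.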
return FloorLog2(std::max(Abs(r->lower()), Abs(r->upper()))) > r->exponent();
}
void Range::dump(GenericPrinter& out) const {
assertInvariants();
// Floating-point or Integer subset.
if (canHaveFractionalPart_) {
out.printf("F");
} else {
out.printf("I");
}
out.printf("[");
if (!hasInt32LowerBound_) {
out.printf("?");
} else {
out.printf("%d", lower_);
}
if (symbolicLower_) {
out.printf(" {");
symbolicLower_->dump(out);
out.printf("}");
}
out.printf(", ");
if (!hasInt32UpperBound_) {
out.printf("?");
} else {
out.printf("%d", upper_);
}
if (symbolicUpper_) {
out.printf(" {");
symbolicUpper_->dump(out);
out.printf("}");
}
out.printf("]");
bool includesNaN = max_exponent_ == IncludesInfinityAndNaN;
bool includesNegativeInfinity =
max_exponent_ >= IncludesInfinity && !hasInt32LowerBound_;
bool includesPositiveInfinity =
max_exponent_ >= IncludesInfinity && !hasInt32UpperBound_;
bool includesNegativeZero = canBeNegativeZero_;
if (includesNaN || includesNegativeInfinity || includesPositiveInfinity ||
includesNegativeZero) {
out.printf(" (");
bool first = true;
if (includesNaN) {
if (first) {
first = false;
} else {
out.printf(" ");
}
out.printf("U NaN");
}
if (includesNegativeInfinity) {
if (first) {
first = false;
} else {
out.printf(" ");
}
out.printf("U -Infinity");
}
if (includesPositiveInfinity) {
if (first) {
first = false;
} else {
out.printf(" ");
}
out.printf("U Infinity");
}
if (includesNegativeZero) {
if (first) {
first = false;
} else {
out.printf(" ");
}
out.printf("U -0");
}
out.printf(")");
}
if (max_exponent_ < IncludesInfinity && IsExponentInteresting(this)) {
out.printf(" (< pow(2, %d+1))", max_exponent_);
}
}
void Range::dump() const {
Fprinter out(stderr);
dump(out);
out.printf("\n");
out.finish();
}
Range* Range::intersect(TempAllocator& alloc, const Range* lhs,
const Range* rhs, bool* emptyRange) {
*emptyRange = false;
if (!lhs && !rhs) {
return nullptr;
}
if (!lhs) {
return new (alloc) Range(*rhs);
}
if (!rhs) {
return new (alloc) Range(*lhs);
}
int32_t newLower = std::max(lhs->lower_, rhs->lower_);
int32_t newUpper = std::min(lhs->upper_, rhs->upper_);
// If upper < lower, then we have conflicting constraints. Consider:
//
// if (x < 0) {
// if (x > 0) {
// [Some code.]
// }
// }
//
// In this case, the block is unreachable.
if (newUpper < newLower) {
// If both ranges can be NaN, the result can still be NaN.
if (!lhs->canBeNaN() || !rhs->canBeNaN()) {
*emptyRange = true;
}
return nullptr;
}
bool newHasInt32LowerBound =
lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_;
bool newHasInt32UpperBound =
lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_;
FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
lhs->canHaveFractionalPart_ && rhs->canHaveFractionalPart_);
NegativeZeroFlag newMayIncludeNegativeZero =
NegativeZeroFlag(lhs->canBeNegativeZero_ && rhs->canBeNegativeZero_);
uint16_t newExponent = std::min(lhs->max_exponent_, rhs->max_exponent_);
// NaN is a special value which is neither greater than infinity nor less than
// negative infinity. When we intersect two ranges like [?, 0] and [0, ?], we
// can end up thinking we have both a lower and upper bound, even though NaN
// is still possible. In this case, just be conservative, since any case where
// we can have NaN is not especially interesting.
if (newHasInt32LowerBound && newHasInt32UpperBound &&
newExponent == IncludesInfinityAndNaN) {
return nullptr;
}
// If one of the ranges has a fractional part and the other doesn't, it's
// possible that we will have computed a newExponent that's more precise
// than our newLower and newUpper. This is unusual, so we handle it here
// instead of in optimize().
//
// For example, consider the range F[0,1.5]. Range analysis represents the
// lower and upper bound as integers, so we'd actually have
// F[0,2] (< pow(2, 0+1)). In this case, the exponent gives us a slightly
// more precise upper bound than the integer upper bound.
//
// When intersecting such a range with an integer range, the fractional part
// of the range is dropped. The max exponent of 0 remains valid, so the
// upper bound needs to be adjusted to 1.
//
// When intersecting F[0,2] (< pow(2, 0+1)) with a range like F[2,4],
// the naive intersection is I[2,2], but since the max exponent tells us
// that the value is always less than 2, the intersection is actually empty.
if (lhs->canHaveFractionalPart() != rhs->canHaveFractionalPart() ||
(lhs->canHaveFractionalPart() && newHasInt32LowerBound &&
newHasInt32UpperBound && newLower == newUpper)) {
refineInt32BoundsByExponent(newExponent, &newLower, &newHasInt32LowerBound,
&newUpper, &newHasInt32UpperBound);
// If we're intersecting two ranges that don't overlap, this could also
// push the bounds past each other, since the actual intersection is
// the empty set.
if (newLower > newUpper) {
*emptyRange = true;
return nullptr;
}
}
return new (alloc)
Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
newCanHaveFractionalPart, newMayIncludeNegativeZero, newExponent);
}
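// A sketch of unionWith with hypothetical values: unioning I[0, 5] with
// F[10, 20] yields F[0, 20]. The bounds take the min/max, int32
// boundedness requires both operands to be bounded, the fractional-part
// and negative-zero flags are ORed, and the max exponent is the max of the
// two.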
void Range::unionWith(const Range* other) {
int32_t newLower = std::min(lower_, other->lower_);
int32_t newUpper = std::max(upper_, other->upper_);
bool newHasInt32LowerBound =
hasInt32LowerBound_ && other->hasInt32LowerBound_;
bool newHasInt32UpperBound =
hasInt32UpperBound_ && other->hasInt32UpperBound_;
FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
canHaveFractionalPart_ || other->canHaveFractionalPart_);
NegativeZeroFlag newMayIncludeNegativeZero =
NegativeZeroFlag(canBeNegativeZero_ || other->canBeNegativeZero_);
uint16_t newExponent = std::max(max_exponent_, other->max_exponent_);
rawInitialize(newLower, newHasInt32LowerBound, newUpper,
newHasInt32UpperBound, newCanHaveFractionalPart,
newMayIncludeNegativeZero, newExponent);
}
Range::Range(const MDefinition* def)
: symbolicLower_(nullptr), symbolicUpper_(nullptr) {
if (const Range* other = def->range()) {
// The instruction has range information; use it.
*this = *other;
// Simulate the effect of converting the value to its type.
// Note: we cannot clamp here, since ranges aren't allowed to shrink, and
// truncation can increase the range again. So we do a wrapAround to mimic
// a possible truncation.
switch (def->type()) {
case MIRType::Int32:
// MToNumberInt32 cannot truncate. So we can safely clamp.
if (def->isToNumberInt32()) {
clampToInt32();
} else {
wrapAroundToInt32();
}
break;
case MIRType::Boolean:
wrapAroundToBoolean();
break;
case MIRType::None:
MOZ_CRASH("Asking for the range of an instruction with no value");
default:
break;
}
} else {
// Otherwise just use type information. We can trust the type here
// because we don't care what value the instruction actually produces,
// but what value we might get after we get past the bailouts.
switch (def->type()) {
case MIRType::Int32:
setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
break;
case MIRType::Boolean:
setInt32(0, 1);
break;
case MIRType::None:
MOZ_CRASH("Asking for the range of an instruction with no value");
default:
setUnknown();
break;
}
}
// As a special case, MUrsh is permitted to claim a result type of
// MIRType::Int32 while actually returning values in [0,UINT32_MAX] without
// bailouts. If range analysis hasn't ruled out values in
// (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for
// use as either a uint32 or an int32.
if (!hasInt32UpperBound() && def->isUrsh() &&
def->toUrsh()->bailoutsDisabled() && def->type() != MIRType::Int64) {
lower_ = INT32_MIN;
}
assertInvariants();
}
static uint16_t ExponentImpliedByDouble(double d) {
// Handle the special values.
if (std::isnan(d)) {
return Range::IncludesInfinityAndNaN;
}
if (std::isinf(d)) {
return Range::IncludesInfinity;
}
// Otherwise take the exponent part and clamp it at zero, since the Range
// class doesn't track fractional ranges.
return uint16_t(std::max(int_fast16_t(0), ExponentComponent(d)));
}
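// A worked example with hypothetical inputs: setDouble(-1.5, 2.5) rounds
// the bounds outward to [-2, 3], keeps the fractional-part flag set (the
// range crosses zero), and includes negative zero since the range
// straddles 0, yielding F[-2, 3] (U -0).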
void Range::setDouble(double l, double h) {
MOZ_ASSERT(!(l > h));
// Infer lower_, upper_, hasInt32LowerBound_, and hasInt32UpperBound_.
if (l >= INT32_MIN && l <= INT32_MAX) {
lower_ = int32_t(::floor(l));
hasInt32LowerBound_ = true;
} else if (l >= INT32_MAX) {
lower_ = INT32_MAX;
hasInt32LowerBound_ = true;
} else {
lower_ = INT32_MIN;
hasInt32LowerBound_ = false;
}
if (h >= INT32_MIN && h <= INT32_MAX) {
upper_ = int32_t(::ceil(h));
hasInt32UpperBound_ = true;
} else if (h <= INT32_MIN) {
upper_ = INT32_MIN;
hasInt32UpperBound_ = true;
} else {
upper_ = INT32_MAX;
hasInt32UpperBound_ = false;
}
// Infer max_exponent_.
uint16_t lExp = ExponentImpliedByDouble(l);
uint16_t hExp = ExponentImpliedByDouble(h);
max_exponent_ = std::max(lExp, hExp);
canHaveFractionalPart_ = ExcludesFractionalParts;
canBeNegativeZero_ = ExcludesNegativeZero;
// Infer the canHaveFractionalPart_ setting. We can have a
// fractional part if the range crosses through the neighborhood of zero. We
// won't have a fractional value if the value is always beyond the point at
// which double precision can't represent fractional values.
uint16_t minExp = std::min(lExp, hExp);
bool includesNegative = std::isnan(l) || l < 0;
bool includesPositive = std::isnan(h) || h > 0;
bool crossesZero = includesNegative && includesPositive;
if (crossesZero || minExp < MaxTruncatableExponent) {
canHaveFractionalPart_ = IncludesFractionalParts;
}
// Infer the canBeNegativeZero_ setting. We can have a negative zero if
// either bound is zero.
if (!(l > 0) && !(h < 0)) {
canBeNegativeZero_ = IncludesNegativeZero;
}
optimize();
}
void Range::setDoubleSingleton(double d) {
setDouble(d, d);
// The above setDouble call is for comparisons, and treats negative zero
// as equal to zero. We're aiming for a minimum range, so we can clear the
// negative zero flag if the value isn't actually negative zero.
if (!IsNegativeZero(d)) {
canBeNegativeZero_ = ExcludesNegativeZero;
}
assertInvariants();
}
static inline bool MissingAnyInt32Bounds(const Range* lhs, const Range* rhs) {
return !lhs->hasInt32Bounds() || !rhs->hasInt32Bounds();
}
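// Addition can increase the max exponent by at most one for finite values:
// if |x| < pow(2, e1+1) and |y| < pow(2, e2+1), then
// |x + y| < pow(2, max(e1, e2) + 2), i.e. a max exponent of max(e1, e2) + 1.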
Range* Range::add(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
int64_t l = (int64_t)lhs->lower_ + (int64_t)rhs->lower_;
if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound()) {
l = NoInt32LowerBound;
}
int64_t h = (int64_t)lhs->upper_ + (int64_t)rhs->upper_;
if (!lhs->hasInt32UpperBound() || !rhs->hasInt32UpperBound()) {
h = NoInt32UpperBound;
}
// The exponent is at most one greater than the greater of the operands'
// exponents, except for NaN and infinity cases.
uint16_t e = std::max(lhs->max_exponent_, rhs->max_exponent_);
if (e <= Range::MaxFiniteExponent) {
++e;
}
// Infinity + -Infinity is NaN.
if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) {
e = Range::IncludesInfinityAndNaN;
}
return new (alloc) Range(
l, h,
FractionalPartFlag(lhs->canHaveFractionalPart() ||
rhs->canHaveFractionalPart()),
NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeNegativeZero()),
e);
}
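// Subtraction mirrors addition: [a, b] - [c, d] = [a - d, b - c]. The
// result can be negative zero only when lhs can be -0 and rhs can be zero,
// since (-0) - (+0) == -0.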
Range* Range::sub(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
int64_t l = (int64_t)lhs->lower_ - (int64_t)rhs->upper_;
if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound()) {
l = NoInt32LowerBound;
}
int64_t h = (int64_t)lhs->upper_ - (int64_t)rhs->lower_;
if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound()) {
h = NoInt32UpperBound;
}
// The exponent is at most one greater than the greater of the operands'
// exponents, except for NaN and infinity cases.
uint16_t e = std::max(lhs->max_exponent_, rhs->max_exponent_);
if (e <= Range::MaxFiniteExponent) {
++e;
}
// Infinity - Infinity is NaN.
if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) {
e = Range::IncludesInfinityAndNaN;
}
return new (alloc)
Range(l, h,
FractionalPartFlag(lhs->canHaveFractionalPart() ||
rhs->canHaveFractionalPart()),
NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeZero()), e);
}
Range* Range::and_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
MOZ_ASSERT(lhs->isInt32());
MOZ_ASSERT(rhs->isInt32());
// If both numbers can be negative, the result can be negative anywhere in
// the int32 range.
if (lhs->lower() < 0 && rhs->lower() < 0) {
return Range::NewInt32Range(alloc, INT32_MIN,
std::max(lhs->upper(), rhs->upper()));
}
// At most one of the two numbers can be negative.
// - the result can't be negative
// - the upper bound is the minimum of both upper bounds,
int32_t lower = 0;
int32_t upper = std::min(lhs->upper(), rhs->upper());
// EXCEPT when the upper bound of the non-negative number is the maximum
// value, because ANDing with a negative value can preserve the non-negative
// operand in full, e.g. -1 & 5 == 5.
if (lhs->lower() < 0) {
upper = rhs->upper();
}
if (rhs->lower() < 0) {
upper = lhs->upper();
}
return Range::NewInt32Range(alloc, lower, upper);
}
Range* Range::or_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
MOZ_ASSERT(lhs->isInt32());
MOZ_ASSERT(rhs->isInt32());
// When one operand is always 0 or always -1, it's a special case where we
// can compute a fully precise result. Handling these up front also
// protects the code below from calling CountLeadingZeroes32 with a zero
// operand or from shifting an int32_t by 32.
if (lhs->lower() == lhs->upper()) {
if (lhs->lower() == 0) {
return new (alloc) Range(*rhs);
}
if (lhs->lower() == -1) {
return new (alloc) Range(*lhs);
}
}
if (rhs->lower() == rhs->upper()) {
if (rhs->lower() == 0) {
return new (alloc) Range(*lhs);
}
if (rhs->lower() == -1) {
return new (alloc) Range(*rhs);
}
}
// The code below uses CountLeadingZeroes32, which has undefined behavior
// if its operand is 0. We rely on the code above to protect it.
MOZ_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0);
MOZ_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0);
MOZ_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1);
MOZ_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1);
int32_t lower = INT32_MIN;
int32_t upper = INT32_MAX;
if (lhs->lower() >= 0 && rhs->lower() >= 0) {
// Both operands are non-negative, so the result won't be less than either.
lower = std::max(lhs->lower(), rhs->lower());
// The result will have leading zeros where both operands have leading
// zeros. CountLeadingZeroes32 of a non-negative int32 is at least 1, to
// account for the sign bit.
upper = int32_t(UINT32_MAX >> std::min(CountLeadingZeroes32(lhs->upper()),
CountLeadingZeroes32(rhs->upper())));
} else {
// The result will have leading ones where either operand has leading ones.
if (lhs->upper() < 0) {
unsigned leadingOnes = CountLeadingZeroes32(~lhs->lower());
lower = std::max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
upper = -1;
}
if (rhs->upper() < 0) {
unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower());
lower = std::max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
upper = -1;
}
}
return Range::NewInt32Range(alloc, lower, upper);
}
Range* Range::xor_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
MOZ_ASSERT(lhs->isInt32());
MOZ_ASSERT(rhs->isInt32());
int32_t lhsLower = lhs->lower();
int32_t lhsUpper = lhs->upper();
int32_t rhsLower = rhs->lower();
int32_t rhsUpper = rhs->upper();
bool invertAfter = false;
// If either operand is negative, bitwise-negate it, and arrange to negate
// the result; ~((~x)^y) == x^y. If both are negative the negations on the
// result cancel each other out; effectively this is (~x)^(~y) == x^y.
// These transformations reduce the number of cases we have to handle below.
if (lhsUpper < 0) {
lhsLower = ~lhsLower;
lhsUpper = ~lhsUpper;
std::swap(lhsLower, lhsUpper);
invertAfter = !invertAfter;
}
if (rhsUpper < 0) {
rhsLower = ~rhsLower;
rhsUpper = ~rhsUpper;
std::swap(rhsLower, rhsUpper);
invertAfter = !invertAfter;
}
// Handle cases where lhs or rhs is always zero specially, because they're
// easy cases where we can be perfectly precise, and because it protects the
// CountLeadingZeroes32 calls below from seeing 0 operands, which would be
// undefined behavior.
int32_t lower = INT32_MIN;
int32_t upper = INT32_MAX;
if (lhsLower == 0 && lhsUpper == 0) {
upper = rhsUpper;
lower = rhsLower;
} else if (rhsLower == 0 && rhsUpper == 0) {
upper = lhsUpper;
lower = lhsLower;
} else if (lhsLower >= 0 && rhsLower >= 0) {
// Both operands are non-negative. The result will be non-negative.
lower = 0;
// To compute the upper value, take each operand's upper value and
// set all bits that don't correspond to leading zero bits in the
// other to one. For each one, this gives an upper bound for the
// result, so we can take the minimum between the two.
unsigned lhsLeadingZeros = CountLeadingZeroes32(lhsUpper);
unsigned rhsLeadingZeros = CountLeadingZeroes32(rhsUpper);
upper = std::min(rhsUpper | int32_t(UINT32_MAX >> lhsLeadingZeros),
lhsUpper | int32_t(UINT32_MAX >> rhsLeadingZeros));
}
// If we bitwise-negated one (but not both) of the operands above, apply the
// bitwise-negate to the result, completing ~((~x)^y) == x^y.
if (invertAfter) {
lower = ~lower;
upper = ~upper;
std::swap(lower, upper);
}
return Range::NewInt32Range(alloc, lower, upper);
}
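// Bitwise NOT reverses and negates the bounds: ~x == -x - 1 is
// monotonically decreasing, so e.g. a hypothetical I[0, 3] maps to
// I[-4, -1].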
Range* Range::not_(TempAllocator& alloc, const Range* op) {
MOZ_ASSERT(op->isInt32());
return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower());
}
Range* Range::mul(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(
(lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
(rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative()));
uint16_t exponent;
if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) {
// Two finite values.
exponent = lhs->numBits() + rhs->numBits() - 1;
if (exponent > Range::MaxFiniteExponent) {
exponent = Range::IncludesInfinity;
}
} else if (!lhs->canBeNaN() && !rhs->canBeNaN() &&
!(lhs->canBeZero() && rhs->canBeInfiniteOrNaN()) &&
!(rhs->canBeZero() && lhs->canBeInfiniteOrNaN())) {
// Two values that multiplied together won't produce a NaN.
exponent = Range::IncludesInfinity;
} else {
// Could be anything.
exponent = Range::IncludesInfinityAndNaN;
}
if (MissingAnyInt32Bounds(lhs, rhs)) {
return new (alloc)
Range(NoInt32LowerBound, NoInt32UpperBound, newCanHaveFractionalPart,
newMayIncludeNegativeZero, exponent);
}
int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
return new (alloc)
Range(std::min(std::min(a, b), std::min(c, d)),
std::max(std::max(a, b), std::max(c, d)), newCanHaveFractionalPart,
newMayIncludeNegativeZero, exponent);
}
Range* Range::lsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
MOZ_ASSERT(lhs->isInt32());
int32_t shift = c & 0x1f;
// If the shift doesn't lose bits or shift bits into the sign bit, we
// can simply compute the correct range by shifting.
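// For example, with a hypothetical range I[0, 3] and shift == 4, both
// bounds survive the round trip below, so the result is I[0, 48].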
if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) ==
lhs->lower() &&
(int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) ==
lhs->upper()) {
return Range::NewInt32Range(alloc, uint32_t(lhs->lower()) << shift,
uint32_t(lhs->upper()) << shift);
}
return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}
Range* Range::rsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
MOZ_ASSERT(lhs->isInt32());
int32_t shift = c & 0x1f;
return Range::NewInt32Range(alloc, lhs->lower() >> shift,
lhs->upper() >> shift);
}
Range* Range::ursh(TempAllocator& alloc, const Range* lhs, int32_t c) {
// ursh's left operand is uint32, not int32, but for range analysis we
// currently approximate it as int32. We assume here that the range has
// already been adjusted accordingly by our callers.
MOZ_ASSERT(lhs->isInt32());
int32_t shift = c & 0x1f;
// If the value is always non-negative or always negative, we can simply
// compute the correct range by shifting.
if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
return Range::NewUInt32Range(alloc, uint32_t(lhs->lower()) >> shift,
uint32_t(lhs->upper()) >> shift);
}
// Otherwise return the most general range after the shift.
return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
}
Range* Range::lsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
MOZ_ASSERT(lhs->isInt32());
MOZ_ASSERT(rhs->isInt32());
return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}
Range* Range::rsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
MOZ_ASSERT(lhs->isInt32());
MOZ_ASSERT(rhs->isInt32());
// Canonicalize the shift range to 0 to 31.
int32_t shiftLower = rhs->lower();
int32_t shiftUpper = rhs->upper();
if ((int64_t(shiftUpper) - int64_t(shiftLower)) >= 31) {
shiftLower = 0;
shiftUpper = 31;
} else {
shiftLower &= 0x1f;
shiftUpper &= 0x1f;
if (shiftLower > shiftUpper) {
shiftLower = 0;
shiftUpper = 31;
}
}
MOZ_ASSERT(shiftLower >= 0 && shiftUpper <= 31);
// The lhs bounds are signed. Thus, the minimum is the lower bound shifted
// by the smallest amount if the bound is negative, or by the largest
// amount otherwise; and symmetrically for the maximum.
int32_t lhsLower = lhs->lower();
int32_t min = lhsLower < 0 ? lhsLower >> shiftLower : lhsLower >> shiftUpper;
int32_t lhsUpper = lhs->upper();
int32_t max = lhsUpper >= 0 ? lhsUpper >> shiftLower : lhsUpper >> shiftUpper;
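// For example, a hypothetical lhs of I[-16, 24] with a shift range of
// [1, 2] gives min == -16 >> 1 == -8 and max == 24 >> 1 == 12.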
return Range::NewInt32Range(alloc, min, max);
}
Range* Range::ursh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
// ursh's left operand is uint32, not int32, but for range analysis we
// currently approximate it as int32. We assume here that the range has
// already been adjusted accordingly by our callers.
MOZ_ASSERT(lhs->isInt32());
MOZ_ASSERT(rhs->isInt32());
return Range::NewUInt32Range(
alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
}
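// For Range::abs, a hypothetical op range of I[-5, 3] gives I[0, 5]: the
// lower bound is 0 because the input range contains zero, and the upper
// bound is max(|-5|, |3|). INT32_MIN is special-cased below because its
// negation overflows int32.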
Range* Range::abs(TempAllocator& alloc, const Range* op) {
int32_t l = op->lower_;
int32_t u = op->upper_;
FractionalPartFlag canHaveFractionalPart = op->canHaveFractionalPart_;
// Abs never produces a negative zero.
NegativeZeroFlag canBeNegativeZero = ExcludesNegativeZero;
return new (alloc) Range(
std::max(std::max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u), true,
std::max(std::max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
op->hasInt32Bounds() && l != INT32_MIN, canHaveFractionalPart,
canBeNegativeZero, op->max_exponent_);
}
Range* Range::min(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
// If either operand is NaN, the result is NaN.
if (lhs->canBeNaN() || rhs->canBeNaN()) {
return nullptr;
}
FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
NegativeZeroFlag newMayIncludeNegativeZero =
NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);
return new (alloc) Range(std::min(lhs->lower_, rhs->lower_),
lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
std::min(lhs->upper_, rhs->upper_),
lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
newCanHaveFractionalPart, newMayIncludeNegativeZero,
std::max(lhs->max_exponent_, rhs->max_exponent_));
}
Range* Range::max(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
// If either operand is NaN, the result is NaN.
if (lhs->canBeNaN() || rhs->canBeNaN()) {
return nullptr;
}
FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
NegativeZeroFlag newMayIncludeNegativeZero =
NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);
return new (alloc) Range(std::max(lhs->lower_, rhs->lower_),
lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
std::max(lhs->upper_, rhs->upper_),
lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
newCanHaveFractionalPart, newMayIncludeNegativeZero,
std::max(lhs->max_exponent_, rhs->max_exponent_));
}
Range* Range::floor(TempAllocator& alloc, const Range* op) {
Range* copy = new (alloc) Range(*op);
// Decrement the lower bound of the copied range if op has a fractional
// part and the lower bound is Int32-defined. We avoid decrementing when op
// has a fractional part but lower_ >= JSVAL_INT_MAX.
if (op->canHaveFractionalPart() && op->hasInt32LowerBound()) {
copy->setLowerInit(int64_t(copy->lower_) - 1);
}
// Also refine max_exponent_, because floor may have decremented the int
// value. If we have int32-defined bounds, just deduce it from them.
// Otherwise the value's max_exponent_ may have changed; since we want to
// maintain an overestimate, we increment it if we can.
if (copy->hasInt32Bounds()) {
copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
} else if (copy->max_exponent_ < MaxFiniteExponent) {
copy->max_exponent_++;
}
copy->canHaveFractionalPart_ = ExcludesFractionalParts;
copy->assertInvariants();
return copy;
}
Range* Range::ceil(TempAllocator& alloc, const Range* op) {
Range* copy = new (alloc) Range(*op);
// We need to refine max_exponent_ because ceil may have incremented the int
// value. If we have int32-defined bounds, just deduce it from them.
// Otherwise we can just increment its value, as we are looking to maintain
// an overestimate.
if (copy->hasInt32Bounds()) {
copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
} else if (copy->max_exponent_ < MaxFiniteExponent) {
copy->max_exponent_++;
}
// If the range is definitely above 0 or below -1, we don't need to include
// -0; otherwise we do.
copy->canBeNegativeZero_ = ((copy->lower_ > 0) || (copy->upper_ <= -1))
? copy->canBeNegativeZero_
: IncludesNegativeZero;
copy->canHaveFractionalPart_ = ExcludesFractionalParts;
copy->assertInvariants();
return copy;
}
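// Math.sign maps every number to -1, 0, or 1 (or NaN), so clamping each
// bound into [-1, 1] is correct because sign is monotonic: e.g. a
// hypothetical input of F[-5, 10] maps to I[-1, 1]. The negative-zero flag
// is propagated since Math.sign(-0) == -0.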
Range* Range::sign(TempAllocator& alloc, const Range* op) {
if (op->canBeNaN()) {
return nullptr;
}
return new (alloc) Range(std::max(std::min(op->lower_, 1), -1),
std::max(std::min(op->upper_, 1), -1),
Range::ExcludesFractionalParts,
NegativeZeroFlag(op->canBeNegativeZero()), 0);
}
Range* Range::NaNToZero(TempAllocator& alloc, const Range* op) {
Range* copy = new (alloc) Range(*op);
if (copy->canBeNaN()) {
copy->max_exponent_ = Range::IncludesInfinity;
if (!copy->canBeZero()) {
Range zero;
zero.setDoubleSingleton(0);
copy->unionWith(&zero);
}
}
copy->refineToExcludeNegativeZero();
return copy;
}
bool Range::negativeZeroMul(const Range* lhs, const Range* rhs) {
// The result can only be negative zero if both sides are finite and they
// have differing signs.
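// For example, -1 * 0 == -0 and 0 * -1 == -0. canHaveSignBitSet() also
// covers -0 itself, since -0 times any finite non-negative value is -0.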
return (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
(rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative());
}
bool Range::update(const Range* other) {
bool changed = lower_ != other->lower_ ||
hasInt32LowerBound_ != other->hasInt32LowerBound_ ||
upper_ != other->upper_ ||
hasInt32UpperBound_ != other->hasInt32UpperBound_ ||
canHaveFractionalPart_ != other->canHaveFractionalPart_ ||
canBeNegativeZero_ != other->canBeNegativeZero_ ||
max_exponent_ != other->max_exponent_;
if (changed) {
lower_ = other->lower_;
hasInt32LowerBound_ = other->hasInt32LowerBound_;
upper_ = other->upper_;
hasInt32UpperBound_ = other->hasInt32UpperBound_;
canHaveFractionalPart_ = other->canHaveFractionalPart_;
canBeNegativeZero_ = other->canBeNegativeZero_;
max_exponent_ = other->max_exponent_;
assertInvariants();
}
return changed;
}
///////////////////////////////////////////////////////////////////////////////
// Range Computation for MIR Nodes
///////////////////////////////////////////////////////////////////////////////
void MPhi::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32 && type() != MIRType::Double) {
return;
}
Range* range = nullptr;
for (size_t i = 0, e = numOperands(); i < e; i++) {
if (getOperand(i)->block()->unreachable()) {
JitSpew(JitSpew_Range, "Ignoring unreachable input %u",
getOperand(i)->id());
continue;
}
// Peek at the pre-bailout range so we can take a short-cut; if any of
// the operands has an unknown range, this phi has an unknown range.
if (!getOperand(i)->range()) {
return;
}
Range input(getOperand(i));
if (range) {
range->unionWith(&input);
} else {
range = new (alloc) Range(input);
}
}
setRange(range);
}
void MBeta::computeRange(TempAllocator& alloc) {
bool emptyRange = false;
Range opRange(getOperand(0));
Range* range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
if (emptyRange) {
JitSpew(JitSpew_Range, "Marking block for inst %u unreachable", id());
block()->setUnreachableUnchecked();
} else {
setRange(range);
}
}
void MConstant::computeRange(TempAllocator& alloc) {
if (isTypeRepresentableAsDouble()) {
double d = numberToDouble();
setRange(Range::NewDoubleSingletonRange(alloc, d));
} else if (type() == MIRType::Boolean) {
bool b = toBoolean();
setRange(Range::NewInt32Range(alloc, b, b));
}
}
void MCharCodeAt::computeRange(TempAllocator& alloc) {
// ECMA 262 says that the integer will be non-negative and at most 65535.
setRange(Range::NewInt32Range(alloc, 0, 65535));
}
void MClampToUint8::computeRange(TempAllocator& alloc) {
setRange(Range::NewUInt32Range(alloc, 0, 255));
}
void MBitAnd::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32) {
return;
}
Range left(getOperand(0));
Range right(getOperand(1));
left.wrapAroundToInt32();
right.wrapAroundToInt32();
setRange(Range::and_(alloc, &left, &right));
}
void MBitOr::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32) {
return;
}
Range left(getOperand(0));
Range right(getOperand(1));
left.wrapAroundToInt32();
right.wrapAroundToInt32();
setRange(Range::or_(alloc, &left, &right));
}
void MBitXor::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32) {
return;
}
Range left(getOperand(0));
Range right(getOperand(1));
left.wrapAroundToInt32();
right.wrapAroundToInt32();
setRange(Range::xor_(alloc, &left, &right));
}
void MBitNot::computeRange(TempAllocator& alloc) {
if (type() == MIRType::Int64) {
return;
}
MOZ_ASSERT(type() == MIRType::Int32);
Range op(getOperand(0));
op.wrapAroundToInt32();
setRange(Range::not_(alloc, &op));
}
void MLsh::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32) {
return;
}
Range left(getOperand(0));
Range right(getOperand(1));
left.wrapAroundToInt32();
MConstant* rhsConst = getOperand(1)->maybeConstantValue();
if (rhsConst && rhsConst->type() == MIRType::Int32) {
int32_t c = rhsConst->toInt32();
setRange(Range::lsh(alloc, &left, c));
return;
}
right.wrapAroundToShiftCount();
setRange(Range::lsh(alloc, &left, &right));
}
void MRsh::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32) {
return;
}
Range left(getOperand(0));
Range right(getOperand(1));
left.wrapAroundToInt32();
MConstant* rhsConst = getOperand(1)->maybeConstantValue();
if (rhsConst && rhsConst->type() == MIRType::Int32) {
int32_t c = rhsConst->toInt32();
setRange(Range::rsh(alloc, &left, c));
return;
}
right.wrapAroundToShiftCount();
setRange(Range::rsh(alloc, &left, &right));
}
void MUrsh::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32) {
return;
}
Range left(getOperand(0));
Range right(getOperand(1));
// ursh can be thought of as converting its left operand to uint32, or it
// can be thought of as converting its left operand to int32, and then
// reinterpreting the int32 bits as a uint32 value. Both approaches yield
// the same result. Since we lack support for full uint32 ranges, we use
// the second interpretation, though it does cause us to be conservative.
left.wrapAroundToInt32();
right.wrapAroundToShiftCount();
MConstant* rhsConst = getOperand(1)->maybeConstantValue();
if (rhsConst && rhsConst->type() == MIRType::Int32) {
int32_t c = rhsConst->toInt32();
setRange(Range::ursh(alloc, &left, c));
} else {
setRange(Range::ursh(alloc, &left, &right));
}
MOZ_ASSERT(range()->lower() >= 0);
}
void MAbs::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32 && type() != MIRType::Double) {
return;
}
Range other(getOperand(0));
Range* next = Range::abs(alloc, &other);
if (implicitTruncate_) {
next->wrapAroundToInt32();
}
setRange(next);
}
void MFloor::computeRange(TempAllocator& alloc) {
Range other(getOperand(0));
setRange(Range::floor(alloc, &other));
}
void MCeil::computeRange(TempAllocator& alloc) {
Range other(getOperand(0));
setRange(Range::ceil(alloc, &other));
}
void MClz::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32) {
return;
}
setRange(Range::NewUInt32Range(alloc, 0, 32));
}
void MCtz::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32) {
return;
}
setRange(Range::NewUInt32Range(alloc, 0, 32));
}
void MPopcnt::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32) {
return;
}
setRange(Range::NewUInt32Range(alloc, 0, 32));
}
void MMinMax::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32 && type() != MIRType::Double) {
return;
}
Range left(getOperand(0));
Range right(getOperand(1));
setRange(isMax() ? Range::max(alloc, &left, &right)
: Range::min(alloc, &left, &right));
}
void MAdd::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32 && type() != MIRType::Double) {
return;
}
Range left(getOperand(0));
Range right(getOperand(1));
Range* next = Range::add(alloc, &left, &right);
if (isTruncated()) {
next->wrapAroundToInt32();
}
setRange(next);
}
void MSub::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32 && type() != MIRType::Double) {
return;
}
Range left(getOperand(0));
Range right(getOperand(1));
Range* next = Range::sub(alloc, &left, &right);
if (isTruncated()) {
next->wrapAroundToInt32();
}
setRange(next);
}
void MMul::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32 && type() != MIRType::Double) {
return;
}
Range left(getOperand(0));
Range right(getOperand(1));
if (canBeNegativeZero()) {
canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
}
Range* next = Range::mul(alloc, &left, &right);
if (!next->canBeNegativeZero()) {
canBeNegativeZero_ = false;
}
// Truncated multiplications could overflow in both directions.
if (isTruncated()) {
next->wrapAroundToInt32();
}
setRange(next);
}
void MMod::computeRange(TempAllocator& alloc) {
if (type() != MIRType::Int32 && type() != MIRType::Double) {
return;
}
Range lhs(getOperand(0));
Range rhs(