/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/WarpBuilder.h"
#include "mozilla/DebugOnly.h"
#include "jit/BaselineFrame.h"
#include "jit/CacheIR.h"
#include "jit/CompileInfo.h"
#include "jit/InlineScriptTree.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "jit/WarpCacheIRTranspiler.h"
#include "jit/WarpSnapshot.h"
#include "js/friend/ErrorMessages.h" // JSMSG_BAD_CONST_ASSIGN
#include "vm/GeneratorObject.h"
#include "vm/Interpreter.h"
#include "vm/Opcodes.h"
#include "gc/ObjectKind-inl.h"
#include "vm/BytecodeIterator-inl.h"
#include "vm/BytecodeLocation-inl.h"
using namespace js;
using namespace js::jit;
// Used for building the outermost script.
WarpBuilder::WarpBuilder(WarpSnapshot& snapshot, MIRGenerator& mirGen,
WarpCompilation* warpCompilation)
: WarpBuilderShared(snapshot, mirGen, nullptr),
warpCompilation_(warpCompilation),
graph_(mirGen.graph()),
info_(mirGen.outerInfo()),
scriptSnapshot_(snapshot.rootScript()),
script_(snapshot.rootScript()->script()),
loopStack_(mirGen.alloc()) {
// Start the op-snapshot cursor at the root script's first snapshot;
// getOpSnapshotImpl advances it monotonically by bytecode offset.
opSnapshotIter_ = scriptSnapshot_->opSnapshots().getFirst();
}
// Used for building inlined scripts.
WarpBuilder::WarpBuilder(WarpBuilder* caller, WarpScriptSnapshot* snapshot,
CompileInfo& compileInfo, CallInfo* inlineCallInfo,
MResumePoint* callerResumePoint)
// Inlined builders share the caller's snapshot, MIR generator, and graph,
// but get their own CompileInfo and script snapshot.
: WarpBuilderShared(caller->snapshot(), caller->mirGen(), nullptr),
warpCompilation_(caller->warpCompilation()),
graph_(caller->mirGen().graph()),
info_(compileInfo),
scriptSnapshot_(snapshot),
script_(snapshot->script()),
loopStack_(caller->mirGen().alloc()),
callerBuilder_(caller),
callerResumePoint_(callerResumePoint),
inlineCallInfo_(inlineCallInfo) {
// Each inlined script has its own op-snapshot list and cursor.
opSnapshotIter_ = snapshot->opSnapshots().getFirst();
}
// Allocate a BytecodeSite attributing MIR instructions to bytecode location
// |loc| in this (possibly inlined) script.
BytecodeSite* WarpBuilder::newBytecodeSite(BytecodeLocation loc) {
jsbytecode* pc = loc.toRawBytecode();
// The pc must belong to the script this builder is compiling.
MOZ_ASSERT(info().inlineScriptTree()->script()->containsPC(pc));
return new (alloc()) BytecodeSite(info().inlineScriptTree(), pc);
}
// Return the op snapshot of |kind| recorded for |loc|, or nullptr if there is
// none. The snapshot list is sorted by bytecode offset and the cursor only
// moves forward, so each snapshot is visited at most once per compilation.
const WarpOpSnapshot* WarpBuilder::getOpSnapshotImpl(
BytecodeLocation loc, WarpOpSnapshot::Kind kind) {
uint32_t offset = loc.bytecodeToOffset(script_);
// Skip snapshots recorded for bytecode before the requested offset. This is
// a loop because WarpBuilder can skip unreachable bytecode ops (and their
// snapshots).
while (opSnapshotIter_ && opSnapshotIter_->offset() < offset) {
opSnapshotIter_ = opSnapshotIter_->getNext();
}
if (!opSnapshotIter_ || opSnapshotIter_->offset() != offset ||
opSnapshotIter_->kind() != kind) {
return nullptr;
}
return opSnapshotIter_;
}
// Register |block| with the graph, propagate the current loop depth, and make
// it the block new instructions are appended to.
void WarpBuilder::initBlock(MBasicBlock* block) {
graph().addBlock(block);
block->setLoopDepth(loopDepth());
current = block;
}
// Start a new NORMAL block at |loc| that continues from |predecessor| after
// popping |numToPop| values from its stack. On success the new block becomes
// |current|. Returns false on OOM.
bool WarpBuilder::startNewBlock(MBasicBlock* predecessor, BytecodeLocation loc,
                                size_t numToPop) {
  BytecodeSite* site = newBytecodeSite(loc);
  MBasicBlock* newBlock = MBasicBlock::NewPopN(
      graph(), info(), predecessor, site, MBasicBlock::NORMAL, numToPop);
  if (!newBlock) {
    return false;
  }
  initBlock(newBlock);
  return true;
}
// Start a new predecessor-less NORMAL block (a graph entry point) at |loc|
// with the given stack depth. On success the new block becomes |current|.
// Returns false on OOM.
bool WarpBuilder::startNewEntryBlock(size_t stackDepth, BytecodeLocation loc) {
  BytecodeSite* site = newBytecodeSite(loc);
  MBasicBlock* entry =
      MBasicBlock::New(graph(), stackDepth, info(), /* maybePred = */ nullptr,
                       site, MBasicBlock::NORMAL);
  if (!entry) {
    return false;
  }
  initBlock(entry);
  return true;
}
// Start a pending loop header block at |loopHead| and push it on loopStack_
// so the matching backedge can close the loop later.
bool WarpBuilder::startNewLoopHeaderBlock(BytecodeLocation loopHead) {
MBasicBlock* header = MBasicBlock::NewPendingLoopHeader(
graph(), info(), current, newBytecodeSite(loopHead));
if (!header) {
return false;
}
initBlock(header);
return loopStack_.emplaceBack(header);
}
// Set up the blocks needed to enter the graph via OSR (on-stack replacement)
// from Baseline at the loop head |loopHead|, which must be the OSR pc.
bool WarpBuilder::startNewOsrPreHeaderBlock(BytecodeLocation loopHead) {
MOZ_ASSERT(loopHead.is(JSOp::LoopHead));
MOZ_ASSERT(loopHead.toRawBytecode() == info().osrPc());
// Create two blocks:
// * The OSR entry block. This is always the graph's second block and has no
// predecessors. This is the entry point for OSR from the Baseline JIT.
// * The OSR preheader block. This has two predecessors: the OSR entry block
// and the current block.
MBasicBlock* pred = current;
// Create the OSR entry block.
if (!startNewEntryBlock(pred->stackDepth(), loopHead)) {
return false;
}
MBasicBlock* osrBlock = current;
graph().setOsrBlock(osrBlock);
graph().moveBlockAfter(*graph().begin(), osrBlock);
MOsrEntry* entry = MOsrEntry::New(alloc());
osrBlock->add(entry);
// Initialize environment chain.
{
uint32_t slot = info().environmentChainSlot();
MInstruction* envv;
if (usesEnvironmentChain()) {
envv = MOsrEnvironmentChain::New(alloc(), entry);
} else {
// Use an undefined value if the script does not need its environment
// chain, to match the main entry point.
envv = MConstant::New(alloc(), UndefinedValue());
}
osrBlock->add(envv);
osrBlock->initSlot(slot, envv);
}
// Initialize return value.
{
MInstruction* returnValue;
if (!script_->noScriptRval()) {
returnValue = MOsrReturnValue::New(alloc(), entry);
} else {
returnValue = MConstant::New(alloc(), UndefinedValue());
}
osrBlock->add(returnValue);
osrBlock->initSlot(info().returnValueSlot(), returnValue);
}
// Initialize arguments object.
MInstruction* argsObj = nullptr;
if (info().needsArgsObj()) {
argsObj = MOsrArgumentsObject::New(alloc(), entry);
osrBlock->add(argsObj);
osrBlock->initSlot(info().argsObjSlot(), argsObj);
}
if (info().hasFunMaybeLazy()) {
// Initialize |this| parameter.
MParameter* thisv = MParameter::New(alloc(), MParameter::THIS_SLOT);
osrBlock->add(thisv);
osrBlock->initSlot(info().thisSlot(), thisv);
// Initialize arguments. There are three cases:
//
// 1) There's no ArgumentsObject or it doesn't alias formals. In this case
// we can just use the frame's argument slot.
// 2) The ArgumentsObject aliases formals and the argument is stored in the
// CallObject. Use |undefined| because we can't load from the arguments
// object and code will use the CallObject anyway.
// 3) The ArgumentsObject aliases formals and the argument isn't stored in
// the CallObject. We have to load it from the ArgumentsObject.
for (uint32_t i = 0; i < info().nargs(); i++) {
uint32_t slot = info().argSlotUnchecked(i);
MInstruction* osrv;
if (!info().argsObjAliasesFormals()) {
osrv = MParameter::New(alloc().fallible(), i);
} else if (script_->formalIsAliased(i)) {
osrv = MConstant::New(alloc().fallible(), UndefinedValue());
} else {
osrv = MGetArgumentsObjectArg::New(alloc().fallible(), argsObj, i);
}
if (!osrv) {
return false;
}
current->add(osrv);
current->initSlot(slot, osrv);
}
}
// Initialize locals.
uint32_t nlocals = info().nlocals();
for (uint32_t i = 0; i < nlocals; i++) {
uint32_t slot = info().localSlot(i);
ptrdiff_t offset = BaselineFrame::reverseOffsetOfLocal(i);
MOsrValue* osrv = MOsrValue::New(alloc().fallible(), entry, offset);
if (!osrv) {
return false;
}
current->add(osrv);
current->initSlot(slot, osrv);
}
// Initialize expression stack slots.
uint32_t numStackSlots = current->stackDepth() - info().firstStackSlot();
for (uint32_t i = 0; i < numStackSlots; i++) {
uint32_t slot = info().stackSlot(i);
// Stack values are stored in the Baseline frame after the locals.
ptrdiff_t offset = BaselineFrame::reverseOffsetOfLocal(nlocals + i);
MOsrValue* osrv = MOsrValue::New(alloc().fallible(), entry, offset);
if (!osrv) {
return false;
}
current->add(osrv);
current->initSlot(slot, osrv);
}
MStart* start = MStart::New(alloc());
current->add(start);
// Note: phi specialization can add type guard instructions to the OSR entry
// block if needed. See TypeAnalyzer::shouldSpecializeOsrPhis.
// Create the preheader block, with the predecessor block and OSR block as
// predecessors.
if (!startNewBlock(pred, loopHead)) {
return false;
}
pred->end(MGoto::New(alloc(), current));
osrBlock->end(MGoto::New(alloc(), current));
if (!current->addPredecessor(alloc(), osrBlock)) {
return false;
}
return true;
}
// Record that successor |successor| of |block|'s terminator jumps to |target|
// after popping |numToPop| stack values. Pending edges are resolved when the
// target op is compiled in build_JumpTarget.
bool WarpBuilder::addPendingEdge(BytecodeLocation target, MBasicBlock* block,
uint32_t successor, uint32_t numToPop) {
MOZ_ASSERT(successor < block->lastIns()->numSuccessors());
MOZ_ASSERT(numToPop <= block->stackDepth());
jsbytecode* targetPC = target.toRawBytecode();
PendingEdgesMap::AddPtr p = pendingEdges_.lookupForAdd(targetPC);
if (p) {
// Other edges to this target already exist; append to their list.
return p->value().emplaceBack(block, successor, numToPop);
}
// First edge to this target: create a new list with this edge in it.
PendingEdges edges;
static_assert(PendingEdges::InlineLength >= 1,
"Appending one element should be infallible");
MOZ_ALWAYS_TRUE(edges.emplaceBack(block, successor, numToPop));
return pendingEdges_.add(p, targetPC, std::move(edges));
}
// Build MIR for the outermost script: prologue, then the bytecode body.
// Returns false on OOM or compilation cancellation.
bool WarpBuilder::build() {
  if (!buildPrologue() || !buildBody()) {
    return false;
  }
  // Flag phis for live iterators so they are kept alive for exception
  // unwinding.
  if (!MPhi::markIteratorPhis(*iterators())) {
    return false;
  }
  // If OSR was requested, an OSR block must have been created, and all loops
  // must have been closed.
  MOZ_ASSERT_IF(info().osrPc(), graph().osrBlock());
  MOZ_ASSERT(loopStack_.empty());
  MOZ_ASSERT(loopDepth() == 0);
  return true;
}
// Build MIR for an inlined script: inline prologue, then the bytecode body.
// Returns false on OOM or compilation cancellation.
bool WarpBuilder::buildInline() {
  if (!buildInlinePrologue() || !buildBody()) {
    return false;
  }
  // All loops in the inlined script must have been closed.
  MOZ_ASSERT(loopStack_.empty());
  return true;
}
// Allocate and initialize the NamedLambdaObject environment for |callee|,
// chained to |env|. Returns the new environment object. No null checks are
// needed here: the allocations below use the infallible allocator.
MInstruction* WarpBuilder::buildNamedLambdaEnv(MDefinition* callee,
MDefinition* env,
NamedLambdaObject* templateObj) {
MOZ_ASSERT(!templateObj->hasDynamicSlots());
MInstruction* namedLambda = MNewNamedLambdaObject::New(alloc(), templateObj);
current->add(namedLambda);
#ifdef DEBUG
// Assert in debug mode we can elide the post write barriers.
current->add(MAssertCanElidePostWriteBarrier::New(alloc(), namedLambda, env));
current->add(
MAssertCanElidePostWriteBarrier::New(alloc(), namedLambda, callee));
#endif
// Initialize the object's reserved slots. No post barrier is needed here:
// the object will be allocated in the nursery if possible, and if the
// tenured heap is used instead, a minor collection will have been performed
// that moved env/callee to the tenured heap.
size_t enclosingSlot = NamedLambdaObject::enclosingEnvironmentSlot();
size_t lambdaSlot = NamedLambdaObject::lambdaSlot();
current->add(MStoreFixedSlot::NewUnbarriered(alloc(), namedLambda,
enclosingSlot, env));
current->add(MStoreFixedSlot::NewUnbarriered(alloc(), namedLambda, lambdaSlot,
callee));
return namedLambda;
}
// Allocate and initialize the CallObject environment for |callee|, chained to
// |env|, copying closed-over formal arguments into it. Returns the new
// environment object, or nullptr on OOM (from ensureBallast).
MInstruction* WarpBuilder::buildCallObject(MDefinition* callee,
MDefinition* env,
CallObject* templateObj) {
MConstant* templateCst = constant(ObjectValue(*templateObj));
MNewCallObject* callObj = MNewCallObject::New(alloc(), templateCst);
current->add(callObj);
#ifdef DEBUG
// Assert in debug mode we can elide the post write barriers.
current->add(MAssertCanElidePostWriteBarrier::New(alloc(), callObj, env));
current->add(MAssertCanElidePostWriteBarrier::New(alloc(), callObj, callee));
#endif
// Initialize the object's reserved slots. No post barrier is needed here,
// for the same reason as in buildNamedLambdaEnv.
size_t enclosingSlot = CallObject::enclosingEnvironmentSlot();
size_t calleeSlot = CallObject::calleeSlot();
current->add(
MStoreFixedSlot::NewUnbarriered(alloc(), callObj, enclosingSlot, env));
current->add(
MStoreFixedSlot::NewUnbarriered(alloc(), callObj, calleeSlot, callee));
// Copy closed-over argument slots if there aren't parameter expressions.
MSlots* slots = nullptr;
for (PositionalFormalParameterIter fi(script_); fi; fi++) {
if (!fi.closedOver()) {
continue;
}
if (!alloc().ensureBallast()) {
return nullptr;
}
uint32_t slot = fi.location().slot();
uint32_t formal = fi.argumentSlot();
uint32_t numFixedSlots = templateObj->numFixedSlots();
MDefinition* param;
if (script_->functionHasParameterExprs()) {
// With parameter expressions the formal values aren't available yet;
// store the uninitialized-lexical magic value instead.
param = constant(MagicValue(JS_UNINITIALIZED_LEXICAL));
} else {
param = current->getSlot(info().argSlotUnchecked(formal));
}
#ifdef DEBUG
// Assert in debug mode we can elide the post write barrier.
current->add(MAssertCanElidePostWriteBarrier::New(alloc(), callObj, param));
#endif
if (slot >= numFixedSlots) {
// The value lives in the dynamic slots; load the slots pointer once.
if (!slots) {
slots = MSlots::New(alloc(), callObj);
current->add(slots);
}
uint32_t dynamicSlot = slot - numFixedSlots;
current->add(MStoreDynamicSlot::NewUnbarriered(alloc(), slots,
dynamicSlot, param));
} else {
current->add(
MStoreFixedSlot::NewUnbarriered(alloc(), callObj, slot, param));
}
}
return callObj;
}
// Build the script's initial environment chain, as described by the snapshot:
// nothing, a known singleton object, or a function environment with optional
// named-lambda and call objects. Returns false on OOM.
bool WarpBuilder::buildEnvironmentChain() {
const WarpEnvironment& env = scriptSnapshot()->environment();
if (env.is<NoEnvironment>()) {
return true;
}
MInstruction* envDef = env.match(
[](const NoEnvironment&) -> MInstruction* {
MOZ_CRASH("Already handled");
},
[this](JSObject* obj) -> MInstruction* {
// A known constant environment object (e.g. for global scripts).
return constant(ObjectValue(*obj));
},
[this](const FunctionEnvironment& env) -> MInstruction* {
MDefinition* callee = getCallee();
MInstruction* envDef = MFunctionEnvironment::New(alloc(), callee);
current->add(envDef);
if (NamedLambdaObject* obj = env.namedLambdaTemplate) {
envDef = buildNamedLambdaEnv(callee, envDef, obj);
}
if (CallObject* obj = env.callObjectTemplate) {
envDef = buildCallObject(callee, envDef, obj);
if (!envDef) {
// OOM while building the call object.
return nullptr;
}
}
return envDef;
});
if (!envDef) {
return false;
}
// Update the environment slot from UndefinedValue only after the initial
// environment is created so that bailout doesn't see a partial environment.
// See: |BaselineStackBuilder::buildBaselineFrame|
current->setEnvironmentChain(envDef);
return true;
}
// Build the entry block for the outermost script: initialize all frame slots
// (|this|, arguments, locals, environment chain, return value), add the
// over-recursion check, and build the environment chain.
bool WarpBuilder::buildPrologue() {
BytecodeLocation startLoc(script_, script_->code());
if (!startNewEntryBlock(info().firstStackSlot(), startLoc)) {
return false;
}
if (info().hasFunMaybeLazy()) {
// Initialize |this|.
MParameter* param = MParameter::New(alloc(), MParameter::THIS_SLOT);
current->add(param);
current->initSlot(info().thisSlot(), param);
// Initialize arguments.
for (uint32_t i = 0; i < info().nargs(); i++) {
MParameter* param = MParameter::New(alloc().fallible(), i);
if (!param) {
return false;
}
current->add(param);
current->initSlot(info().argSlotUnchecked(i), param);
}
}
MConstant* undef = constant(UndefinedValue());
// Initialize local slots.
for (uint32_t i = 0; i < info().nlocals(); i++) {
current->initSlot(info().localSlot(i), undef);
}
// Initialize the environment chain, return value, and arguments object slots.
current->initSlot(info().environmentChainSlot(), undef);
current->initSlot(info().returnValueSlot(), undef);
if (info().needsArgsObj()) {
current->initSlot(info().argsObjSlot(), undef);
}
current->add(MStart::New(alloc()));
// Guard against over-recursion.
MCheckOverRecursed* check = MCheckOverRecursed::New(alloc());
current->add(check);
if (!buildEnvironmentChain()) {
return false;
}
#ifdef JS_CACHEIR_SPEW
if (snapshot().needsFinalWarmUpCount()) {
MIncrementWarmUpCounter* ins =
MIncrementWarmUpCounter::New(alloc(), script_);
current->add(ins);
}
#endif
return true;
}
// Build the entry block for an inlined script: connect it to the caller's
// current block and initialize frame slots from the inlined CallInfo instead
// of MParameter instructions.
bool WarpBuilder::buildInlinePrologue() {
// Generate entry block.
BytecodeLocation startLoc(script_, script_->code());
if (!startNewEntryBlock(info().firstStackSlot(), startLoc)) {
return false;
}
current->setCallerResumePoint(callerResumePoint());
// Connect the entry block to the last block in the caller's graph.
MBasicBlock* pred = callerBuilder()->current;
MOZ_ASSERT(pred == callerResumePoint()->block());
pred->end(MGoto::New(alloc(), current));
if (!current->addPredecessorWithoutPhis(pred)) {
return false;
}
MConstant* undef = constant(UndefinedValue());
// Initialize env chain slot to Undefined. It's set later by
// |buildEnvironmentChain|.
current->initSlot(info().environmentChainSlot(), undef);
// Initialize |return value| slot.
current->initSlot(info().returnValueSlot(), undef);
// Initialize |arguments| slot if needed.
if (info().needsArgsObj()) {
current->initSlot(info().argsObjSlot(), undef);
}
// Initialize |this| slot.
current->initSlot(info().thisSlot(), inlineCallInfo()->thisArg());
uint32_t callerArgs = inlineCallInfo()->argc();
uint32_t actualArgs = info().nargs();
uint32_t passedArgs = std::min<uint32_t>(callerArgs, actualArgs);
// Initialize actually set arguments.
for (uint32_t i = 0; i < passedArgs; i++) {
MDefinition* arg = inlineCallInfo()->getArg(i);
current->initSlot(info().argSlotUnchecked(i), arg);
}
// Pass undefined for missing arguments.
for (uint32_t i = passedArgs; i < actualArgs; i++) {
current->initSlot(info().argSlotUnchecked(i), undef);
}
// Initialize local slots.
for (uint32_t i = 0; i < info().nlocals(); i++) {
current->initSlot(info().localSlot(i), undef);
}
MOZ_ASSERT(current->entryResumePoint()->stackDepth() == info().totalSlots());
if (!buildEnvironmentChain()) {
return false;
}
return true;
}
#ifdef DEBUG
// In debug builds, after compiling a bytecode op, this class is used to check
// that all values popped by this opcode either:
//
// (1) Have the ImplicitlyUsed flag set on them.
// (2) Have more uses than before compiling this op (the value is
// used as operand of a new MIR instruction).
//
// This is used to catch problems where WarpBuilder pops a value without
// adding any SSA uses and doesn't call setImplicitlyUsedUnchecked on it.
class MOZ_RAII WarpPoppedValueUseChecker {
// Values on the stack that the op will pop, and their use counts before the
// op was compiled.
Vector<MDefinition*, 4, SystemAllocPolicy> popped_;
Vector<size_t, 4, SystemAllocPolicy> poppedUses_;
MBasicBlock* current_;
BytecodeLocation loc_;
public:
WarpPoppedValueUseChecker(MBasicBlock* current, BytecodeLocation loc)
: current_(current), loc_(loc) {}
// Snapshot the to-be-popped values and their use counts before the op is
// compiled. Returns false on OOM.
[[nodiscard]] bool init() {
// Don't require SSA uses for values popped by these ops.
switch (loc_.getOp()) {
case JSOp::Pop:
case JSOp::PopN:
case JSOp::DupAt:
case JSOp::Dup:
case JSOp::Dup2:
case JSOp::Pick:
case JSOp::Unpick:
case JSOp::Swap:
case JSOp::SetArg:
case JSOp::SetLocal:
case JSOp::InitLexical:
case JSOp::SetRval:
case JSOp::Void:
// Basic stack/local/argument management opcodes.
return true;
case JSOp::Case:
case JSOp::Default:
// These ops have to pop the switch value when branching but don't
// actually use it.
return true;
default:
break;
}
unsigned nuses = loc_.useCount();
for (unsigned i = 0; i < nuses; i++) {
MDefinition* def = current_->peek(-int32_t(i + 1));
if (!popped_.append(def) || !poppedUses_.append(def->defUseCount())) {
return false;
}
}
return true;
}
// Assert that each recorded value either gained a use while compiling the
// op or is flagged as implicitly used.
void checkAfterOp() {
for (size_t i = 0; i < popped_.length(); i++) {
// First value popped by JSOp::EndIter is not used at all, it's similar
// to JSOp::Pop above.
if (loc_.is(JSOp::EndIter) && i == 0) {
continue;
}
MOZ_ASSERT(popped_[i]->isImplicitlyUsed() ||
popped_[i]->defUseCount() > poppedUses_[i]);
}
}
};
#endif
// Compile every reachable bytecode op of the script by dispatching to the
// corresponding build_* method. Returns false on OOM or cancellation.
bool WarpBuilder::buildBody() {
for (BytecodeLocation loc : AllBytecodesIterable(script_)) {
if (mirGen().shouldCancel("WarpBuilder (opcode loop)")) {
return false;
}
// Skip unreachable ops (for example code after a 'return' or 'throw') until
// we get to the next jump target.
if (hasTerminatedBlock()) {
// Finish any "broken" loops with an unreachable backedge. For example:
//
// do {
// ...
// return;
// ...
// } while (x);
//
// This loop never actually loops.
if (loc.isBackedge() && !loopStack_.empty()) {
BytecodeLocation loopHead(script_, loopStack_.back().header()->pc());
if (loc.isBackedgeForLoophead(loopHead)) {
decLoopDepth();
loopStack_.popBack();
}
}
if (!loc.isJumpTarget()) {
continue;
}
}
if (!alloc().ensureBallast()) {
return false;
}
#ifdef DEBUG
// Verify the op leaves SSA uses (or the ImplicitlyUsed flag) on the values
// it pops; see WarpPoppedValueUseChecker above.
WarpPoppedValueUseChecker useChecker(current, loc);
if (!useChecker.init()) {
return false;
}
#endif
JSOp op = loc.getOp();
#define BUILD_OP(OP, ...) \
case JSOp::OP: \
if (MOZ_UNLIKELY(!this->build_##OP(loc))) { \
return false; \
} \
break;
switch (op) { FOR_EACH_OPCODE(BUILD_OP) }
#undef BUILD_OP
#ifdef DEBUG
useChecker.checkAfterOp();
#endif
}
return true;
}
// Ops in WARP_UNSUPPORTED_OPCODE_LIST get a stub that crashes: scripts using
// them are rejected before WarpBuilder runs.
#define DEF_OP(OP) \
bool WarpBuilder::build_##OP(BytecodeLocation) { \
MOZ_CRASH("Unsupported op"); \
}
WARP_UNSUPPORTED_OPCODE_LIST(DEF_OP)
#undef DEF_OP
// Ops that need no MIR at all.
bool WarpBuilder::build_Nop(BytecodeLocation) { return true; }
bool WarpBuilder::build_NopDestructuring(BytecodeLocation) { return true; }
bool WarpBuilder::build_TryDestructuring(BytecodeLocation) {
// Set the hasTryBlock flag to turn off optimizations that eliminate dead
// resume points operands because the exception handler code for
// TryNoteKind::Destructuring is effectively a (specialized) catch-block.
graph().setHasTryBlock();
return true;
}
bool WarpBuilder::build_Lineno(BytecodeLocation) { return true; }
bool WarpBuilder::build_DebugLeaveLexicalEnv(BytecodeLocation) { return true; }
// Ops that push a fixed constant value onto the expression stack.
bool WarpBuilder::build_Undefined(BytecodeLocation) {
pushConstant(UndefinedValue());
return true;
}
bool WarpBuilder::build_Void(BytecodeLocation) {
// Replace the top stack value with undefined.
current->pop();
pushConstant(UndefinedValue());
return true;
}
bool WarpBuilder::build_Null(BytecodeLocation) {
pushConstant(NullValue());
return true;
}
bool WarpBuilder::build_Hole(BytecodeLocation) {
pushConstant(MagicValue(JS_ELEMENTS_HOLE));
return true;
}
bool WarpBuilder::build_Uninitialized(BytecodeLocation) {
pushConstant(MagicValue(JS_UNINITIALIZED_LEXICAL));
return true;
}
bool WarpBuilder::build_IsConstructing(BytecodeLocation) {
pushConstant(MagicValue(JS_IS_CONSTRUCTING));
return true;
}
bool WarpBuilder::build_False(BytecodeLocation) {
pushConstant(BooleanValue(false));
return true;
}
bool WarpBuilder::build_True(BytecodeLocation) {
pushConstant(BooleanValue(true));
return true;
}
// Pure expression-stack manipulation ops; these emit no MIR instructions and
// only rearrange the block's slot bookkeeping.
bool WarpBuilder::build_Pop(BytecodeLocation) {
current->pop();
return true;
}
bool WarpBuilder::build_PopN(BytecodeLocation loc) {
for (uint32_t i = 0, n = loc.getPopCount(); i < n; i++) {
current->pop();
}
return true;
}
bool WarpBuilder::build_Dup(BytecodeLocation) {
// Push another reference to the top stack value.
current->pushSlot(current->stackDepth() - 1);
return true;
}
bool WarpBuilder::build_Dup2(BytecodeLocation) {
// Duplicate the top two stack values, preserving their order.
uint32_t lhsSlot = current->stackDepth() - 2;
uint32_t rhsSlot = current->stackDepth() - 1;
current->pushSlot(lhsSlot);
current->pushSlot(rhsSlot);
return true;
}
bool WarpBuilder::build_DupAt(BytecodeLocation loc) {
current->pushSlot(current->stackDepth() - 1 - loc.getDupAtIndex());
return true;
}
bool WarpBuilder::build_Swap(BytecodeLocation) {
// Exchange the top two stack values.
current->swapAt(-1);
return true;
}
bool WarpBuilder::build_Pick(BytecodeLocation loc) {
int32_t depth = -int32_t(loc.getPickDepth());
current->pick(depth);
return true;
}
bool WarpBuilder::build_Unpick(BytecodeLocation loc) {
int32_t depth = -int32_t(loc.getUnpickDepth());
current->unpick(depth);
return true;
}
// Ops that push a numeric, bigint, string, or symbol constant taken from the
// op's immediate operand or the script's GC-thing table.
bool WarpBuilder::build_Zero(BytecodeLocation) {
pushConstant(Int32Value(0));
return true;
}
bool WarpBuilder::build_One(BytecodeLocation) {
pushConstant(Int32Value(1));
return true;
}
bool WarpBuilder::build_Int8(BytecodeLocation loc) {
pushConstant(Int32Value(loc.getInt8()));
return true;
}
bool WarpBuilder::build_Uint16(BytecodeLocation loc) {
pushConstant(Int32Value(loc.getUint16()));
return true;
}
bool WarpBuilder::build_Uint24(BytecodeLocation loc) {
pushConstant(Int32Value(loc.getUint24()));
return true;
}
bool WarpBuilder::build_Int32(BytecodeLocation loc) {
pushConstant(Int32Value(loc.getInt32()));
return true;
}
bool WarpBuilder::build_Double(BytecodeLocation loc) {
pushConstant(loc.getInlineValue());
return true;
}
bool WarpBuilder::build_BigInt(BytecodeLocation loc) {
BigInt* bi = loc.getBigInt(script_);
pushConstant(BigIntValue(bi));
return true;
}
bool WarpBuilder::build_String(BytecodeLocation loc) {
JSString* str = loc.getString(script_);
pushConstant(StringValue(str));
return true;
}
bool WarpBuilder::build_Symbol(BytecodeLocation loc) {
uint32_t which = loc.getSymbolIndex();
JS::Symbol* sym = mirGen().runtime->wellKnownSymbols().get(which);
pushConstant(SymbolValue(sym));
return true;
}
// Push a freshly-cloned RegExp object for this op.
bool WarpBuilder::build_RegExp(BytecodeLocation loc) {
RegExpObject* reObj = loc.getRegExp(script_);
// NOTE(review): |snapshot| is dereferenced without a null check, so a
// WarpRegExp snapshot is presumably always recorded for this op during
// snapshotting — confirm against the snapshot-building code.
auto* snapshot = getOpSnapshot<WarpRegExp>(loc);
MRegExp* regexp = MRegExp::New(alloc(), reObj, snapshot->hasShared());
current->add(regexp);
current->push(regexp);
return true;
}
// Return the value on top of the stack and terminate the current block.
bool WarpBuilder::build_Return(BytecodeLocation) {
MDefinition* def = current->pop();
MReturn* ret = MReturn::New(alloc(), def);
current->end(ret);
if (!graph().addReturn(current)) {
return false;
}
setTerminatedBlock();
return true;
}
// Return the value stored in the return-value slot (or undefined for
// no-rval scripts) and terminate the current block.
bool WarpBuilder::build_RetRval(BytecodeLocation) {
MDefinition* rval;
if (script_->noScriptRval()) {
rval = constant(UndefinedValue());
} else {
rval = current->getSlot(info().returnValueSlot());
}
MReturn* ret = MReturn::New(alloc(), rval);
current->end(ret);
if (!graph().addReturn(current)) {
return false;
}
setTerminatedBlock();
return true;
}
// Store the top stack value into the frame's return-value slot.
bool WarpBuilder::build_SetRval(BytecodeLocation) {
MOZ_ASSERT(!script_->noScriptRval());
MDefinition* rval = current->pop();
current->setSlot(info().returnValueSlot(), rval);
return true;
}
// Push the current value of the frame's return-value slot.
bool WarpBuilder::build_GetRval(BytecodeLocation) {
MOZ_ASSERT(!script_->noScriptRval());
MDefinition* rval = current->getSlot(info().returnValueSlot());
current->push(rval);
return true;
}
// Local-slot reads and writes; these only touch slot bookkeeping.
bool WarpBuilder::build_GetLocal(BytecodeLocation loc) {
current->pushLocal(loc.local());
return true;
}
bool WarpBuilder::build_SetLocal(BytecodeLocation loc) {
current->setLocal(loc.local());
return true;
}
bool WarpBuilder::build_InitLexical(BytecodeLocation loc) {
current->setLocal(loc.local());
return true;
}
// Push formal argument |arg|, loading it from the arguments object when that
// object aliases the formals.
bool WarpBuilder::build_GetArg(BytecodeLocation loc) {
uint32_t arg = loc.arg();
if (info().argsObjAliasesFormals()) {
MDefinition* argsObj = current->argumentsObject();
auto* getArg = MGetArgumentsObjectArg::New(alloc(), argsObj, arg);
current->add(getArg);
current->push(getArg);
} else {
current->pushArg(arg);
}
return true;
}
// Store the top stack value into formal argument |arg|, writing through the
// arguments object when that object aliases the formals.
bool WarpBuilder::build_SetArg(BytecodeLocation loc) {
MOZ_ASSERT(script_->jitScript()->modifiesArguments());
uint32_t arg = loc.arg();
MDefinition* val = current->peek(-1);
if (!info().argsObjAliasesFormals()) {
// Either |arguments| is never referenced within this function, or
// it doesn't map to the actual arguments values. Either way, we
// don't need to worry about synchronizing the argument values
// when writing to them.
current->setArg(arg);
return true;
}
// If an arguments object is in use, and it aliases formals, then all SetArgs
// must go through the arguments object.
MDefinition* argsObj = current->argumentsObject();
current->add(MPostWriteBarrier::New(alloc(), argsObj, val));
auto* ins = MSetArgumentsObjectArg::New(alloc(), argsObj, val, arg);
current->add(ins);
// The store can trigger effects observable after a bailout, so resume after.
return resumeAfter(ins, loc);
}
// Unary and binary arithmetic/bitwise ops all go through an inline cache
// (CacheKind::UnaryArith / CacheKind::BinaryArith), transpiled from the
// CacheIR recorded by Baseline.
bool WarpBuilder::build_ToNumeric(BytecodeLocation loc) {
return buildUnaryOp(loc);
}
bool WarpBuilder::buildUnaryOp(BytecodeLocation loc) {
MDefinition* value = current->pop();
return buildIC(loc, CacheKind::UnaryArith, {value});
}
bool WarpBuilder::build_Inc(BytecodeLocation loc) { return buildUnaryOp(loc); }
bool WarpBuilder::build_Dec(BytecodeLocation loc) { return buildUnaryOp(loc); }
bool WarpBuilder::build_Pos(BytecodeLocation loc) { return buildUnaryOp(loc); }
bool WarpBuilder::build_Neg(BytecodeLocation loc) { return buildUnaryOp(loc); }
bool WarpBuilder::build_BitNot(BytecodeLocation loc) {
return buildUnaryOp(loc);
}
bool WarpBuilder::buildBinaryOp(BytecodeLocation loc) {
MDefinition* right = current->pop();
MDefinition* left = current->pop();
return buildIC(loc, CacheKind::BinaryArith, {left, right});
}
bool WarpBuilder::build_Add(BytecodeLocation loc) { return buildBinaryOp(loc); }
bool WarpBuilder::build_Sub(BytecodeLocation loc) { return buildBinaryOp(loc); }
bool WarpBuilder::build_Mul(BytecodeLocation loc) { return buildBinaryOp(loc); }
bool WarpBuilder::build_Div(BytecodeLocation loc) { return buildBinaryOp(loc); }
bool WarpBuilder::build_Mod(BytecodeLocation loc) { return buildBinaryOp(loc); }
bool WarpBuilder::build_Pow(BytecodeLocation loc) { return buildBinaryOp(loc); }
bool WarpBuilder::build_BitAnd(BytecodeLocation loc) {
return buildBinaryOp(loc);
}
bool WarpBuilder::build_BitOr(BytecodeLocation loc) {
return buildBinaryOp(loc);
}
bool WarpBuilder::build_BitXor(BytecodeLocation loc) {
return buildBinaryOp(loc);
}
bool WarpBuilder::build_Lsh(BytecodeLocation loc) { return buildBinaryOp(loc); }
bool WarpBuilder::build_Rsh(BytecodeLocation loc) { return buildBinaryOp(loc); }
bool WarpBuilder::build_Ursh(BytecodeLocation loc) {
return buildBinaryOp(loc);
}
// Comparison ops all go through a Compare inline cache, transpiled from the
// CacheIR recorded by Baseline.
bool WarpBuilder::buildCompareOp(BytecodeLocation loc) {
MDefinition* right = current->pop();
MDefinition* left = current->pop();
return buildIC(loc, CacheKind::Compare, {left, right});
}
bool WarpBuilder::build_Eq(BytecodeLocation loc) { return buildCompareOp(loc); }
bool WarpBuilder::build_Ne(BytecodeLocation loc) { return buildCompareOp(loc); }
bool WarpBuilder::build_Lt(BytecodeLocation loc) { return buildCompareOp(loc); }
bool WarpBuilder::build_Le(BytecodeLocation loc) { return buildCompareOp(loc); }
bool WarpBuilder::build_Gt(BytecodeLocation loc) { return buildCompareOp(loc); }
bool WarpBuilder::build_Ge(BytecodeLocation loc) { return buildCompareOp(loc); }
bool WarpBuilder::build_StrictEq(BytecodeLocation loc) {
return buildCompareOp(loc);
}
bool WarpBuilder::build_StrictNe(BytecodeLocation loc) {
return buildCompareOp(loc);
}
// Returns true iff the MTest added for |op| has a true-target corresponding
// with the join point in the bytecode.
static bool TestTrueTargetIsJoinPoint(JSOp op) {
  // Ops whose true-successor is the bytecode join point (fall-through).
  if (op == JSOp::JumpIfTrue || op == JSOp::Or || op == JSOp::Case) {
    return true;
  }
  // Ops whose false-successor is the bytecode join point.
  if (op == JSOp::JumpIfFalse || op == JSOp::And || op == JSOp::Coalesce) {
    return false;
  }
  MOZ_CRASH("Unexpected op");
}
// Resolve all pending edges that jump to this op: create (or extend) the join
// block and patch each source terminator's successor to point at it.
bool WarpBuilder::build_JumpTarget(BytecodeLocation loc) {
PendingEdgesMap::Ptr p = pendingEdges_.lookup(loc.toRawBytecode());
if (!p) {
// No (reachable) jumps so this is just a no-op.
return true;
}
PendingEdges edges(std::move(p->value()));
pendingEdges_.remove(p);
MOZ_ASSERT(!edges.empty());
// Create join block if there's fall-through from the previous bytecode op.
if (!hasTerminatedBlock()) {
MBasicBlock* pred = current;
if (!startNewBlock(pred, loc)) {
return false;
}
pred->end(MGoto::New(alloc(), current));
}
for (const PendingEdge& edge : edges) {
MBasicBlock* source = edge.block();
uint32_t numToPop = edge.numToPop();
if (hasTerminatedBlock()) {
// First edge with no fall-through: the join block starts here.
if (!startNewBlock(source, loc, numToPop)) {
return false;
}
} else {
// Join block exists; add this edge as another predecessor.
MOZ_ASSERT(source->stackDepth() - numToPop == current->stackDepth());
if (!current->addPredecessorPopN(alloc(), source, numToPop)) {
return false;
}
}
MOZ_ASSERT(source->lastIns()->isTest() || source->lastIns()->isGoto() ||
source->lastIns()->isTableSwitch());
source->lastIns()->initSuccessor(edge.successor(), current);
}
MOZ_ASSERT(!hasTerminatedBlock());
return true;
}
// Record loop phis for stack values the exception handler needs (for-in
// iterators and destructuring "done" values) so they aren't optimized out.
bool WarpBuilder::addIteratorLoopPhis(BytecodeLocation loopHead) {
// When unwinding the stack for a thrown exception, the exception handler must
// close live iterators. For ForIn and Destructuring loops, the exception
// handler needs access to values on the stack. To prevent them from being
// optimized away (and replaced with the JS_OPTIMIZED_OUT MagicValue), we need
// to mark the phis (and phis they flow into) as having implicit uses.
// See ProcessTryNotes in vm/Interpreter.cpp and CloseLiveIteratorIon in
// jit/JitFrames.cpp
MOZ_ASSERT(current->stackDepth() >= info().firstStackSlot());
bool emptyStack = current->stackDepth() == info().firstStackSlot();
if (emptyStack) {
return true;
}
jsbytecode* loopHeadPC = loopHead.toRawBytecode();
for (TryNoteIterAllNoGC tni(script_, loopHeadPC); !tni.done(); ++tni) {
const TryNote& tn = **tni;
// Stop if we reach an outer loop because outer loops were already
// processed when we visited their loop headers.
if (tn.isLoop()) {
BytecodeLocation tnStart = script_->offsetToLocation(tn.start);
if (tnStart != loopHead) {
MOZ_ASSERT(tnStart.is(JSOp::LoopHead));
MOZ_ASSERT(tnStart < loopHead);
return true;
}
}
switch (tn.kind()) {
case TryNoteKind::Destructuring:
case TryNoteKind::ForIn: {
// For for-in loops we add the iterator object to iterators(). For
// destructuring loops we add the "done" value that's on top of the
// stack and used in the exception handler.
MOZ_ASSERT(tn.stackDepth >= 1);
uint32_t slot = info().stackSlot(tn.stackDepth - 1);
MPhi* phi = current->getSlot(slot)->toPhi();
if (!iterators()->append(phi)) {
return false;
}
break;
}
case TryNoteKind::Loop:
case TryNoteKind::ForOf:
// Regular loops do not have iterators to close. ForOf loops handle
// unwinding using catch blocks.
break;
default:
break;
}
}
return true;
}
// Start a loop: create the (pending) loop header block, register iterator
// phis, and add an interrupt check. Also sets up OSR entry when this is the
// OSR pc.
bool WarpBuilder::build_LoopHead(BytecodeLocation loc) {
// All loops have the following bytecode structure:
//
// LoopHead
// ...
// JumpIfTrue/Goto to LoopHead
if (hasTerminatedBlock()) {
// The whole loop is unreachable.
return true;
}
// Handle OSR from Baseline JIT code.
if (loc.toRawBytecode() == info().osrPc()) {
if (!startNewOsrPreHeaderBlock(loc)) {
return false;
}
}
incLoopDepth();
MBasicBlock* pred = current;
if (!startNewLoopHeaderBlock(loc)) {
return false;
}
pred->end(MGoto::New(alloc(), current));
if (!addIteratorLoopPhis(loc)) {
return false;
}
// Loops must be interruptible so long-running scripts can be stopped.
MInterruptCheck* check = MInterruptCheck::New(alloc());
current->add(check);
#ifdef JS_CACHEIR_SPEW
if (snapshot().needsFinalWarmUpCount()) {
MIncrementWarmUpCounter* ins =
MIncrementWarmUpCounter::New(alloc(), script_);
current->add(ins);
}
#endif
return true;
}
bool WarpBuilder::buildTestOp(BytecodeLocation loc) {
  // Shared lowering for the conditional-branch ops (JumpIfTrue/JumpIfFalse/
  // And/Or/Case): pops the condition, emits an MTest, and records pending
  // edges for both successors.
  MDefinition* originalValue = current->peek(-1);

  if (auto* cacheIRSnapshot = getOpSnapshot<WarpCacheIR>(loc)) {
    // If we have CacheIR, we can use it to refine the input. Note that
    // the transpiler doesn't generate any control instructions. Instead,
    // we fall through and generate them below.
    MDefinition* value = current->pop();
    if (!TranspileCacheIRToMIR(this, loc, cacheIRSnapshot, {value})) {
      return false;
    }
  }

  if (loc.isBackedge()) {
    return buildTestBackedge(loc);
  }

  JSOp op = loc.getOp();
  BytecodeLocation target1 = loc.next();
  BytecodeLocation target2 = loc.getJumpTarget();

  if (TestTrueTargetIsJoinPoint(op)) {
    std::swap(target1, target2);
  }

  MDefinition* value = current->pop();

  // JSOp::And and JSOp::Or leave the top stack value unchanged. The
  // top stack value may have been converted to bool by a transpiled
  // ToBool IC, so we push the original value.
  bool mustKeepCondition = (op == JSOp::And || op == JSOp::Or);
  if (mustKeepCondition) {
    current->push(originalValue);
  }

  // If this op always branches to the same location we treat this as a
  // JSOp::Goto.
  if (target1 == target2) {
    value->setImplicitlyUsedUnchecked();
    return buildForwardGoto(target1);
  }

  MTest* test = MTest::New(alloc(), value, /* ifTrue = */ nullptr,
                           /* ifFalse = */ nullptr);
  current->end(test);

  // JSOp::Case must pop a second value on the true-branch (the input to the
  // switch-statement). Use the |op| local fetched above instead of
  // redundantly re-querying loc.getOp().
  uint32_t numToPop = (op == JSOp::Case) ? 1 : 0;
  if (!addPendingEdge(target1, current, MTest::TrueBranchIndex, numToPop)) {
    return false;
  }
  if (!addPendingEdge(target2, current, MTest::FalseBranchIndex)) {
    return false;
  }

  if (const auto* typesSnapshot = getOpSnapshot<WarpPolymorphicTypes>(loc)) {
    test->setObservedTypes(typesSnapshot->list());
  }

  setTerminatedBlock();
  return true;
}
bool WarpBuilder::buildTestBackedge(BytecodeLocation loc) {
  MOZ_ASSERT(loc.is(JSOp::JumpIfTrue));
  MOZ_ASSERT(loopDepth() > 0);

  MDefinition* value = current->pop();

  BytecodeLocation loopHead = loc.getJumpTarget();
  MOZ_ASSERT(loopHead.is(JSOp::LoopHead));

  // The false branch falls through to the op after this one.
  BytecodeLocation successor = loc.next();

  // We can finish the loop now. Use the loophead pc instead of the current pc
  // because the stack depth at the start of that op matches the current stack
  // depth (after popping our operand).
  MBasicBlock* pred = current;
  if (!startNewBlock(current, loopHead)) {
    return false;
  }

  // The new block (|current|) is the true-branch successor feeding the
  // backedge; the false branch is patched in later via the pending edge below.
  MTest* test = MTest::New(alloc(), value, /* ifTrue = */ current,
                           /* ifFalse = */ nullptr);
  pred->end(test);

  if (const auto* typesSnapshot = getOpSnapshot<WarpPolymorphicTypes>(loc)) {
    test->setObservedTypes(typesSnapshot->list());
  }

  if (!addPendingEdge(successor, pred, MTest::FalseBranchIndex)) {
    return false;
  }

  return buildBackedge();
}
bool WarpBuilder::build_JumpIfFalse(BytecodeLocation loc) {
  // All conditional branches share a single lowering path.
  return buildTestOp(loc);
}
bool WarpBuilder::build_JumpIfTrue(BytecodeLocation loc) {
  // All conditional branches share a single lowering path.
  return buildTestOp(loc);
}
bool WarpBuilder::build_And(BytecodeLocation loc) {
  // Lowered as a conditional branch that keeps the lhs on the stack.
  return buildTestOp(loc);
}
bool WarpBuilder::build_Or(BytecodeLocation loc) {
  // Lowered as a conditional branch that keeps the lhs on the stack.
  return buildTestOp(loc);
}
bool WarpBuilder::build_Case(BytecodeLocation loc) {
  // Lowered as a conditional branch; buildTestOp handles the extra pop on the
  // matching branch.
  return buildTestOp(loc);
}
bool WarpBuilder::build_Default(BytecodeLocation loc) {
  // No case matched: discard the switch input and jump unconditionally to the
  // default target.
  current->pop();
  BytecodeLocation defaultTarget = loc.getJumpTarget();
  return buildForwardGoto(defaultTarget);
}
bool WarpBuilder::build_Coalesce(BytecodeLocation loc) {
  // |a ?? b|: branch on whether the lhs (which stays on the stack) is
  // null/undefined. The true branch falls through to evaluate the rhs.
  BytecodeLocation fallthroughLoc = loc.next();
  BytecodeLocation jumpLoc = loc.getJumpTarget();
  MOZ_ASSERT(jumpLoc > fallthroughLoc);

  MDefinition* lhs = current->peek(-1);
  auto* isNullOrUndefined = MIsNullOrUndefined::New(alloc(), lhs);
  current->add(isNullOrUndefined);

  auto* test = MTest::New(alloc(), isNullOrUndefined, /* ifTrue = */ nullptr,
                          /* ifFalse = */ nullptr);
  current->end(test);

  if (!addPendingEdge(fallthroughLoc, current, MTest::TrueBranchIndex)) {
    return false;
  }
  if (!addPendingEdge(jumpLoc, current, MTest::FalseBranchIndex)) {
    return false;
  }

  setTerminatedBlock();
  return true;
}
bool WarpBuilder::buildBackedge() {
decLoopDepth();
MBasicBlock* header = loopStack_.popCopy().header();
current->end(MGoto::New(alloc(), header));
if (!header->setBackedge(current)) {
return false;
}
setTerminatedBlock();
return true;
}
bool WarpBuilder::buildForwardGoto(BytecodeLocation target) {
  // The target block doesn't exist yet, so end with a goto whose successor is
  // null and let the pending-edge machinery patch it in later.
  MGoto* jump = MGoto::New(alloc(), nullptr);
  current->end(jump);

  if (!addPendingEdge(target, current, MGoto::TargetIndex)) {
    return false;
  }
  setTerminatedBlock();
  return true;
}
bool WarpBuilder::build_Goto(BytecodeLocation loc) {
  // A backwards goto closes a loop; everything else is a plain forward jump.
  if (!loc.isBackedge()) {
    return buildForwardGoto(loc.getJumpTarget());
  }
  return buildBackedge();
}
bool WarpBuilder::build_IsNullOrUndefined(BytecodeLocation loc) {
  // Test the top stack value without consuming it.
  MDefinition* input = current->peek(-1);
  auto* testNullOrUndef = MIsNullOrUndefined::New(alloc(), input);
  current->add(testNullOrUndef);
  current->push(testNullOrUndef);
  return true;
}
bool WarpBuilder::build_DebugCheckSelfHosted(BytecodeLocation loc) {
#ifdef DEBUG
  // Debug builds replace the stack value with an instruction that asserts the
  // value is allowed to escape from self-hosted code; release builds emit
  // nothing.
  MDefinition* checkedValue = current->pop();
  auto* selfHostedCheck = MDebugCheckSelfHosted::New(alloc(), checkedValue);
  current->add(selfHostedCheck);
  current->push(selfHostedCheck);
  if (!resumeAfter(selfHostedCheck, loc)) {
    return false;
  }
#endif
  return true;
}
bool WarpBuilder::build_DynamicImport(BytecodeLocation loc) {
  // import(specifier, options): both operands come off the stack, with the
  // options object on top.
  MDefinition* importOptions = current->pop();
  MDefinition* moduleSpecifier = current->pop();
  auto* dynamicImport =
      MDynamicImport::New(alloc(), moduleSpecifier, importOptions);
  current->add(dynamicImport);
  current->push(dynamicImport);
  return resumeAfter(dynamicImport, loc);
}
bool WarpBuilder::build_Not(BytecodeLocation loc) {
  if (auto* cacheIRSnapshot = getOpSnapshot<WarpCacheIR>(loc)) {
    // A CacheIR snapshot lets us refine the operand before emitting the MNot;
    // the transpiler leaves the refined value on the stack.
    MDefinition* rawInput = current->pop();
    if (!TranspileCacheIRToMIR(this, loc, cacheIRSnapshot, {rawInput})) {
      return false;
    }
  }

  MDefinition* input = current->pop();
  MNot* notIns = MNot::New(alloc(), input);
  current->add(notIns);
  current->push(notIns);

  if (const auto* typesSnapshot = getOpSnapshot<WarpPolymorphicTypes>(loc)) {
    notIns->setObservedTypes(typesSnapshot->list());
  }
  return true;
}
bool WarpBuilder::build_ToString(BytecodeLocation loc) {
  MDefinition* input = current->pop();

  // Already a string: the conversion is a no-op, so push the operand back.
  if (input->type() == MIRType::String) {
    input->setImplicitlyUsedUnchecked();
    current->push(input);
    return true;
  }

  auto* toString =
      MToString::New(alloc(), input, MToString::SideEffectHandling::Supported);
  current->add(toString);
  current->push(toString);

  // An effectful conversion (may invoke toString/valueOf) needs a resume
  // point.
  if (!toString->isEffectful()) {
    return true;
  }
  return resumeAfter(toString, loc);
}
bool WarpBuilder::usesEnvironmentChain() const {
  // Delegates to the flag stored on this script's JitScript.
  auto* jitScript = script_->jitScript();
  return jitScript->usesEnvironmentChain();
}
bool WarpBuilder::build_GlobalOrEvalDeclInstantiation(BytecodeLocation loc) {
  // Only global scripts reach this point in Warp; eval scripts are rejected.
  MOZ_ASSERT(!script_->isForEval(), "Eval scripts not supported");
  auto* declInstantiation = MGlobalDeclInstantiation::New(alloc());
  current->add(declInstantiation);
  return resumeAfter(declInstantiation, loc);
}
bool WarpBuilder::build_BindVar(BytecodeLocation) {
  // Resolve the environment object for a var binding from the current
  // environment chain.
  MOZ_ASSERT(usesEnvironmentChain());

  MDefinition* envChain = current->environmentChain();
  auto* bindVar = MCallBindVar::New(alloc(), envChain);
  current->add(bindVar);
  current->push(bindVar);
  return true;
}
bool WarpBuilder::build_MutateProto(BytecodeLocation loc) {
  // |__proto__: v| in an object literal: pops the value, leaves the object
  // being initialized on the stack.
  MDefinition* protoValue = current->pop();
  MDefinition* literalObj = current->peek(-1);
  auto* mutateProto = MMutateProto::New(alloc(), literalObj, protoValue);
  current->add(mutateProto);
  return resumeAfter(mutateProto, loc);
}
MDefinition* WarpBuilder::getCallee() {
  // Inlined frames know their callee from the call site; the outermost script
  // loads it from the frame via MCallee.
  if (!inlineCallInfo()) {
    MInstruction* calleeIns = MCallee::New(alloc());
    current->add(calleeIns);
    return calleeIns;
  }
  return inlineCallInfo()->callee();
}
bool WarpBuilder::build_Callee(BytecodeLocation) {
  // Push the callee function for this (possibly inlined) frame.
  current->push(getCallee());
  return true;
}
bool WarpBuilder::