/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/BacktrackingAllocator.h"
#include <algorithm>
#include "jit/BitSet.h"
#include "jit/CompileInfo.h"
#include "js/Printf.h"
using namespace js;
using namespace js::jit;
using mozilla::DebugOnly;
// This is a big, complex file. Code is grouped into various sections, each
// preceded by a box comment. Sections not marked as "Misc helpers" are
// pretty much the top level flow, and are presented roughly in the same order
// in which the allocation pipeline operates. BacktrackingAllocator::go,
// right at the end of the file, is a good starting point.
///////////////////////////////////////////////////////////////////////////////
// //
// Misc helpers: linked-list management //
// //
///////////////////////////////////////////////////////////////////////////////
// Ordering predicate for the sorted use list: |a| may precede |b| when its
// position is no later than |b|'s (equal positions keep insertion order).
static inline bool SortBefore(UsePosition* a, UsePosition* b) {
  return !(b->pos < a->pos);
}
// Ordering predicate for ranges within a bundle. Ranges in a bundle never
// intersect (asserted below), so ordering by start position is total.
static inline bool SortBefore(LiveRange::BundleLink* a,
                              LiveRange::BundleLink* b) {
  LiveRange* first = LiveRange::get(a);
  LiveRange* second = LiveRange::get(b);
  MOZ_ASSERT(!first->intersects(second));
  return first->from() < second->from();
}
// Ordering predicate for ranges within a virtual register: sort by start
// position, allowing equal starts (equal starts keep insertion order).
static inline bool SortBefore(LiveRange::RegisterLink* a,
                              LiveRange::RegisterLink* b) {
  LiveRange* first = LiveRange::get(a);
  LiveRange* second = LiveRange::get(b);
  return first->from() <= second->from();
}
// Insert |value| into |list|, keeping the list ordered according to the
// SortBefore overload for T. O(1) for the common append case, O(n) otherwise.
template <typename T>
static inline void InsertSortedList(InlineForwardList<T>& list, T* value) {
  // Fast path: empty list.
  if (list.empty()) {
    list.pushFront(value);
    return;
  }
  // Fast path: |value| belongs at (or after) the current tail.
  if (SortBefore(list.back(), value)) {
    list.pushBack(value);
    return;
  }
  // Walk forward to find the last element that sorts before |value|.
  T* insertionPoint = nullptr;
  for (InlineForwardListIterator<T> it = list.begin(); it; it++) {
    if (SortBefore(value, *it)) {
      break;
    }
    insertionPoint = *it;
  }
  if (insertionPoint) {
    list.insertAfter(insertionPoint, value);
  } else {
    list.pushFront(value);
  }
}
///////////////////////////////////////////////////////////////////////////////
// //
// Misc helpers: methods for class SpillSet //
// //
///////////////////////////////////////////////////////////////////////////////
// Assign |alloc| (the chosen spill location) to every bundle in this set.
void SpillSet::setAllocation(LAllocation alloc) {
  const size_t count = numSpilledBundles();
  for (size_t i = 0; i < count; i++) {
    spilledBundle(i)->setAllocation(alloc);
  }
}
///////////////////////////////////////////////////////////////////////////////
// //
// Misc helpers: methods for class LiveRange //
// //
///////////////////////////////////////////////////////////////////////////////
// Map a use policy onto its contribution to a range's spill weight: uses
// requiring a register weigh more than ones that can tolerate memory.
static size_t SpillWeightFromUsePolicy(LUse::Policy policy) {
  if (policy == LUse::ANY) {
    return 1000;
  }
  if (policy == LUse::REGISTER || policy == LUse::FIXED) {
    return 2000;
  }
  return 0;
}
// Update the cached per-range statistics after |use| has been added.
inline void LiveRange::noteAddedUse(UsePosition* use) {
  const LUse::Policy policy = use->usePolicy();
  usesSpillWeight_ += SpillWeightFromUsePolicy(policy);
  if (policy == LUse::FIXED) {
    numFixedUses_++;
  }
}
// Update the cached per-range statistics after |use| has been removed.
inline void LiveRange::noteRemovedUse(UsePosition* use) {
  const LUse::Policy policy = use->usePolicy();
  usesSpillWeight_ -= SpillWeightFromUsePolicy(policy);
  if (policy == LUse::FIXED) {
    numFixedUses_--;
  }
  // Once the last use is gone, both cached counters must be back to zero.
  MOZ_ASSERT_IF(!hasUses(), !usesSpillWeight_ && !numFixedUses_);
}
// Insert |use| into this range's position-sorted use list and update the
// cached spill-weight statistics. |use| must lie within the range.
void LiveRange::addUse(UsePosition* use) {
  MOZ_ASSERT(covers(use->pos));
  InsertSortedList(uses_, use);
  noteAddedUse(use);
}
// Remove and return the first (earliest) use of this range, keeping the
// cached spill-weight statistics in sync.
UsePosition* LiveRange::popUse() {
  UsePosition* ret = uses_.popFront();
  noteRemovedUse(ret);
  return ret;
}
// Move every use of this range that falls within |other|'s boundaries over
// to |other|, and transfer the definition as well when the two ranges start
// at the same position. Both ranges must belong to the same vreg.
void LiveRange::distributeUses(LiveRange* other) {
  MOZ_ASSERT(&other->vreg() == &vreg());
  MOZ_ASSERT(this != other);

  // Move over all uses which fit in |other|'s boundaries.
  for (UsePositionIterator iter = usesBegin(); iter;) {
    UsePosition* use = *iter;
    if (other->covers(use->pos)) {
      // removeAndIncrement advances |iter| past the removed node, so the
      // iterator must not be advanced again on this path.
      uses_.removeAndIncrement(iter);
      noteRemovedUse(use);
      other->addUse(use);
    } else {
      iter++;
    }
  }

  // Distribute the definition to |other| as well, if possible.
  if (hasDefinition() && from() == other->from()) {
    other->setHasDefinition();
  }
}
// Whether |other|'s extent lies entirely within this range's extent.
bool LiveRange::contains(LiveRange* other) const {
  return from() <= other->from() && other->to() <= to();
}
// Split this range's extent into up to three pieces relative to |other|:
// |pre| is the part strictly before |other|, |inside| the overlapping part,
// and |post| the part strictly after. Output ranges that would be empty are
// left untouched; all three must be empty on entry.
void LiveRange::intersect(LiveRange* other, Range* pre, Range* inside,
                          Range* post) const {
  MOZ_ASSERT(pre->empty() && inside->empty() && post->empty());

  CodePosition innerFrom = from();
  if (from() < other->from()) {
    if (to() < other->from()) {
      // This range ends before |other| begins: no overlap, all "pre".
      *pre = range_;
      return;
    }
    *pre = Range(from(), other->from());
    innerFrom = other->from();
  }

  CodePosition innerTo = to();
  if (to() > other->to()) {
    if (from() >= other->to()) {
      // This range begins at/after |other| ends: no overlap, all "post".
      *post = range_;
      return;
    }
    *post = Range(other->to(), to());
    innerTo = other->to();
  }

  // The remaining middle section, if non-empty, is the intersection.
  if (innerFrom != innerTo) {
    *inside = Range(innerFrom, innerTo);
  }
}
// Whether this range and |other| overlap anywhere.
bool LiveRange::intersects(LiveRange* other) const {
  Range before, overlap, after;
  intersect(other, &before, &overlap, &after);
  return !overlap.empty();
}
///////////////////////////////////////////////////////////////////////////////
// //
// Misc helpers: methods for class LiveBundle //
// //
///////////////////////////////////////////////////////////////////////////////
#ifdef DEBUG
// Debug-only: count the ranges in this bundle by walking the linked list.
size_t LiveBundle::numRanges() const {
  size_t n = 0;
  LiveRange::BundleLinkIterator iter = rangesBegin();
  while (iter) {
    n++;
    iter++;
  }
  return n;
}
#endif
// Return the range in this bundle covering |pos|, or nullptr if none does.
// Ranges in a bundle never intersect, so at most one can match.
LiveRange* LiveBundle::rangeFor(CodePosition pos) const {
  for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
    LiveRange* candidate = LiveRange::get(*iter);
    if (candidate->covers(pos)) {
      return candidate;
    }
  }
  return nullptr;
}
// Take ownership of |range| (which must not already belong to a bundle) and
// insert it into this bundle's start-position-sorted range list.
void LiveBundle::addRange(LiveRange* range) {
  MOZ_ASSERT(!range->bundle());
  range->setBundle(this);
  InsertSortedList(ranges_, &range->bundleLink);
}
// Fallibly allocate a new range over [from, to) for |vreg| and add it to
// this bundle. Returns false on OOM.
bool LiveBundle::addRange(TempAllocator& alloc, VirtualRegister* vreg,
                          CodePosition from, CodePosition to) {
  if (LiveRange* range = LiveRange::FallibleNew(alloc, vreg, from, to)) {
    addRange(range);
    return true;
  }
  return false;
}
// Fallibly create a new range over [from, to) for |oldRange|'s vreg, add it
// to this bundle, then migrate |oldRange|'s uses that fall inside the new
// range. Returns false on OOM.
bool LiveBundle::addRangeAndDistributeUses(TempAllocator& alloc,
                                           LiveRange* oldRange,
                                           CodePosition from, CodePosition to) {
  LiveRange* newRange =
      LiveRange::FallibleNew(alloc, &oldRange->vreg(), from, to);
  if (!newRange) {
    return false;
  }
  addRange(newRange);
  oldRange->distributeUses(newRange);
  return true;
}
// Detach and return the bundle's first range (clearing its bundle pointer),
// or nullptr if the bundle is empty.
LiveRange* LiveBundle::popFirstRange() {
  LiveRange::BundleLinkIterator iter = rangesBegin();
  if (!iter) {
    return nullptr;
  }
  LiveRange* first = LiveRange::get(*iter);
  ranges_.removeAt(iter);
  first->setBundle(nullptr);
  return first;
}
// Remove |range| from this bundle's list. |range| must be present; walking
// off the end without a match is a bug and crashes.
void LiveBundle::removeRange(LiveRange* range) {
  LiveRange::BundleLinkIterator iter = rangesBegin();
  while (iter) {
    if (LiveRange::get(*iter) == range) {
      ranges_.removeAt(iter);
      return;
    }
    iter++;
  }
  MOZ_CRASH();
}
///////////////////////////////////////////////////////////////////////////////
// //
// Misc helpers: methods for class VirtualRegister //
// //
///////////////////////////////////////////////////////////////////////////////
// Add [from, to) to this register's live ranges during the initial liveness
// analysis, coalescing with any overlapping or adjacent existing ranges.
// |numRanges| counts ranges created so far across all vregs; once it exceeds
// the cap, even non-overlapping ranges are merged to bound memory use.
// Returns false on OOM.
bool VirtualRegister::addInitialRange(TempAllocator& alloc, CodePosition from,
                                      CodePosition to, size_t* numRanges) {
  MOZ_ASSERT(from < to);

  // Mark [from,to) as a live range for this register during the initial
  // liveness analysis, coalescing with any existing overlapping ranges.
  // On some pathological graphs there might be a huge number of different
  // live ranges. Allow non-overlapping live range to be merged if the
  // number of ranges exceeds the cap below.
  static const size_t CoalesceLimit = 100000;

  LiveRange* prev = nullptr;
  LiveRange* merged = nullptr;
  for (LiveRange::RegisterLinkIterator iter(rangesBegin()); iter;) {
    LiveRange* existing = LiveRange::get(*iter);

    // Past the cap this skip-ahead is disabled, so ranges that merely
    // precede the new one get swallowed into the merged range below.
    if (from > existing->to() && *numRanges < CoalesceLimit) {
      // The new range should go after this one.
      prev = existing;
      iter++;
      continue;
    }

    // Note: to.next() makes ranges that merely abut coalesce as well.
    if (to.next() < existing->from()) {
      // The new range should go before this one.
      break;
    }

    if (!merged) {
      // This is the first old range we've found that overlaps the new
      // range. Extend this one to cover its union with the new range.
      merged = existing;

      if (from < existing->from()) {
        existing->setFrom(from);
      }
      if (to > existing->to()) {
        existing->setTo(to);
      }

      // Continue searching to see if any other old ranges can be
      // coalesced with the new merged range.
      iter++;
      continue;
    }

    // Coalesce this range into the previous range we merged into.
    MOZ_ASSERT(existing->from() >= merged->from());
    if (existing->to() > merged->to()) {
      merged->setTo(existing->to());
    }
    MOZ_ASSERT(!existing->hasDefinition());
    existing->distributeUses(merged);
    MOZ_ASSERT(!existing->hasUses());
    // removeAndIncrement advances |iter| past the removed entry.
    ranges_.removeAndIncrement(iter);
  }

  if (!merged) {
    // The new range does not overlap any existing range for the vreg.
    LiveRange* range = LiveRange::FallibleNew(alloc, this, from, to);
    if (!range) {
      return false;
    }

    // Keep the list sorted: insert after the last range that ends before us.
    if (prev) {
      ranges_.insertAfter(&prev->registerLink, &range->registerLink);
    } else {
      ranges_.pushFront(&range->registerLink);
    }

    (*numRanges)++;
  }

  return true;
}
// Attach |use| to this register's first (earliest) range. Only used during
// initial liveness construction; the first range must cover the use
// (asserted inside addUse).
void VirtualRegister::addInitialUse(UsePosition* use) {
  LiveRange::get(*rangesBegin())->addUse(use);
}
// Record that this register is defined at |from|: shorten the front of the
// earliest range to the definition point and mark it as having the def.
void VirtualRegister::setInitialDefinition(CodePosition from) {
  LiveRange* range = LiveRange::get(*rangesBegin());
  MOZ_ASSERT(from >= range->from());
  range->setFrom(from);
  range->setHasDefinition();
}
// Return a range of this register covering |pos|, or nullptr. When
// |preferRegister| is set, a range whose bundle got a register wins over the
// first covering range found.
LiveRange* VirtualRegister::rangeFor(CodePosition pos,
                                     bool preferRegister /* = false */) const {
  LiveRange* fallback = nullptr;
  for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
    LiveRange* range = LiveRange::get(*iter);
    if (!range->covers(pos)) {
      continue;
    }
    if (!preferRegister || range->bundle()->allocation().isRegister()) {
      return range;
    }
    if (!fallback) {
      fallback = range;
    }
  }
  return fallback;
}
// Insert |range| into this register's list of ranges, which is kept sorted
// by ascending start position.
void VirtualRegister::addRange(LiveRange* range) {
  InsertSortedList(ranges_, &range->registerLink);
}
// Remove |range| from this register's list. |range| must be present;
// walking off the end without a match is a bug and crashes.
void VirtualRegister::removeRange(LiveRange* range) {
  LiveRange::RegisterLinkIterator iter = rangesBegin();
  while (iter) {
    if (LiveRange::get(*iter) == range) {
      ranges_.removeAt(iter);
      return;
    }
    iter++;
  }
  MOZ_CRASH();
}
///////////////////////////////////////////////////////////////////////////////
// //
// Misc helpers: queries about uses //
// //
///////////////////////////////////////////////////////////////////////////////
// Return the def or temp of |node| that has a MUST_REUSE_INPUT policy whose
// reused operand is |alloc|, or nullptr if there is none. Phis never reuse
// inputs.
static inline LDefinition* FindReusingDefOrTemp(LNode* node,
                                                LAllocation* alloc) {
  if (node->isPhi()) {
    MOZ_ASSERT(node->toPhi()->numDefs() == 1);
    MOZ_ASSERT(node->toPhi()->getDef(0)->policy() !=
               LDefinition::MUST_REUSE_INPUT);
    return nullptr;
  }

  LInstruction* ins = node->toInstruction();

  // Shared predicate for defs and temps.
  auto reusesAlloc = [&](LDefinition* def) {
    return def->policy() == LDefinition::MUST_REUSE_INPUT &&
           ins->getOperand(def->getReusedInput()) == alloc;
  };

  for (size_t i = 0; i < ins->numDefs(); i++) {
    LDefinition* def = ins->getDef(i);
    if (reusesAlloc(def)) {
      return def;
    }
  }
  for (size_t i = 0; i < ins->numTemps(); i++) {
    LDefinition* temp = ins->getTemp(i);
    if (reusesAlloc(temp)) {
      return temp;
    }
  }
  return nullptr;
}
// Whether |use| is the reused input of some def/temp on |ins|. When
// |considerCopy| is false, a vreg that must copy its input doesn't count.
bool BacktrackingAllocator::isReusedInput(LUse* use, LNode* ins,
                                          bool considerCopy) {
  LDefinition* def = FindReusingDefOrTemp(ins, use);
  if (!def) {
    return false;
  }
  return considerCopy || !vregs[def->virtualRegister()].mustCopyInput();
}
// Whether |use| effectively requires a register: REGISTER/FIXED policies
// always do; ANY does only when it is a reused input of |ins|.
bool BacktrackingAllocator::isRegisterUse(UsePosition* use, LNode* ins,
                                          bool considerCopy) {
  const LUse::Policy policy = use->usePolicy();
  if (policy == LUse::REGISTER || policy == LUse::FIXED) {
    return true;
  }
  if (policy == LUse::ANY) {
    return isReusedInput(use->use(), ins, considerCopy);
  }
  return false;
}
// Whether |range|'s definition produces its value in a register: it must
// have a definition, not be a phi, and not be fixed to a non-register
// location (stack/argument slot).
bool BacktrackingAllocator::isRegisterDefinition(LiveRange* range) {
  if (!range->hasDefinition()) {
    return false;
  }

  VirtualRegister& reg = range->vreg();
  if (reg.ins()->isPhi()) {
    return false;
  }

  return reg.def()->policy() != LDefinition::FIXED ||
         reg.def()->output()->isRegister();
}
///////////////////////////////////////////////////////////////////////////////
// //
// Misc helpers: atomic LIR groups //
// //
///////////////////////////////////////////////////////////////////////////////
// The following groupings contain implicit (invisible to ::buildLivenessInfo)
// value flows, and therefore no split points may be requested inside them.
// This is an otherwise unstated part of the contract between LIR generation
// and the allocator.
//
// (1) (any insn) ; OsiPoint
//
// [Further group definitions and supporting code to come, pending rework
// of the wasm atomic-group situation.]
// Compute the shortest interval that captures vregs defined by ins.
// Watch for instructions that are followed by an OSI point: if moves are
// introduced between the instruction and the OSI point then safepoint
// information for the instruction may be incorrect, so extend past any run
// of trailing OSI points.
CodePosition RegisterAllocator::minimalDefEnd(LNode* ins) const {
  for (LNode* next = insData[ins->id() + 1]; next->isOsiPoint();
       next = insData[ins->id() + 1]) {
    ins = next;
  }
  return outputOf(ins);
}
///////////////////////////////////////////////////////////////////////////////
// //
// Misc helpers: computation of bundle priorities and spill weights //
// //
///////////////////////////////////////////////////////////////////////////////
// The priority of a bundle is its total length, so that longer lived
// bundles will be processed before shorter ones (even if the longer ones
// have a low spill weight). See processBundle().
size_t BacktrackingAllocator::computePriority(LiveBundle* bundle) {
  size_t total = 0;
  LiveRange::BundleLinkIterator iter = bundle->rangesBegin();
  while (iter) {
    LiveRange* range = LiveRange::get(*iter);
    total += range->to() - range->from();
    iter++;
  }
  return total;
}
// Whether this is a minimal range capturing a definition at ins: it must
// end no later than the minimal def interval, and start exactly at the
// output of ins (or, for non-phis, at its input).
bool BacktrackingAllocator::minimalDef(LiveRange* range, LNode* ins) {
  if (range->to() > minimalDefEnd(ins).next()) {
    return false;
  }
  if (range->from() == outputOf(ins)) {
    return true;
  }
  return !ins->isPhi() && range->from() == inputOf(ins);
}
// Whether this is a minimal range capturing |use|: it starts at the
// instruction's input and ends at its output (or just past it for uses not
// flagged used-at-start).
bool BacktrackingAllocator::minimalUse(LiveRange* range, UsePosition* use) {
  LNode* ins = insData[use->pos];
  if (range->from() != inputOf(ins)) {
    return false;
  }
  CodePosition expectedEnd =
      use->use()->usedAtStart() ? outputOf(ins) : outputOf(ins).next();
  return range->to() == expectedEnd;
}
// Whether |bundle| is "minimal": it cannot usefully be split further, so it
// must be allocated a register (possibly by evicting other bundles). If
// |pfixed| is non-null, it is set to whether the bundle is constrained to a
// specific (fixed) register.
//
// Fix: the vreg-less branch previously wrote through |pfixed| without the
// null check that every other branch performs; guard it for consistency.
bool BacktrackingAllocator::minimalBundle(LiveBundle* bundle, bool* pfixed) {
  LiveRange::BundleLinkIterator iter = bundle->rangesBegin();
  LiveRange* range = LiveRange::get(*iter);

  // Ranges with no virtual register model fixed-register requirements; they
  // are always both minimal and fixed.
  if (!range->hasVreg()) {
    if (pfixed) {
      *pfixed = true;
    }
    return true;
  }

  // If a bundle contains multiple ranges, splitAtAllRegisterUses will split
  // each range into a separate bundle.
  if (++iter) {
    return false;
  }

  if (range->hasDefinition()) {
    VirtualRegister& reg = range->vreg();
    if (pfixed) {
      *pfixed = reg.def()->policy() == LDefinition::FIXED &&
                reg.def()->output()->isRegister();
    }
    return minimalDef(range, reg.ins());
  }

  bool fixed = false, minimal = false, multiple = false;

  for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
    if (iter != range->usesBegin()) {
      multiple = true;
    }

    switch (iter->usePolicy()) {
      case LUse::FIXED:
        // Two fixed uses can never be satisfied by a single register.
        if (fixed) {
          return false;
        }
        fixed = true;
        if (minimalUse(range, *iter)) {
          minimal = true;
        }
        break;

      case LUse::REGISTER:
        if (minimalUse(range, *iter)) {
          minimal = true;
        }
        break;

      default:
        break;
    }
  }

  // If a range contains a fixed use and at least one other use,
  // splitAtAllRegisterUses will split each use into a different bundle.
  if (multiple && fixed) {
    minimal = false;
  }

  if (pfixed) {
    *pfixed = fixed;
  }
  return minimal;
}
// Estimate how costly it would be to spill |bundle|, expressed as a use
// density: the accumulated weight of its defs/uses divided by its total
// code length (computePriority).
size_t BacktrackingAllocator::computeSpillWeight(LiveBundle* bundle) {
  // Minimal bundles have an extremely high spill weight, to ensure they
  // can evict any other bundles and be allocated to a register.
  bool fixed;
  if (minimalBundle(bundle, &fixed)) {
    return fixed ? 2000000 : 1000000;
  }

  size_t usesTotal = 0;
  fixed = false;

  for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter;
       iter++) {
    LiveRange* range = LiveRange::get(*iter);

    if (range->hasDefinition()) {
      VirtualRegister& reg = range->vreg();
      if (reg.def()->policy() == LDefinition::FIXED &&
          reg.def()->output()->isRegister()) {
        // Fixed register definition: costly to spill and pins a register.
        usesTotal += 2000;
        fixed = true;
      } else if (!reg.ins()->isPhi()) {
        // Non-phi definitions weigh like a register use; phi "definitions"
        // are just moves and add nothing.
        usesTotal += 2000;
      }
    }

    // Per-use weight is maintained incrementally on the range.
    usesTotal += range->usesSpillWeight();
    if (range->numFixedUses() > 0) {
      fixed = true;
    }
  }

  // Bundles with fixed uses are given a higher spill weight, since they must
  // be allocated to a specific register.
  if (testbed && fixed) {
    usesTotal *= 2;
  }

  // Compute spill weight as a use density, lowering the weight for long
  // lived bundles with relatively few uses.
  size_t lifetimeTotal = computePriority(bundle);
  return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
}
// Return the largest spill weight among |bundles| (0 for an empty vector).
size_t BacktrackingAllocator::maximumSpillWeight(
    const LiveBundleVector& bundles) {
  size_t result = 0;
  for (size_t i = 0; i < bundles.length(); i++) {
    size_t weight = computeSpillWeight(bundles[i]);
    if (weight > result) {
      result = weight;
    }
  }
  return result;
}
///////////////////////////////////////////////////////////////////////////////
// //
// Initialization of the allocator //
// //
///////////////////////////////////////////////////////////////////////////////
// This function pre-allocates and initializes as much global state as possible
// to avoid littering the algorithms with memory management cruft.
// Pre-allocate and initialize the allocator's global state: liveIn bitsets,
// the vreg table, per-register bookkeeping, and the hot-code map. Returns
// false on OOM or compilation cancellation.
bool BacktrackingAllocator::init() {
  if (!RegisterAllocator::init()) {
    return false;
  }

  liveIn = mir->allocate<BitSet>(graph.numBlockIds());
  if (!liveIn) {
    return false;
  }

  size_t numVregs = graph.numVirtualRegisters();
  if (!vregs.init(mir->alloc(), numVregs)) {
    return false;
  }
  // The backing store is raw memory; placement-new each entry.
  for (uint32_t i = 0; i < numVregs; i++) {
    new (&vregs[i]) VirtualRegister();
  }

  // Build virtual register objects.
  for (size_t i = 0; i < graph.numBlocks(); i++) {
    if (mir->shouldCancel("Create data structures (main loop)")) {
      return false;
    }

    LBlock* block = graph.getBlock(i);
    for (LInstructionIterator ins = block->begin(); ins != block->end();
         ins++) {
      if (mir->shouldCancel("Create data structures (inner loop 1)")) {
        return false;
      }

      for (size_t j = 0; j < ins->numDefs(); j++) {
        LDefinition* def = ins->getDef(j);
        if (def->isBogusTemp()) {
          continue;
        }
        vreg(def).init(*ins, def, /* isTemp = */ false);
      }

      for (size_t j = 0; j < ins->numTemps(); j++) {
        LDefinition* def = ins->getTemp(j);
        if (def->isBogusTemp()) {
          continue;
        }
        vreg(def).init(*ins, def, /* isTemp = */ true);
      }
    }
    for (size_t j = 0; j < block->numPhis(); j++) {
      LPhi* phi = block->getPhi(j);
      LDefinition* def = phi->getDef(0);
      vreg(def).init(phi, def, /* isTemp = */ false);
    }
  }

  // Mark every register in the allocatable set, general and float alike.
  LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet());
  while (!remainingRegisters.emptyGeneral()) {
    AnyRegister reg = AnyRegister(remainingRegisters.takeAnyGeneral());
    registers[reg.code()].allocatable = true;
  }
  while (!remainingRegisters.emptyFloat()) {
    AnyRegister reg =
        AnyRegister(remainingRegisters.takeAnyFloat<RegTypeName::Any>());
    registers[reg.code()].allocatable = true;
  }

  LifoAlloc* lifoAlloc = mir->alloc().lifoAlloc();
  for (size_t i = 0; i < AnyRegister::Total; i++) {
    registers[i].reg = AnyRegister::FromCode(i);
    registers[i].allocations.setAllocator(lifoAlloc);
  }

  hotcode.setAllocator(lifoAlloc);
  callRanges.setAllocator(lifoAlloc);

  // Partition the graph into hot and cold sections, for helping to make
  // splitting decisions. Since we don't have any profiling data this is a
  // crapshoot, so just mark the bodies of inner loops as hot and everything
  // else as cold.
  LBlock* backedge = nullptr;
  for (size_t i = 0; i < graph.numBlocks(); i++) {
    LBlock* block = graph.getBlock(i);

    // If we see a loop header, mark the backedge so we know when we have
    // hit the end of the loop. Don't process the loop immediately, so that
    // if there is an inner loop we will ignore the outer backedge.
    if (block->mir()->isLoopHeader()) {
      backedge = block->mir()->backedge()->lir();
    }

    if (block == backedge) {
      // Record [loop header entry, backedge exit) as hot code.
      LBlock* header = block->mir()->loopHeaderOfBackedge()->lir();
      LiveRange* range = LiveRange::FallibleNew(
          alloc(), nullptr, entryOf(header), exitOf(block).next());
      if (!range || !hotcode.insert(range)) {
        return false;
      }
    }
  }

  return true;
}
///////////////////////////////////////////////////////////////////////////////
// //
// Liveness analysis //
// //
///////////////////////////////////////////////////////////////////////////////
// Helper for ::buildLivenessInfo
// Record that physical register |reg| is unavailable over [from, to) by
// inserting a vreg-less range into its allocation map. False on OOM.
bool BacktrackingAllocator::addInitialFixedRange(AnyRegister reg,
                                                 CodePosition from,
                                                 CodePosition to) {
  LiveRange* range = LiveRange::FallibleNew(alloc(), nullptr, from, to);
  if (!range) {
    return false;
  }
  return registers[reg.code()].allocations.insert(range);
}
// Helper for ::buildLivenessInfo
#ifdef DEBUG
// Returns true iff ins has a def/temp reusing the input allocation.
static bool IsInputReused(LInstruction* ins, LUse* use) {
  // Shared predicate for defs and temps.
  auto reusesUse = [&](LDefinition* def) {
    return def->policy() == LDefinition::MUST_REUSE_INPUT &&
           ins->getOperand(def->getReusedInput())->toUse() == use;
  };

  for (size_t i = 0; i < ins->numDefs(); i++) {
    if (reusesUse(ins->getDef(i))) {
      return true;
    }
  }
  for (size_t i = 0; i < ins->numTemps(); i++) {
    if (reusesUse(ins->getTemp(i))) {
      return true;
    }
  }
  return false;
}
#endif
/*
* This function builds up liveness ranges for all virtual registers
* defined in the function.
*
* The algorithm is based on the one published in:
*
* Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
* SSA Form." Proceedings of the International Symposium on Code Generation
* and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
*
* The algorithm operates on blocks ordered such that dominators of a block
* are before the block itself, and such that all blocks of a loop are
* contiguous. It proceeds backwards over the instructions in this order,
* marking registers live at their uses, ending their live ranges at
* definitions, and recording which registers are live at the top of every
* block. To deal with loop backedges, registers live at the beginning of
* a loop gain a range covering the entire loop.
*/
// Build initial live ranges for every vreg by walking the blocks in reverse
// order and each block's instructions back-to-front (see the algorithm
// comment above). Returns false on OOM or cancellation.
bool BacktrackingAllocator::buildLivenessInfo() {
  JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis");

  // Work list and visited set used when flooding liveness around loops.
  Vector<MBasicBlock*, 1, SystemAllocPolicy> loopWorkList;
  BitSet loopDone(graph.numBlockIds());
  if (!loopDone.init(alloc())) {
    return false;
  }

  // Total count of ranges created, used by addInitialRange's coalescing cap.
  size_t numRanges = 0;

  for (size_t i = graph.numBlocks(); i > 0; i--) {
    if (mir->shouldCancel("Build Liveness Info (main loop)")) {
      return false;
    }

    LBlock* block = graph.getBlock(i - 1);
    MBasicBlock* mblock = block->mir();

    BitSet& live = liveIn[mblock->id()];
    new (&live) BitSet(graph.numVirtualRegisters());
    if (!live.init(alloc())) {
      return false;
    }

    // Propagate liveIn from our successors to us.
    for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
      MBasicBlock* successor = mblock->lastIns()->getSuccessor(i);
      // Skip backedges, as we fix them up at the loop header.
      if (mblock->id() < successor->id()) {
        live.insertAll(liveIn[successor->id()]);
      }
    }

    // Add successor phis.
    if (mblock->successorWithPhis()) {
      LBlock* phiSuccessor = mblock->successorWithPhis()->lir();
      for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
        LPhi* phi = phiSuccessor->getPhi(j);
        LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor());
        uint32_t reg = use->toUse()->virtualRegister();
        live.insert(reg);
        vreg(use).setUsedByPhi();
      }
    }

    // Registers are assumed alive for the entire block, a define shortens
    // the range to the point of definition.
    for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
      if (!vregs[*liveRegId].addInitialRange(alloc(), entryOf(block),
                                             exitOf(block).next(), &numRanges))
        return false;
    }

    // Shorten the front end of ranges for live variables to their point of
    // definition, if found.
    for (LInstructionReverseIterator ins = block->rbegin();
         ins != block->rend(); ins++) {
      // Calls may clobber registers, so force a spill and reload around the
      // callsite.
      if (ins->isCall()) {
        for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more();
             ++iter) {
          bool found = false;
          for (size_t i = 0; i < ins->numDefs(); i++) {
            if (ins->getDef(i)->isFixed() &&
                ins->getDef(i)->output()->aliases(LAllocation(*iter))) {
              found = true;
              break;
            }
          }
          // If this register doesn't have an explicit def above, mark
          // it as clobbered by the call unless it is actually
          // call-preserved.
          if (!found && !ins->isCallPreserved(*iter)) {
            if (!addInitialFixedRange(*iter, outputOf(*ins),
                                      outputOf(*ins).next())) {
              return false;
            }
          }
        }

        // Track the call site itself for later splitting decisions.
        CallRange* callRange = new (alloc().fallible())
            CallRange(outputOf(*ins), outputOf(*ins).next());
        if (!callRange) {
          return false;
        }

        callRangesList.pushFront(callRange);
        if (!callRanges.insert(callRange)) {
          return false;
        }
      }

      for (size_t i = 0; i < ins->numDefs(); i++) {
        LDefinition* def = ins->getDef(i);
        if (def->isBogusTemp()) {
          continue;
        }

        CodePosition from = outputOf(*ins);

        if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
          // MUST_REUSE_INPUT is implemented by allocating an output
          // register and moving the input to it. Register hints are
          // used to avoid unnecessary moves. We give the input an
          // LUse::ANY policy to avoid allocating a register for the
          // input.
          LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse();
          MOZ_ASSERT(inputUse->policy() == LUse::REGISTER);
          MOZ_ASSERT(inputUse->usedAtStart());
          *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY,
                           /* usedAtStart = */ true);
        }

        if (!vreg(def).addInitialRange(alloc(), from, from.next(),
                                       &numRanges)) {
          return false;
        }
        vreg(def).setInitialDefinition(from);
        // The def kills the register above this point in the block.
        live.remove(def->virtualRegister());
      }

      for (size_t i = 0; i < ins->numTemps(); i++) {
        LDefinition* temp = ins->getTemp(i);
        if (temp->isBogusTemp()) {
          continue;
        }

        // Normally temps are considered to cover both the input
        // and output of the associated instruction. In some cases
        // though we want to use a fixed register as both an input
        // and clobbered register in the instruction, so watch for
        // this and shorten the temp to cover only the output.
        CodePosition from = inputOf(*ins);
        if (temp->policy() == LDefinition::FIXED) {
          AnyRegister reg = temp->output()->toRegister();
          for (LInstruction::InputIterator alloc(**ins); alloc.more();
               alloc.next()) {
            if (alloc->isUse()) {
              LUse* use = alloc->toUse();
              if (use->isFixedRegister()) {
                if (GetFixedRegister(vreg(use).def(), use) == reg) {
                  from = outputOf(*ins);
                }
              }
            }
          }
        }

        // * For non-call instructions, temps cover both the input and output,
        //   so temps never alias uses (even at-start uses) or defs.
        // * For call instructions, temps only cover the input (the output is
        //   used for the force-spill ranges added above). This means temps
        //   still don't alias uses but they can alias the (fixed) defs. For
        //   now we conservatively require temps to have a fixed register for
        //   call instructions to prevent a footgun.
        MOZ_ASSERT_IF(ins->isCall(), temp->policy() == LDefinition::FIXED);
        CodePosition to =
            ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();

        if (!vreg(temp).addInitialRange(alloc(), from, to, &numRanges)) {
          return false;
        }
        vreg(temp).setInitialDefinition(from);
      }

      DebugOnly<bool> hasUseRegister = false;
      DebugOnly<bool> hasUseRegisterAtStart = false;

      for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more();
           inputAlloc.next()) {
        if (inputAlloc->isUse()) {
          LUse* use = inputAlloc->toUse();

          // Call uses should always be at-start, since calls use all
          // registers.
          MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
                        use->usedAtStart());

#ifdef DEBUG
          // If there are both useRegisterAtStart(x) and useRegister(y)
          // uses, we may assign the same register to both operands
          // (bug 772830). Don't allow this for now.
          if (use->policy() == LUse::REGISTER) {
            if (use->usedAtStart()) {
              if (!IsInputReused(*ins, use)) {
                hasUseRegisterAtStart = true;
              }
            } else {
              hasUseRegister = true;
            }
          }
          MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif

          // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
          if (use->policy() == LUse::RECOVERED_INPUT) {
            continue;
          }

          CodePosition to =
              use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
          if (use->isFixedRegister()) {
            // A fixed use matching a fixed def of the same register must end
            // at the input so the two don't conflict.
            LAllocation reg(AnyRegister::FromCode(use->registerCode()));
            for (size_t i = 0; i < ins->numDefs(); i++) {
              LDefinition* def = ins->getDef(i);
              if (def->policy() == LDefinition::FIXED &&
                  *def->output() == reg) {
                to = inputOf(*ins);
              }
            }
          }

          if (!vreg(use).addInitialRange(alloc(), entryOf(block), to.next(),
                                         &numRanges)) {
            return false;
          }
          UsePosition* usePosition =
              new (alloc().fallible()) UsePosition(use, to);
          if (!usePosition) {
            return false;
          }
          vreg(use).addInitialUse(usePosition);
          live.insert(use->virtualRegister());
        }
      }
    }

    // Phis have simultaneous assignment semantics at block begin, so at
    // the beginning of the block we can be sure that liveIn does not
    // contain any phi outputs.
    for (unsigned int i = 0; i < block->numPhis(); i++) {
      LDefinition* def = block->getPhi(i)->getDef(0);
      if (live.contains(def->virtualRegister())) {
        live.remove(def->virtualRegister());
      } else {
        // This is a dead phi, so add a dummy range over all phis. This
        // can go away if we have an earlier dead code elimination pass.
        CodePosition entryPos = entryOf(block);
        if (!vreg(def).addInitialRange(alloc(), entryPos, entryPos.next(),
                                       &numRanges)) {
          return false;
        }
      }
    }

    if (mblock->isLoopHeader()) {
      // A divergence from the published algorithm is required here, as
      // our block order does not guarantee that blocks of a loop are
      // contiguous. As a result, a single live range spanning the
      // loop is not possible. Additionally, we require liveIn in a later
      // pass for resolution, so that must also be fixed up here.
      MBasicBlock* loopBlock = mblock->backedge();
      while (true) {
        // Blocks must already have been visited to have a liveIn set.
        MOZ_ASSERT(loopBlock->id() >= mblock->id());

        // Add a range for this entire loop block
        CodePosition from = entryOf(loopBlock->lir());
        CodePosition to = exitOf(loopBlock->lir()).next();

        for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
          if (!vregs[*liveRegId].addInitialRange(alloc(), from, to,
                                                 &numRanges)) {
            return false;
          }
        }

        // Fix up the liveIn set.
        liveIn[loopBlock->id()].insertAll(live);

        // Make sure we don't visit this node again
        loopDone.insert(loopBlock->id());

        // If this is the loop header, any predecessors are either the
        // backedge or out of the loop, so skip any predecessors of
        // this block
        if (loopBlock != mblock) {
          for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
            MBasicBlock* pred = loopBlock->getPredecessor(i);
            if (loopDone.contains(pred->id())) {
              continue;
            }
            if (!loopWorkList.append(pred)) {
              return false;
            }
          }
        }

        // Terminate loop if out of work.
        if (loopWorkList.empty()) {
          break;
        }

        // Grab the next block off the work list, skipping any OSR block.
        MBasicBlock* osrBlock = graph.mir().osrBlock();
        while (!loopWorkList.empty()) {
          loopBlock = loopWorkList.popCopy();
          if (loopBlock != osrBlock) {
            break;
          }
        }

        // If end is reached without finding a non-OSR block, then no more
        // work items were found.
        if (loopBlock == osrBlock) {
          MOZ_ASSERT(loopWorkList.empty());
          break;
        }
      }

      // Clear the done set for other loops
      loopDone.clear();
    }

    // The entry block (no predecessors) must not have anything live-in.
    MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty());
  }

  JitSpew(JitSpew_RegAlloc, "Completed liveness analysis");
  return true;
}
///////////////////////////////////////////////////////////////////////////////
// //
// Merging and queueing of LiveRange groups //
// //
///////////////////////////////////////////////////////////////////////////////
// Helper for ::tryMergeBundles
// Helper for ::tryMergeBundles: whether |def| is fixed to a stack argument
// slot.
static bool IsArgumentSlotDefinition(LDefinition* def) {
  if (def->policy() != LDefinition::FIXED) {
    return false;
  }
  return def->output()->isArgument();
}
// Helper for ::tryMergeBundles
// Helper for ::tryMergeBundles: whether |def| is fixed to the frame's |this|
// slot, i.e. the first Value-sized chunk of the argument area.
static bool IsThisSlotDefinition(LDefinition* def) {
  if (!IsArgumentSlotDefinition(def)) {
    return false;
  }
  size_t index = def->output()->toArgument()->index();
  return index < THIS_FRAME_ARGSLOT + sizeof(Value);
}
// Helper for ::tryMergeBundles
// Helper for ::tryMergeBundles: whether |def| uses the STACK policy (wasm
// multi-value stack results).
static bool HasStackPolicy(LDefinition* def) {
  return LDefinition::STACK == def->policy();
}
// Helper for ::tryMergeBundles
// Helper for ::tryMergeBundles: two definition types may share a bundle when
// identical or when their spill slots have the same width, so merged values
// never disagree about spill-slot size.
static bool CanMergeTypesInBundle(LDefinition::Type a, LDefinition::Type b) {
  if (a == b) {
    // Fast path for the common case.
    return true;
  }
  return StackSlotAllocator::width(a) == StackSlotAllocator::width(b);
}
// Helper for ::tryMergeReusedRegister and ::mergeAndQueueRegisters.
//
// Try to fold all ranges of |bundle1| into |bundle0| so the two groups of
// virtual registers can share one allocation. Declining to merge is not an
// error: the function returns true whether or not the merge happened.
// (The bool return matches the fallible style of its callers; no fallible
// operation occurs in the current body.)
bool BacktrackingAllocator::tryMergeBundles(LiveBundle* bundle0,
LiveBundle* bundle1) {
// See if bundle0 and bundle1 can be merged together.
if (bundle0 == bundle1) {
return true;
}
// Get a representative virtual register from each bundle.
VirtualRegister& reg0 = bundle0->firstRange()->vreg();
VirtualRegister& reg1 = bundle1->firstRange()->vreg();
MOZ_ASSERT(CanMergeTypesInBundle(reg0.type(), reg1.type()));
MOZ_ASSERT(reg0.isCompatible(reg1));
// Registers which might spill to the frame's |this| slot can only be
// grouped with other such registers. The frame's |this| slot must always
// hold the |this| value, as required by JitFrame tracing and by the Ion
// constructor calling convention.
if (IsThisSlotDefinition(reg0.def()) || IsThisSlotDefinition(reg1.def())) {
if (*reg0.def()->output() != *reg1.def()->output()) {
// Outputs differ: merging could repurpose the |this| slot, so decline.
return true;
}
}
// Registers which might spill to the frame's argument slots can only be
// grouped with other such registers if the frame might access those
// arguments through a lazy arguments object or rest parameter.
if (IsArgumentSlotDefinition(reg0.def()) ||
IsArgumentSlotDefinition(reg1.def())) {
if (graph.mir().entryBlock()->info().mayReadFrameArgsDirectly()) {
if (*reg0.def()->output() != *reg1.def()->output()) {
// Argument slots are directly observable and the slots differ: decline.
return true;
}
}
}
// When we make a call to a WebAssembly function that returns multiple
// results, some of those results can go on the stack. The callee is passed a
// pointer to this stack area, which is represented as having policy
// LDefinition::STACK (with type LDefinition::STACKRESULTS). Individual
// results alias parts of the stack area with a value-appropriate type, but
// policy LDefinition::STACK. This aliasing between allocations makes it
// unsound to merge anything with a LDefinition::STACK policy.
if (HasStackPolicy(reg0.def()) || HasStackPolicy(reg1.def())) {
return true;
}
// Limit the number of times we compare ranges if there are many ranges in
// one of the bundles, to avoid quadratic behavior.
static const size_t MAX_RANGES = 200;
// Make sure that ranges in the bundles do not overlap. Both lists are kept
// sorted by start position (see SortBefore for BundleLink), so walk them in
// lockstep, skipping past whichever range ends before the other begins.
LiveRange::BundleLinkIterator iter0 = bundle0->rangesBegin(),
iter1 = bundle1->rangesBegin();
size_t count = 0;
while (iter0 && iter1) {
if (++count >= MAX_RANGES) {
// Too expensive to prove the bundles don't overlap; conservatively
// decline the merge.
return true;
}
LiveRange* range0 = LiveRange::get(*iter0);
LiveRange* range1 = LiveRange::get(*iter1);
if (range0->from() >= range1->to()) {
iter1++;
} else if (range1->from() >= range0->to()) {
iter0++;
} else {
// The two ranges intersect: the bundles cannot share an allocation.
return true;
}
}
// Move all ranges from bundle1 into bundle0.
while (LiveRange* range = bundle1->popFirstRange()) {
bundle0->addRange(range);
}
return true;
}
// Helper for ::mergeAndQueueRegisters: eagerly assigns a stack location to a
// STACK-policy definition, either a whole wasm stack result area
// (LDefinition::STACKRESULTS) or one result inside such an area.
void BacktrackingAllocator::allocateStackDefinition(VirtualRegister& reg) {
  LInstruction* ins = reg.ins()->toInstruction();
  if (reg.def()->type() != LDefinition::STACKRESULTS) {
    // An individual stack result. Because the definitions are visited in
    // order, the enclosing area has been allocated before we reach this
    // result, so we know operand 0 is an LStackArea use.
    const LUse* areaUse = ins->getOperand(0)->toUse();
    VirtualRegister& areaVreg = vregs[areaUse->virtualRegister()];
    const LStackArea* areaAlloc = areaVreg.def()->output()->toStackArea();
    reg.def()->setOutput(areaAlloc->resultAlloc(ins, reg.def()));
    return;
  }
  // The stack result area itself: carve out fresh stack space for it.
  LStackArea stackArea(ins->toInstruction());
  stackSlotAllocator.allocateStackArea(&stackArea);
  reg.def()->setOutput(stackArea);
}
// Helper for ::mergeAndQueueRegisters.
//
// |def| is a vreg with a MUST_REUSE_INPUT definition that reuses |input|'s
// physical register. Try to put def and input in the same bundle so that no
// copy is needed before def's instruction. When sharing is impossible, the
// function marks def with setMustCopyInput() and still returns true; false
// is returned only on allocation (OOM) failure.
bool BacktrackingAllocator::tryMergeReusedRegister(VirtualRegister& def,
VirtualRegister& input) {
// def is a vreg which reuses input for its output physical register. Try
// to merge ranges for def with those of input if possible, as avoiding
// copies before def's instruction is crucial for generated code quality
// (MUST_REUSE_INPUT is used for all arithmetic on x86/x64).
if (def.rangeFor(inputOf(def.ins()))) {
// def is already live at its own instruction's input position; per the
// assert this only happens for temps, which must be copied.
MOZ_ASSERT(def.isTemp());
def.setMustCopyInput();
return true;
}
if (!CanMergeTypesInBundle(def.type(), input.type())) {
// Incompatible spill widths make a shared bundle unsound.
def.setMustCopyInput();
return true;
}
LiveRange* inputRange = input.rangeFor(outputOf(def.ins()));
if (!inputRange) {
// The input is not live after the instruction, either in a safepoint
// for the instruction or in subsequent code. The input and output
// can thus be in the same group.
return tryMergeBundles(def.firstBundle(), input.firstBundle());
}
// Avoid merging in very large live ranges as merging has non-linear
// complexity. The cutoff value is hard to gauge. 1M was chosen because it
// is "large" and yet usefully caps compile time on AutoCad-for-the-web to
// something reasonable on a 2017-era desktop system.
const uint32_t RANGE_SIZE_CUTOFF = 1000000;
if (inputRange->to() - inputRange->from() > RANGE_SIZE_CUTOFF) {
def.setMustCopyInput();
return true;
}
// The input is live afterwards, either in future instructions or in a
// safepoint for the reusing instruction. This is impossible to satisfy
// without copying the input.
//
// It may or may not be better to split the input into two bundles at the
// point of the definition, which may permit merging. One case where it is
// definitely better to split is if the input never has any register uses
// after the instruction. Handle this splitting eagerly.
LBlock* block = def.ins()->block();
// The input's lifetime must end within the same block as the definition,
// otherwise it could live on in phis elsewhere.
if (inputRange != input.lastRange() || inputRange->to() > exitOf(block)) {
def.setMustCopyInput();
return true;
}
// If we already split the input for some other register, don't make a
// third bundle.
if (inputRange->bundle() != input.firstRange()->bundle()) {
def.setMustCopyInput();
return true;
}
// If the input will start out in memory then adding a separate bundle for
// memory uses after the def won't help.
if (input.def()->isFixed() && !input.def()->output()->isRegister()) {
def.setMustCopyInput();
return true;
}
// The input cannot have register or reused uses after the definition.
for (UsePositionIterator iter = inputRange->usesBegin(); iter; iter++) {
if (iter->pos <= inputOf(def.ins())) {
// Uses at or before the reusing instruction's input position don't
// conflict with the split; skip them.
continue;
}
LUse* use = iter->use();
if (FindReusingDefOrTemp(insData[iter->pos], use)) {
def.setMustCopyInput();
return true;
}
if (iter->usePolicy() != LUse::ANY &&
iter->usePolicy() != LUse::KEEPALIVE) {
def.setMustCopyInput();
return true;
}
}
// Split inputRange in two: [from, outputOf(ins)) and [inputOf(ins), to).
LiveRange* preRange = LiveRange::FallibleNew(
alloc(), &input, inputRange->from(), outputOf(def.ins()));
if (!preRange) {
return false;
}
// The new range starts at reg's input position, which means it overlaps
// with the old range at one position. This is what we want, because we
// need to copy the input before the instruction.
LiveRange* postRange = LiveRange::FallibleNew(
alloc(), &input, inputOf(def.ins()), inputRange->to());
if (!postRange) {
return false;
}
// Hand each of inputRange's uses to the half that covers it; the assert
// checks that every use was claimed by one of the two new ranges.
inputRange->distributeUses(preRange);
inputRange->distributeUses(postRange);
MOZ_ASSERT(!inputRange->hasUses());
JitSpewIfEnabled(JitSpew_RegAlloc,
" splitting reused input at %u to try to help grouping",
inputOf(def.ins()).bits());
// Replace inputRange with the two halves, keeping preRange in the
// original bundle.
LiveBundle* firstBundle = inputRange->bundle();
input.removeRange(inputRange);
input.addRange(preRange);
input.addRange(postRange);
firstBundle->removeRange(inputRange);
firstBundle->addRange(preRange);
// The new range goes in a separate bundle, where it will be spilled during
// allocation.
LiveBundle* secondBundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
if (!secondBundle) {
return false;
}
secondBundle->addRange(postRange);
return tryMergeBundles(def.firstBundle(), input.firstBundle());
}
bool BacktrackingAllocator::mergeAndQueueRegisters() {
MOZ_ASSERT(!vregs[0u].hasRanges());
// Create a bundle for each register containing all its ranges.
for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
VirtualRegister& reg = vregs[i];
if (!reg.hasRanges()) {
continue;
}
LiveBundle* bundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
if (!bundle) {
return false;
}
for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
iter++) {
LiveRange* range = LiveRange::get(*iter);
bundle->addRange(range);
}
}
// If there is an OSR block, merge parameters in that block with the
// corresponding parameters in the initial block.
if (MBasicBlock* osr = graph.mir().osrBlock()) {
size_t original = 1;
for (LInstructionIterator iter = osr->lir()->begin();
iter != osr->lir()->end(); iter++) {
if (iter->isParameter()) {
for (size_t i = 0; i < iter->numDefs(); i++) {
DebugOnly<bool> found = false;
VirtualRegister& paramVreg = vreg(iter->getDef(i));
for (; original < paramVreg.vreg(); original++) {
VirtualRegister& originalVreg = vregs[original];
if (*originalVreg.def()->output() == *iter->getDef(i)->output()) {
MOZ_ASSERT(originalVreg.ins()->isParameter());
if (!tryMergeBundles(originalVreg.firstBundle(),
paramVreg.firstBundle())) {
return false;
}
found = true;
break;
}
}
MOZ_ASSERT(found);
}
}
}
}
// Try to merge registers with their reused inputs.
for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
VirtualRegister& reg = vregs[i];
if (!reg.hasRanges()) {
continue;
}
if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
LUse* use = reg.ins()
->toInstruction()
->getOperand(reg.def()->getReusedInput())
->toUse();
if (!tryMergeReusedRegister(reg, vreg(use))) {
return false;
}
}
}
// Try to merge phis with their inputs.
for (size_t i = 0; i < graph.numBlocks(); i++) {
LBlock* block = graph.getBlock(i);
for (size_t j = 0; j < block->numPhis(); j++) {
LPhi* phi = block->getPhi(j);
VirtualRegister& outputVreg = vreg(phi->getDef(0));
for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
VirtualRegister& inputVreg = vreg(phi->getOperand(k)->toUse());
if (!tryMergeBundles(inputVreg.firstBundle(),
outputVreg.firstBundle())) {
return false;
}
}
}
}
// Add all bundles to the allocation queue, and create spill sets for them.
for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
VirtualRegister& reg = vregs[i];
// Eagerly allocate stack result areas and their component stack results.
if (reg.def() && reg.def()->policy() == LDefinition::STACK) {
allocateStackDefinition(reg);
}
for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter;
iter++) {
LiveRange* range = LiveRange::get(*iter);
LiveBundle* bundle = range->bundle();
if (range == bundle->firstRange()) {
if (!alloc().ensureBallast()) {
return false;
}
SpillSet* spill = SpillSet::New(alloc());
if (!spill) {