/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
*
* Copyright 2015 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wasm/WasmIonCompile.h"
#include "mozilla/MathAlgorithms.h"
#include <algorithm>
#include "jit/CodeGenerator.h"
#include "wasm/WasmBaselineCompile.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmOpIter.h"
#include "wasm/WasmSignalHandlers.h"
#include "wasm/WasmStubs.h"
#include "wasm/WasmValidate.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::IsPowerOfTwo;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::Some;
namespace {
typedef Vector<MBasicBlock*, 8, SystemAllocPolicy> BlockVector;
typedef Vector<MDefinition*, 8, SystemAllocPolicy> DefVector;
struct IonCompilePolicy {
// We store SSA definitions in the value stack.
using Value = MDefinition*;
using ValueVector = DefVector;
// We store loop headers and then/else blocks in the control flow stack.
using ControlItem = MBasicBlock*;
};
using IonOpIter = OpIter<IonCompilePolicy>;
class FunctionCompiler;
// CallCompileState describes a call that is being compiled.
class CallCompileState {
// A generator object that is passed each argument as it is compiled.
ABIArgGenerator abi_;
// Accumulates the register arguments while compiling arguments.
MWasmCall::Args regArgs_;
// Reserved argument for passing Instance* to builtin instance method calls.
ABIArg instanceArg_;
// The stack area in which the callee will write stack return values, or
// nullptr if no stack results.
MWasmStackResultArea* stackResultArea_ = nullptr;
// Only FunctionCompiler should be directly manipulating CallCompileState.
friend class FunctionCompiler;
};
// Encapsulates the compilation of a single function in a wasm or asm.js
// module. The function compiler handles the creation and final backend
// compilation of the MIR graph.
class FunctionCompiler {
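// A control-flow patch records a forward branch, and the successor slot of
// that branch, whose target block cannot be created until the enclosing
// block's label is bound.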
struct ControlFlowPatch {
MControlInstruction* ins;
uint32_t index;
ControlFlowPatch(MControlInstruction* ins, uint32_t index)
: ins(ins), index(index) {}
};
typedef Vector<ControlFlowPatch, 0, SystemAllocPolicy> ControlFlowPatchVector;
typedef Vector<ControlFlowPatchVector, 0, SystemAllocPolicy>
ControlFlowPatchsVector;
const ModuleEnvironment& env_;
IonOpIter iter_;
const FuncCompileInput& func_;
const ValTypeVector& locals_;
size_t lastReadCallSite_;
TempAllocator& alloc_;
MIRGraph& graph_;
const CompileInfo& info_;
MIRGenerator& mirGen_;
MBasicBlock* curBlock_;
uint32_t maxStackArgBytes_;
uint32_t loopDepth_;
uint32_t blockDepth_;
ControlFlowPatchsVector blockPatches_;
// TLS pointer argument to the current function.
MWasmParameter* tlsPointer_;
MWasmParameter* stackResultPointer_;
public:
FunctionCompiler(const ModuleEnvironment& env, Decoder& decoder,
const FuncCompileInput& func, const ValTypeVector& locals,
MIRGenerator& mirGen)
: env_(env),
iter_(env, decoder),
func_(func),
locals_(locals),
lastReadCallSite_(0),
alloc_(mirGen.alloc()),
graph_(mirGen.graph()),
info_(mirGen.outerInfo()),
mirGen_(mirGen),
curBlock_(nullptr),
maxStackArgBytes_(0),
loopDepth_(0),
blockDepth_(0),
tlsPointer_(nullptr),
stackResultPointer_(nullptr) {}
const ModuleEnvironment& env() const { return env_; }
IonOpIter& iter() { return iter_; }
TempAllocator& alloc() const { return alloc_; }
// FIXME(1401675): Replace with BlockType.
uint32_t funcIndex() const { return func_.index; }
const FuncType& funcType() const { return *env_.funcTypes[func_.index]; }
BytecodeOffset bytecodeOffset() const { return iter_.bytecodeOffset(); }
BytecodeOffset bytecodeIfNotAsmJS() const {
return env_.isAsmJS() ? BytecodeOffset() : iter_.bytecodeOffset();
}
bool init() {
// Prepare the entry block for MIR generation:
const ArgTypeVector args(funcType());
if (!mirGen_.ensureBallast()) {
return false;
}
if (!newBlock(/* prev */ nullptr, &curBlock_)) {
return false;
}
for (ABIArgIter i(args); !i.done(); i++) {
MWasmParameter* ins = MWasmParameter::New(alloc(), *i, i.mirType());
curBlock_->add(ins);
if (args.isSyntheticStackResultPointerArg(i.index())) {
MOZ_ASSERT(stackResultPointer_ == nullptr);
stackResultPointer_ = ins;
} else {
curBlock_->initSlot(info().localSlot(args.naturalIndex(i.index())),
ins);
}
if (!mirGen_.ensureBallast()) {
return false;
}
}
// Set up a parameter that receives the hidden TLS pointer argument.
tlsPointer_ =
MWasmParameter::New(alloc(), ABIArg(WasmTlsReg), MIRType::Pointer);
curBlock_->add(tlsPointer_);
if (!mirGen_.ensureBallast()) {
return false;
}
for (size_t i = args.lengthWithoutStackResults(); i < locals_.length();
i++) {
MInstruction* ins = nullptr;
switch (locals_[i].kind()) {
case ValType::I32:
ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
break;
case ValType::I64:
ins = MConstant::NewInt64(alloc(), 0);
break;
case ValType::V128:
#ifdef ENABLE_WASM_SIMD
ins =
MWasmFloatConstant::NewSimd128(alloc(), SimdConstant::SplatX4(0));
break;
#else
return iter().fail("Ion has no SIMD support yet");
#endif
case ValType::F32:
ins = MConstant::New(alloc(), Float32Value(0.f), MIRType::Float32);
break;
case ValType::F64:
ins = MConstant::New(alloc(), DoubleValue(0.0), MIRType::Double);
break;
case ValType::Ref:
ins = MWasmNullConstant::New(alloc());
break;
}
curBlock_->add(ins);
curBlock_->initSlot(info().localSlot(i), ins);
if (!mirGen_.ensureBallast()) {
return false;
}
}
return true;
}
void finish() {
mirGen().initWasmMaxStackArgBytes(maxStackArgBytes_);
MOZ_ASSERT(loopDepth_ == 0);
MOZ_ASSERT(blockDepth_ == 0);
#ifdef DEBUG
for (ControlFlowPatchVector& patches : blockPatches_) {
MOZ_ASSERT(patches.empty());
}
#endif
MOZ_ASSERT(inDeadCode());
MOZ_ASSERT(done(), "all bytes must be consumed");
MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
}
/************************* Read-only interface (after local scope setup) */
MIRGenerator& mirGen() const { return mirGen_; }
MIRGraph& mirGraph() const { return graph_; }
const CompileInfo& info() const { return info_; }
MDefinition* getLocalDef(unsigned slot) {
if (inDeadCode()) {
return nullptr;
}
return curBlock_->getSlot(info().localSlot(slot));
}
const ValTypeVector& locals() const { return locals_; }
/***************************** Code generation (after local scope setup) */
MDefinition* constant(const Value& v, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
MConstant* constant = MConstant::New(alloc(), v, type);
curBlock_->add(constant);
return constant;
}
MDefinition* constant(float f) {
if (inDeadCode()) {
return nullptr;
}
auto* cst = MWasmFloatConstant::NewFloat32(alloc(), f);
curBlock_->add(cst);
return cst;
}
MDefinition* constant(double d) {
if (inDeadCode()) {
return nullptr;
}
auto* cst = MWasmFloatConstant::NewDouble(alloc(), d);
curBlock_->add(cst);
return cst;
}
MDefinition* constant(int64_t i) {
if (inDeadCode()) {
return nullptr;
}
MConstant* constant = MConstant::NewInt64(alloc(), i);
curBlock_->add(constant);
return constant;
}
#ifdef ENABLE_WASM_SIMD
MDefinition* constant(V128 v) {
if (inDeadCode()) {
return nullptr;
}
MWasmFloatConstant* constant = MWasmFloatConstant::NewSimd128(
alloc(), SimdConstant::CreateSimd128((int8_t*)v.bytes));
curBlock_->add(constant);
return constant;
}
#endif
MDefinition* nullRefConstant() {
if (inDeadCode()) {
return nullptr;
}
// MConstant has a lot of baggage so we don't use that here.
MWasmNullConstant* constant = MWasmNullConstant::New(alloc());
curBlock_->add(constant);
return constant;
}
void fence() {
if (inDeadCode()) {
return;
}
MWasmFence* ins = MWasmFence::New(alloc());
curBlock_->add(ins);
}
template <class T>
MDefinition* unary(MDefinition* op) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), op);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* unary(MDefinition* op, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), op, type);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* binary(MDefinition* lhs, MDefinition* rhs) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), lhs, rhs);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), lhs, rhs, type);
curBlock_->add(ins);
return ins;
}
MDefinition* ursh(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MUrsh::NewWasm(alloc(), lhs, rhs, type);
curBlock_->add(ins);
return ins;
}
MDefinition* add(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MAdd::NewWasm(alloc(), lhs, rhs, type);
curBlock_->add(ins);
return ins;
}
bool mustPreserveNaN(MIRType type) {
return IsFloatingPointType(type) && !env().isAsmJS();
}
MDefinition* sub(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
// wasm can't fold x - 0.0 because of NaN with custom payloads.
MSub* ins = MSub::NewWasm(alloc(), lhs, rhs, type, mustPreserveNaN(type));
curBlock_->add(ins);
return ins;
}
MDefinition* nearbyInt(MDefinition* input, RoundingMode roundingMode) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MNearbyInt::New(alloc(), input, input->type(), roundingMode);
curBlock_->add(ins);
return ins;
}
MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type,
bool isMax) {
if (inDeadCode()) {
return nullptr;
}
if (mustPreserveNaN(type)) {
// Convert signaling NaNs to quiet NaNs.
MDefinition* zero = constant(DoubleValue(0.0), type);
lhs = sub(lhs, zero, type);
rhs = sub(rhs, zero, type);
}
MMinMax* ins = MMinMax::NewWasm(alloc(), lhs, rhs, type, isMax);
curBlock_->add(ins);
return ins;
}
MDefinition* mul(MDefinition* lhs, MDefinition* rhs, MIRType type,
MMul::Mode mode) {
if (inDeadCode()) {
return nullptr;
}
// wasm can't fold x * 1.0 because of NaN with custom payloads.
auto* ins =
MMul::NewWasm(alloc(), lhs, rhs, type, mode, mustPreserveNaN(type));
curBlock_->add(ins);
return ins;
}
MDefinition* div(MDefinition* lhs, MDefinition* rhs, MIRType type,
bool unsignd) {
if (inDeadCode()) {
return nullptr;
}
bool trapOnError = !env().isAsmJS();
if (!unsignd && type == MIRType::Int32) {
// Enforce the signedness of the operation by coercing the operands
// to signed. Otherwise, operands that "look" unsigned to Ion but
// are not unsigned to Baldr (eg, unsigned right shifts) may lead to
// the operation being executed unsigned. Applies to mod() as well.
//
// Do this for Int32 only since Int64 is not subject to the same
// issues.
//
// Note the offsets passed to MTruncateToInt32 are wrong here, but
// it doesn't matter: they're not codegen'd to calls since inputs
// already are int32.
auto* lhs2 = MTruncateToInt32::New(alloc(), lhs);
curBlock_->add(lhs2);
lhs = lhs2;
auto* rhs2 = MTruncateToInt32::New(alloc(), rhs);
curBlock_->add(rhs2);
rhs = rhs2;
}
auto* ins = MDiv::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
bytecodeOffset(), mustPreserveNaN(type));
curBlock_->add(ins);
return ins;
}
MDefinition* mod(MDefinition* lhs, MDefinition* rhs, MIRType type,
bool unsignd) {
if (inDeadCode()) {
return nullptr;
}
bool trapOnError = !env().isAsmJS();
if (!unsignd && type == MIRType::Int32) {
// See block comment in div().
auto* lhs2 = MTruncateToInt32::New(alloc(), lhs);
curBlock_->add(lhs2);
lhs = lhs2;
auto* rhs2 = MTruncateToInt32::New(alloc(), rhs);
curBlock_->add(rhs2);
rhs = rhs2;
}
auto* ins = MMod::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
bytecodeOffset());
curBlock_->add(ins);
return ins;
}
MDefinition* bitnot(MDefinition* op) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MBitNot::New(alloc(), op);
curBlock_->add(ins);
return ins;
}
MDefinition* select(MDefinition* trueExpr, MDefinition* falseExpr,
MDefinition* condExpr) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MWasmSelect::New(alloc(), trueExpr, falseExpr, condExpr);
curBlock_->add(ins);
return ins;
}
MDefinition* extendI32(MDefinition* op, bool isUnsigned) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MExtendInt32ToInt64::New(alloc(), op, isUnsigned);
curBlock_->add(ins);
return ins;
}
MDefinition* signExtend(MDefinition* op, uint32_t srcSize,
uint32_t targetSize) {
if (inDeadCode()) {
return nullptr;
}
MInstruction* ins;
switch (targetSize) {
case 4: {
MSignExtendInt32::Mode mode;
switch (srcSize) {
case 1:
mode = MSignExtendInt32::Byte;
break;
case 2:
mode = MSignExtendInt32::Half;
break;
default:
MOZ_CRASH("Bad sign extension");
}
ins = MSignExtendInt32::New(alloc(), op, mode);
break;
}
case 8: {
MSignExtendInt64::Mode mode;
switch (srcSize) {
case 1:
mode = MSignExtendInt64::Byte;
break;
case 2:
mode = MSignExtendInt64::Half;
break;
case 4:
mode = MSignExtendInt64::Word;
break;
default:
MOZ_CRASH("Bad sign extension");
}
ins = MSignExtendInt64::New(alloc(), op, mode);
break;
}
default: {
MOZ_CRASH("Bad sign extension");
}
}
curBlock_->add(ins);
return ins;
}
MDefinition* convertI64ToFloatingPoint(MDefinition* op, MIRType type,
bool isUnsigned) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MInt64ToFloatingPoint::New(alloc(), op, type, bytecodeOffset(),
isUnsigned);
curBlock_->add(ins);
return ins;
}
MDefinition* rotate(MDefinition* input, MDefinition* count, MIRType type,
bool left) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MRotate::New(alloc(), input, count, type, left);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* truncate(MDefinition* op, TruncFlags flags) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = T::New(alloc(), op, flags, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
MDefinition* compare(MDefinition* lhs, MDefinition* rhs, JSOp op,
MCompare::CompareType type) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MCompare::New(alloc(), lhs, rhs, op, type);
curBlock_->add(ins);
return ins;
}
void assign(unsigned slot, MDefinition* def) {
if (inDeadCode()) {
return;
}
curBlock_->setSlot(info().localSlot(slot), def);
}
#ifdef ENABLE_WASM_SIMD
// About Wasm SIMD as supported by Ion:
//
// The expectation is that Ion will only ever support SIMD on x86 and x64,
// since Cranelift will be the optimizing compiler for Arm64, ARMv7 will cease
// to be a tier-1 platform soon, and MIPS32 and MIPS64 will never implement
// SIMD.
//
// The division of the operations into MIR nodes reflects that expectation,
// and is a good fit for x86/x64. Should the expectation change we'll
// possibly want to re-architect the SIMD support to be a little more general.
//
// Most SIMD operations map directly to a single MIR node that ultimately ends
// up being expanded in the macroassembler.
//
// Some SIMD operations that do have a complete macroassembler expansion are
// open-coded into multiple MIR nodes here; in some cases that's just
// convenience, in other cases it may also allow them to benefit from Ion
// optimizations. The reason for the expansions will be documented by a
// comment.
// (v128,v128) -> v128 effect-free binary operations
MDefinition* binarySimd128(MDefinition* lhs, MDefinition* rhs,
bool commutative, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
rhs->type() == MIRType::Simd128);
auto* ins = MWasmBinarySimd128::New(alloc(), lhs, rhs, commutative, op);
curBlock_->add(ins);
return ins;
}
// (v128,i32) -> v128 effect-free shift operations
MDefinition* shiftSimd128(MDefinition* lhs, MDefinition* rhs, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
rhs->type() == MIRType::Int32);
// Do something vector-based when the platform allows it.
if ((rhs->isConstant() && !MacroAssembler::MustScalarizeShiftSimd128(
op, Imm32(rhs->toConstant()->toInt32()))) ||
(!rhs->isConstant() &&
!MacroAssembler::MustScalarizeShiftSimd128(op))) {
int32_t maskBits;
if (!rhs->isConstant() &&
MacroAssembler::MustMaskShiftCountSimd128(op, &maskBits)) {
MConstant* mask = MConstant::New(alloc(), Int32Value(maskBits));
curBlock_->add(mask);
MBitAnd* maskedShift = MBitAnd::New(alloc(), rhs, mask, MIRType::Int32);
curBlock_->add(maskedShift);
rhs = maskedShift;
}
auto* ins = MWasmShiftSimd128::New(alloc(), lhs, rhs, op);
curBlock_->add(ins);
return ins;
}
# ifdef DEBUG
js::wasm::ReportSimdAnalysis("shift -> variable scalarized shift");
# endif
// Otherwise just scalarize using existing primitive operations.
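// The code below handles a signed 64x2 shift: extract both lanes, shift
// each one with a 64-bit arithmetic right shift, and rebuild the vector
// from the shifted lanes.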
auto* lane0 = reduceSimd128(lhs, SimdOp::I64x2ExtractLane, ValType::I64, 0);
auto* lane1 = reduceSimd128(lhs, SimdOp::I64x2ExtractLane, ValType::I64, 1);
auto* shiftCount = extendI32(rhs, /*isUnsigned=*/false);
auto* shifted0 = binary<MRsh>(lane0, shiftCount, MIRType::Int64);
auto* shifted1 = binary<MRsh>(lane1, shiftCount, MIRType::Int64);
V128 zero;
auto* res0 = constant(zero);
auto* res1 =
replaceLaneSimd128(res0, shifted0, 0, SimdOp::I64x2ReplaceLane);
auto* ins = replaceLaneSimd128(res1, shifted1, 1, SimdOp::I64x2ReplaceLane);
return ins;
}
// (v128,scalar,imm) -> v128
MDefinition* replaceLaneSimd128(MDefinition* lhs, MDefinition* rhs,
uint32_t laneIndex, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(lhs->type() == MIRType::Simd128);
auto* ins = MWasmReplaceLaneSimd128::New(alloc(), lhs, rhs, laneIndex, op);
curBlock_->add(ins);
return ins;
}
// (scalar) -> v128 effect-free unary operations
MDefinition* scalarToSimd128(MDefinition* src, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MWasmScalarToSimd128::New(alloc(), src, op);
curBlock_->add(ins);
return ins;
}
// (v128) -> v128 effect-free unary operations
MDefinition* unarySimd128(MDefinition* src, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(src->type() == MIRType::Simd128);
auto* ins = MWasmUnarySimd128::New(alloc(), src, op);
curBlock_->add(ins);
return ins;
}
// (v128, imm) -> scalar effect-free unary operations
MDefinition* reduceSimd128(MDefinition* src, SimdOp op, ValType outType,
uint32_t imm = 0) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(src->type() == MIRType::Simd128);
auto* ins =
MWasmReduceSimd128::New(alloc(), src, op, ToMIRType(outType), imm);
curBlock_->add(ins);
return ins;
}
// (v128, v128, v128) -> v128 effect-free operations
MDefinition* bitselectSimd128(MDefinition* v1, MDefinition* v2,
MDefinition* control) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(v1->type() == MIRType::Simd128);
MOZ_ASSERT(v2->type() == MIRType::Simd128);
MOZ_ASSERT(control->type() == MIRType::Simd128);
auto* ins = MWasmBitselectSimd128::New(alloc(), v1, v2, control);
curBlock_->add(ins);
return ins;
}
// (v128, v128, imm_v128) -> v128 effect-free operations
MDefinition* shuffleSimd128(MDefinition* v1, MDefinition* v2, V128 control) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(v1->type() == MIRType::Simd128);
MOZ_ASSERT(v2->type() == MIRType::Simd128);
auto* ins = MWasmShuffleSimd128::New(
alloc(), v1, v2,
SimdConstant::CreateX16(reinterpret_cast<int8_t*>(control.bytes)));
curBlock_->add(ins);
return ins;
}
MDefinition* loadSplatSimd128(Scalar::Type viewType,
const LinearMemoryAddress<MDefinition*>& addr,
wasm::SimdOp splatOp) {
if (inDeadCode()) {
return nullptr;
}
// Expand load-and-splat as integer load followed by splat.
MemoryAccessDesc access(viewType, addr.align, addr.offset,
bytecodeIfNotAsmJS());
ValType resultType =
viewType == Scalar::Int64 ? ValType::I64 : ValType::I32;
auto* scalar = load(addr.base, &access, resultType);
if (!inDeadCode() && !scalar) {
return nullptr;
}
return scalarToSimd128(scalar, splatOp);
}
MDefinition* loadExtendSimd128(const LinearMemoryAddress<MDefinition*>& addr,
wasm::SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MemoryAccessDesc access(Scalar::Int64, addr.align, addr.offset,
bytecodeIfNotAsmJS());
// Expand load-and-extend as integer load followed by widen.
auto* scalar = load(addr.base, &access, ValType::I64);
if (!inDeadCode() && !scalar) {
return nullptr;
}
return scalarToSimd128(scalar, op);
}
#endif // ENABLE_WASM_SIMD
private:
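// Load the memory base out of the TLS data on x86, where no register is
// pinned to the heap base; on other platforms return nullptr and let
// consumers rely on the pinned heap register instead.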
MWasmLoadTls* maybeLoadMemoryBase() {
MWasmLoadTls* load = nullptr;
#ifdef JS_CODEGEN_X86
AliasSet aliases = env_.maxMemoryLength.isSome()
? AliasSet::None()
: AliasSet::Load(AliasSet::WasmHeapMeta);
load = MWasmLoadTls::New(alloc(), tlsPointer_,
offsetof(wasm::TlsData, memoryBase),
MIRType::Pointer, aliases);
curBlock_->add(load);
#endif
return load;
}
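// Load the bounds-check limit from the TLS data, or return nullptr when
// huge-memory (guard-page based) addressing makes explicit bounds checks
// unnecessary.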
MWasmLoadTls* maybeLoadBoundsCheckLimit() {
if (env_.hugeMemoryEnabled()) {
return nullptr;
}
AliasSet aliases = env_.maxMemoryLength.isSome()
? AliasSet::None()
: AliasSet::Load(AliasSet::WasmHeapMeta);
auto load = MWasmLoadTls::New(alloc(), tlsPointer_,
offsetof(wasm::TlsData, boundsCheckLimit),
MIRType::Int32, aliases);
curBlock_->add(load);
return load;
}
public:
MWasmHeapBase* memoryBase() {
MWasmHeapBase* base = nullptr;
AliasSet aliases = env_.maxMemoryLength.isSome()
? AliasSet::None()
: AliasSet::Load(AliasSet::WasmHeapMeta);
base = MWasmHeapBase::New(alloc(), tlsPointer_, aliases);
curBlock_->add(base);
return base;
}
private:
// Only sets *mustAdd if it also returns true.
bool needAlignmentCheck(MemoryAccessDesc* access, MDefinition* base,
bool* mustAdd) {
MOZ_ASSERT(!*mustAdd);
// asm.js accesses are always aligned and need no checks.
if (env_.isAsmJS() || !access->isAtomic()) {
return false;
}
if (base->isConstant()) {
int32_t ptr = base->toConstant()->toInt32();
// OK to wrap around the address computation here.
if (((ptr + access->offset()) & (access->byteSize() - 1)) == 0) {
return false;
}
}
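// If the offset itself is misaligned then the caller must fold it into the
// base pointer before emitting the alignment check, since the check only
// inspects the base.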
*mustAdd = (access->offset() & (access->byteSize() - 1)) != 0;
return true;
}
void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access,
MDefinition** base) {
MOZ_ASSERT(!inDeadCode());
uint32_t offsetGuardLimit = GetOffsetGuardLimit(env_.hugeMemoryEnabled());
// Fold a constant base into the offset (so the base is 0 in which case
// the codegen is optimized), if it doesn't wrap or trigger an
// MWasmAddOffset.
if ((*base)->isConstant()) {
uint32_t basePtr = (*base)->toConstant()->toInt32();
uint32_t offset = access->offset();
if (offset < offsetGuardLimit && basePtr < offsetGuardLimit - offset) {
auto* ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
curBlock_->add(ins);
*base = ins;
access->setOffset(access->offset() + basePtr);
}
}
bool mustAdd = false;
bool alignmentCheck = needAlignmentCheck(access, *base, &mustAdd);
// If the offset is bigger than the guard region, a separate instruction
// is necessary to add the offset to the base and check for overflow.
//
// Also add the offset if we have a Wasm atomic access that needs
// alignment checking and the offset affects alignment.
if (access->offset() >= offsetGuardLimit || mustAdd ||
!JitOptions.wasmFoldOffsets) {
*base = computeEffectiveAddress(*base, access);
}
if (alignmentCheck) {
curBlock_->add(MWasmAlignmentCheck::New(
alloc(), *base, access->byteSize(), bytecodeOffset()));
}
MWasmLoadTls* boundsCheckLimit = maybeLoadBoundsCheckLimit();
if (boundsCheckLimit) {
auto* ins = MWasmBoundsCheck::New(alloc(), *base, boundsCheckLimit,
bytecodeOffset());
curBlock_->add(ins);
if (JitOptions.spectreIndexMasking) {
*base = ins;
}
}
}
bool isSmallerAccessForI64(ValType result, const MemoryAccessDesc* access) {
if (result == ValType::I64 && access->byteSize() <= 4) {
// These smaller accesses should all be zero-extending.
MOZ_ASSERT(!isSignedIntType(access->type()));
return true;
}
return false;
}
public:
MDefinition* computeEffectiveAddress(MDefinition* base,
MemoryAccessDesc* access) {
if (inDeadCode()) {
return nullptr;
}
if (!access->offset()) {
return base;
}
auto* ins =
MWasmAddOffset::New(alloc(), base, access->offset(), bytecodeOffset());
curBlock_->add(ins);
access->clearOffset();
return ins;
}
MDefinition* load(MDefinition* base, MemoryAccessDesc* access,
ValType result) {
if (inDeadCode()) {
return nullptr;
}
MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
MInstruction* load = nullptr;
if (env_.isAsmJS()) {
MOZ_ASSERT(access->offset() == 0);
MWasmLoadTls* boundsCheckLimit = maybeLoadBoundsCheckLimit();
load = MAsmJSLoadHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
access->type());
} else {
checkOffsetAndAlignmentAndBounds(access, &base);
load =
MWasmLoad::New(alloc(), memoryBase, base, *access, ToMIRType(result));
}
if (!load) {
return nullptr;
}
curBlock_->add(load);
return load;
}
void store(MDefinition* base, MemoryAccessDesc* access, MDefinition* v) {
if (inDeadCode()) {
return;
}
MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
MInstruction* store = nullptr;
if (env_.isAsmJS()) {
MOZ_ASSERT(access->offset() == 0);
MWasmLoadTls* boundsCheckLimit = maybeLoadBoundsCheckLimit();
store = MAsmJSStoreHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
access->type(), v);
} else {
checkOffsetAndAlignmentAndBounds(access, &base);
store = MWasmStore::New(alloc(), memoryBase, base, *access, v);
}
if (!store) {
return;
}
curBlock_->add(store);
}
MDefinition* atomicCompareExchangeHeap(MDefinition* base,
MemoryAccessDesc* access,
ValType result, MDefinition* oldv,
MDefinition* newv) {
if (inDeadCode()) {
return nullptr;
}
checkOffsetAndAlignmentAndBounds(access, &base);
if (isSmallerAccessForI64(result, access)) {
auto* cvtOldv =
MWrapInt64ToInt32::New(alloc(), oldv, /*bottomHalf=*/true);
curBlock_->add(cvtOldv);
oldv = cvtOldv;
auto* cvtNewv =
MWrapInt64ToInt32::New(alloc(), newv, /*bottomHalf=*/true);
curBlock_->add(cvtNewv);
newv = cvtNewv;
}
MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
MInstruction* cas =
MWasmCompareExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
base, *access, oldv, newv, tlsPointer_);
if (!cas) {
return nullptr;
}
curBlock_->add(cas);
if (isSmallerAccessForI64(result, access)) {
cas = MExtendInt32ToInt64::New(alloc(), cas, true);
curBlock_->add(cas);
}
return cas;
}
MDefinition* atomicExchangeHeap(MDefinition* base, MemoryAccessDesc* access,
ValType result, MDefinition* value) {
if (inDeadCode()) {
return nullptr;
}
checkOffsetAndAlignmentAndBounds(access, &base);
if (isSmallerAccessForI64(result, access)) {
auto* cvtValue =
MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
curBlock_->add(cvtValue);
value = cvtValue;
}
MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
MInstruction* xchg =
MWasmAtomicExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
base, *access, value, tlsPointer_);
if (!xchg) {
return nullptr;
}
curBlock_->add(xchg);
if (isSmallerAccessForI64(result, access)) {
xchg = MExtendInt32ToInt64::New(alloc(), xchg, true);
curBlock_->add(xchg);
}
return xchg;
}
MDefinition* atomicBinopHeap(AtomicOp op, MDefinition* base,
MemoryAccessDesc* access, ValType result,
MDefinition* value) {
if (inDeadCode()) {
return nullptr;
}
checkOffsetAndAlignmentAndBounds(access, &base);
if (isSmallerAccessForI64(result, access)) {
auto* cvtValue =
MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
curBlock_->add(cvtValue);
value = cvtValue;
}
MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
MInstruction* binop =
MWasmAtomicBinopHeap::New(alloc(), bytecodeOffset(), op, memoryBase,
base, *access, value, tlsPointer_);
if (!binop) {
return nullptr;
}
curBlock_->add(binop);
if (isSmallerAccessForI64(result, access)) {
binop = MExtendInt32ToInt64::New(alloc(), binop, true);
curBlock_->add(binop);
}
return binop;
}
MDefinition* loadGlobalVar(unsigned globalDataOffset, bool isConst,
bool isIndirect, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
MInstruction* load;
if (isIndirect) {
// Pull a pointer to the value out of TlsData::globalArea, then
// load from that pointer. Note that the pointer is immutable
// even though the value it points at may change, hence the use of
// |true| for the first node's |isConst| value, irrespective of
// the |isConst| formal parameter to this method. The latter
// applies to the denoted value as a whole.
auto* cellPtr =
MWasmLoadGlobalVar::New(alloc(), MIRType::Pointer, globalDataOffset,
/*isConst=*/true, tlsPointer_);
curBlock_->add(cellPtr);
load = MWasmLoadGlobalCell::New(alloc(), type, cellPtr);
} else {
// Pull the value directly out of TlsData::globalArea.
load = MWasmLoadGlobalVar::New(alloc(), type, globalDataOffset, isConst,
tlsPointer_);
}
curBlock_->add(load);
return load;
}
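// Store a value into a global. Returns the address of the stored cell when
// a reference value was written (so that the caller can emit any barrier
// that may be required), and nullptr otherwise.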
MInstruction* storeGlobalVar(uint32_t globalDataOffset, bool isIndirect,
MDefinition* v) {
if (inDeadCode()) {
return nullptr;
}
MInstruction* store;
MInstruction* valueAddr = nullptr;
if (isIndirect) {
// Pull a pointer to the value out of TlsData::globalArea, then
// store through that pointer.
auto* cellPtr =
MWasmLoadGlobalVar::New(alloc(), MIRType::Pointer, globalDataOffset,
/*isConst=*/true, tlsPointer_);
curBlock_->add(cellPtr);
if (v->type() == MIRType::RefOrNull) {
valueAddr = cellPtr;
store = MWasmStoreRef::New(alloc(), tlsPointer_, valueAddr, v,
AliasSet::WasmGlobalCell);
} else {
store = MWasmStoreGlobalCell::New(alloc(), v, cellPtr);
}
} else {
// Store the value directly in TlsData::globalArea.
if (v->type() == MIRType::RefOrNull) {
valueAddr = MWasmDerivedPointer::New(
alloc(), tlsPointer_,
offsetof(wasm::TlsData, globalArea) + globalDataOffset);
curBlock_->add(valueAddr);
store = MWasmStoreRef::New(alloc(), tlsPointer_, valueAddr, v,
AliasSet::WasmGlobalVar);
} else {
store =
MWasmStoreGlobalVar::New(alloc(), globalDataOffset, v, tlsPointer_);
}
}
curBlock_->add(store);
return valueAddr;
}
void addInterruptCheck() {
if (inDeadCode()) {
return;
}
curBlock_->add(
MWasmInterruptCheck::New(alloc(), tlsPointer_, bytecodeOffset()));
}
/***************************************************************** Calls */
// The IonMonkey backend maintains a single stack offset (from the stack
// pointer to the base of the frame) by adding the total amount of spill
// space required plus the maximum stack required for argument passing.
// Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
// manually accumulate, for the entire function, the maximum required stack
// space for argument passing. (This is passed to the CodeGenerator via
// MIRGenerator::maxWasmStackArgBytes.) This is just the maximum of the
// stack space required for each individual call (as determined by the call
// ABI).
// Operations that modify a CallCompileState.
bool passInstance(MIRType instanceType, CallCompileState* args) {
if (inDeadCode()) {
return true;
}
// Should only pass an instance once. And it must be a non-GC pointer.
MOZ_ASSERT(args->instanceArg_ == ABIArg());
MOZ_ASSERT(instanceType == MIRType::Pointer);
args->instanceArg_ = args->abi_.next(MIRType::Pointer);
return true;
}
// Do not call this directly. Call one of the passArg() variants instead.
bool passArgWorker(MDefinition* argDef, MIRType type,
CallCompileState* call) {
ABIArg arg = call->abi_.next(type);
switch (arg.kind()) {
#ifdef JS_CODEGEN_REGISTER_PAIR
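// On 32-bit platforms a 64-bit argument may be passed in a pair of GPRs:
// split it into its low and high halves and register each half separately.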
case ABIArg::GPR_PAIR: {
auto mirLow =
MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ true);
curBlock_->add(mirLow);
auto mirHigh =
MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ false);
curBlock_->add(mirHigh);
return call->regArgs_.append(
MWasmCall::Arg(AnyRegister(arg.gpr64().low), mirLow)) &&
call->regArgs_.append(
MWasmCall::Arg(AnyRegister(arg.gpr64().high), mirHigh));
}
#endif
case ABIArg::GPR:
case ABIArg::FPU:
return call->regArgs_.append(MWasmCall::Arg(arg.reg(), argDef));
case ABIArg::Stack: {
auto* mir =
MWasmStackArg::New(alloc(), arg.offsetFromArgBase(), argDef);
curBlock_->add(mir);
return true;
}
case ABIArg::Uninitialized:
MOZ_ASSERT_UNREACHABLE("Uninitialized ABIArg kind");
}
MOZ_CRASH("Unknown ABIArg kind.");
}
bool passArg(MDefinition* argDef, MIRType type, CallCompileState* call) {
if (inDeadCode()) {
return true;
}
return passArgWorker(argDef, type, call);
}
bool passArg(MDefinition* argDef, ValType type, CallCompileState* call) {
if (inDeadCode()) {
return true;
}
return passArgWorker(argDef, ToMIRType(type), call);
}
// If the call returns results on the stack, prepare a stack area to receive
// them, and pass the address of the stack area to the callee as an additional
// argument.
bool passStackResultAreaCallArg(const ResultType& resultType,
CallCompileState* call) {
if (inDeadCode()) {
return true;
}
ABIResultIter iter(resultType);
while (!iter.done() && iter.cur().inRegister()) {
iter.next();
}
if (iter.done()) {
// No stack results.
return true;
}
auto* stackResultArea = MWasmStackResultArea::New(alloc());
if (!stackResultArea) {
return false;
}
if (!stackResultArea->init(alloc(), iter.remaining())) {
return false;
}
for (uint32_t base = iter.index(); !iter.done(); iter.next()) {
MWasmStackResultArea::StackResult loc(iter.cur().stackOffset(),
ToMIRType(iter.cur().type()));
stackResultArea->initResult(iter.index() - base, loc);
}
curBlock_->add(stackResultArea);
if (!passArg(stackResultArea, MIRType::Pointer, call)) {
return false;
}
call->stackResultArea_ = stackResultArea;
return true;
}
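// Add the implicit TLS-pointer argument and fold this call's outgoing
// stack-argument size into the per-function maximum.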
bool finishCall(CallCompileState* call) {
if (inDeadCode()) {
return true;
}
if (!call->regArgs_.append(
MWasmCall::Arg(AnyRegister(WasmTlsReg), tlsPointer_))) {
return false;
}
uint32_t stackBytes = call->abi_.stackBytesConsumedSoFar();
maxStackArgBytes_ = std::max(maxStackArgBytes_, stackBytes);
return true;
}
// Wrappers for creating various kinds of calls.
bool collectUnaryCallResult(MIRType type, MDefinition** result) {
MInstruction* def;
switch (type) {
case MIRType::Int32:
def = MWasmRegisterResult::New(alloc(), MIRType::Int32, ReturnReg);
break;
case MIRType::Int64:
def = MWasmRegister64Result::New(alloc(), ReturnReg64);
break;
case MIRType::Float32:
def = MWasmFloatRegisterResult::New(alloc(), type, ReturnFloat32Reg);
break;
case MIRType::Double:
def = MWasmFloatRegisterResult::New(alloc(), type, ReturnDoubleReg);
break;
#ifdef ENABLE_WASM_SIMD
case MIRType::Simd128:
def = MWasmFloatRegisterResult::New(alloc(), type, ReturnSimd128Reg);
break;
#endif
case MIRType::RefOrNull:
def = MWasmRegisterResult::New(alloc(), MIRType::RefOrNull, ReturnReg);
break;
default:
MOZ_CRASH("unexpected MIRType result for builtin call");
}
if (!def) {
return false;
}
curBlock_->add(def);
*result = def;
return true;
}
bool collectCallResults(const ResultType& type,
MWasmStackResultArea* stackResultArea,
DefVector* results) {
if (!results->reserve(type.length())) {
return false;
}
// The result iterator goes in the order in which results would be popped
// off; we want the order in which they would be pushed.
ABIResultIter iter(type);
uint32_t stackResultCount = 0;
while (!iter.done()) {
if (iter.cur().onStack()) {
stackResultCount++;
}
iter.next();
}
for (iter.switchToPrev(); !iter.done(); iter.prev()) {
const ABIResult& result = iter.cur();
MInstruction* def;
if (result.inRegister()) {
switch (result.type().kind()) {
case wasm::ValType::I32:
def =
MWasmRegisterResult::New(alloc(), MIRType::Int32, result.gpr());
break;
case wasm::ValType::I64:
def = MWasmRegister64Result::New(alloc(), result.gpr64());
break;
case wasm::ValType::F32:
def = MWasmFloatRegisterResult::New(alloc(), MIRType::Float32,
result.fpr());
break;
case wasm::ValType::F64:
def = MWasmFloatRegisterResult::New(alloc(), MIRType::Double,
result.fpr());
break;
case wasm::ValType::Ref:
def = MWasmRegisterResult::New(alloc(), MIRType::RefOrNull,
result.gpr());
break;
case wasm::ValType::V128:
#ifdef ENABLE_WASM_SIMD
def = MWasmFloatRegisterResult::New(alloc(), MIRType::Simd128,
result.fpr());
#else
return this->iter().fail("Ion has no SIMD support yet");
#endif
}
} else {
MOZ_ASSERT(stackResultArea);
MOZ_ASSERT(stackResultCount);
uint32_t idx = --stackResultCount;
def = MWasmStackResult::New(alloc(), stackResultArea, idx);
}
if (!def) {
return false;
}
curBlock_->add(def);
results->infallibleAppend(def);
}
MOZ_ASSERT(results->length() == type.length());
return true;
}
bool callDirect(const FuncType& funcType, uint32_t funcIndex,
uint32_t lineOrBytecode, const CallCompileState& call,
DefVector* results) {
if (inDeadCode()) {
return true;
}
CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Func);
ResultType resultType = ResultType::Vector(funcType.results());
auto callee = CalleeDesc::function(funcIndex);
ArgTypeVector args(funcType);
auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_,
StackArgAreaSizeUnaligned(args));
if (!ins) {
return false;
}
curBlock_->add(ins);
return collectCallResults(resultType, call.stackResultArea_, results);
}
bool callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
MDefinition* index, uint32_t lineOrBytecode,
const CallCompileState& call, DefVector* results) {
if (inDeadCode()) {
return true;
}
const FuncTypeWithId& funcType = env_.types[funcTypeIndex].funcType();
CalleeDesc callee;
if (env_.isAsmJS()) {
MOZ_ASSERT(tableIndex == 0);
MOZ_ASSERT(funcType.id.kind() == FuncTypeIdDescKind::None);
const TableDesc& table =
env_.tables[env_.asmJSSigToTableIndex[funcTypeIndex]];
MOZ_ASSERT(IsPowerOfTwo(table.initialLength));
MConstant* mask =
MConstant::New(alloc(), Int32Value(table.initialLength - 1));
curBlock_->add(mask);
MBitAnd* maskedIndex = MBitAnd::New(alloc(), index, mask, MIRType::Int32);
curBlock_->add(maskedIndex);
index = maskedIndex;
callee = CalleeDesc::asmJSTable(table);
} else {
MOZ_ASSERT(funcType.id.kind() != FuncTypeIdDescKind::None);
const TableDesc& table = env_.tables[tableIndex];
callee = CalleeDesc::wasmTable(table, funcType.id);
}
CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Dynamic);
ArgTypeVector args(funcType);
ResultType resultType = ResultType::Vector(funcType.results());
auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_,
StackArgAreaSizeUnaligned(args), index);
if (!ins) {
return false;
}
curBlock_->add(ins);
return collectCallResults(resultType, call.stackResultArea_, results);
}
bool callImport(unsigned globalDataOffset, uint32_t lineOrBytecode,
const CallCompileState& call, const FuncType& funcType,
DefVector* results) {
if (inDeadCode()) {
return true;
}
CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Dynamic);
auto callee = CalleeDesc::import(globalDataOffset);
ArgTypeVector args(funcType);
ResultType resultType = ResultType::Vector(funcType.results());
auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_,
StackArgAreaSizeUnaligned(args));
if (!ins) {
return false;
}
curBlock_->add(ins);
return collectCallResults(resultType, call.stackResultArea_, results);
}
bool builtinCall(const SymbolicAddressSignature& builtin,
uint32_t lineOrBytecode, const CallCompileState& call,
MDefinition** def) {
if (inDeadCode()) {
*def = nullptr;
return true;
}
MOZ_ASSERT(builtin.failureMode == FailureMode::Infallible);
CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Symbolic);
auto callee = CalleeDesc::builtin(builtin.identity);
auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_,
StackArgAreaSizeUnaligned(builtin));
if (!ins) {
return false;
}
curBlock_->add(ins);
return collectUnaryCallResult(builtin.retType, def);
}
bool builtinInstanceMethodCall(const SymbolicAddressSignature& builtin,
uint32_t lineOrBytecode,
const CallCompileState& call,
MDefinition** def = nullptr) {
MOZ_ASSERT_IF(!def, builtin.retType == MIRType::None);
if (inDeadCode()) {
if (def) {
*def = nullptr;
}
return true;
}
CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Symbolic);
auto* ins = MWasmCall::NewBuiltinInstanceMethodCall(
alloc(), desc, builtin.identity, builtin.failureMode, call.instanceArg_,
call.regArgs_, StackArgAreaSizeUnaligned(builtin));
if (!ins) {
return false;
}
curBlock_->add(ins);
return def ? collectUnaryCallResult(builtin.retType, def) : true;
}
/*********************************************** Control flow generation */
inline bool inDeadCode() const { return curBlock_ == nullptr; }
bool returnValues(const DefVector& values) {
if (inDeadCode()) {
return true;
}
if (values.empty()) {
curBlock_->end(MWasmReturnVoid::New(alloc()));
} else {
ResultType resultType = ResultType::Vector(funcType().results());
ABIResultIter iter(resultType);
// Switch to iterate in FIFO order instead of the default LIFO.
while (!iter.done()) {
iter.next();
}
iter.switchToPrev();
for (uint32_t i = 0; !iter.done(); iter.prev(), i++) {
if (!mirGen().ensureBallast()) {
return false;
}
const ABIResult& result = iter.cur();
if (result.onStack()) {
MOZ_ASSERT(iter.remaining() > 1);
if (result.type().isReference()) {
auto* loc = MWasmDerivedPointer::New(alloc(), stackResultPointer_,
result.stackOffset());
curBlock_->add(loc);
auto* store =
MWasmStoreRef::New(alloc(), tlsPointer_, loc, values[i],
AliasSet::WasmStackResult);
curBlock_->add(store);
} else {
auto* store = MWasmStoreStackResult::New(
alloc(), stackResultPointer_, result.stackOffset(), values[i]);
curBlock_->add(store);
}
} else {
MOZ_ASSERT(iter.remaining() == 1);
MOZ_ASSERT(i + 1 == values.length());
curBlock_->end(MWasmReturn::New(alloc(), values[i]));
}
}
}
curBlock_ = nullptr;
return true;
}
void unreachableTrap() {
if (inDeadCode()) {
return;
}
auto* ins =
MWasmTrap::New(alloc(), wasm::Trap::Unreachable, bytecodeOffset());
curBlock_->end(ins);
curBlock_ = nullptr;
}
private:
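// Number of values the block has pushed onto its value stack beyond the
// function's local slots.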
static uint32_t numPushed(MBasicBlock* block) {
return block->stackDepth() - block->info().firstStackSlot();
}
public:
MOZ_MUST_USE bool pushDefs(const DefVector& defs) {
if (inDeadCode()) {
return true;
}
MOZ_ASSERT(numPushed(curBlock_) == 0);
if (!curBlock_->ensureHasSlots(defs.length())) {
return false;
}
for (MDefinition* def : defs) {
MOZ_ASSERT(def->type() != MIRType::None);
curBlock_->push(def);
}
return true;
}
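// Pop every value previously pushed with pushDefs back into *defs,
// restoring the original push order.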
bool popPushedDefs(DefVector* defs) {
size_t n = numPushed(curBlock_);
if (!defs->resizeUninitialized(n)) {
return false;
}
for (; n > 0; n--) {
MDefinition* def = curBlock_->pop();
MOZ_ASSERT(def->type() != MIRType::Value);
(*defs)[n - 1] = def;
}
return true;
}
private:
bool addJoinPredecessor(const DefVector& defs, MBasicBlock** joinPred) {
*joinPred = curBlock_;
if (inDeadCode()) {
return true;
}
return pushDefs(defs);
}
public:
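// End the current block with a conditional branch, creating the then and
// else blocks, and continue compilation in the then block. In dead code
// *elseBlock is set to nullptr.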
bool branchAndStartThen(MDefinition* cond, MBasicBlock** elseBlock) {
if (inDeadCode()) {
*elseBlock = nullptr;
} else {
MBasicBlock* thenBlock;
if (!newBlock(curBlock_, &thenBlock)) {
return false;
}
if (!newBlock(curBlock_, elseBlock)) {
return false;
}
curBlock_->end(MTest::New(alloc(), cond, thenBlock, *elseBlock));
curBlock_ = thenBlock;
mirGraph().moveBlockToEnd(curBlock_);
}
return startBlock();
}
bool switchToElse(MBasicBlock* elseBlock, MBasicBlock** thenJoinPred) {
DefVector values;
if (!finishBlock(&values)) {
return false;
}
if (!elseBlock) {
*thenJoinPred = nullptr;
} else {
if (!addJoinPredecessor(values, thenJoinPred)) {
return false;
}
curBlock_ = elseBlock;
mirGraph().moveBlockToEnd(curBlock_);
}
return startBlock();
}
bool joinIfElse(MBasicBlock* thenJoinPred, DefVector* defs) {
DefVector values;
if (!finishBlock(&values)) {
return false;
}
if (!thenJoinPred && inDeadCode()) {
return true;
}
MBasicBlock* elseJoinPred;
if (!addJoinPredecessor(values, &elseJoinPred)) {
return false;
}
mozilla::Array<MBasicBlock*, 2> blocks;
size_t numJoinPreds = 0;
if (thenJoinPred) {
blocks[numJoinPreds++] = thenJoinPred;
}
if (elseJoinPred) {
blocks[numJoinPreds++] = elseJoinPred;
}
if (numJoinPreds == 0) {
return true;
}
MBasicBlock* join;
if (!goToNewBlock(blocks[0], &join)) {
return false;
}
for (size_t i = 1; i < numJoinPreds; ++i) {
if (!goToExistingBlock(blocks[i], join)) {
return false;
}
}
curBlock_ = join;
return popPushedDefs(defs);
}
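// Open a new control block. Any patch vector previously used at this depth
// must already have been drained by bindBranches.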
bool startBlock() {
MOZ_ASSERT_IF(blockDepth_ < blockPatches_.length(),
blockPatches_[blockDepth_].empty());
blockDepth_++;
return true;
}
bool finishBlock(DefVector* defs) {
MOZ_ASSERT(blockDepth_);
uint32_t topLabel = --blockDepth_;
return bindBranches(topLabel, defs);
}
bool startLoop(MBasicBlock** loopHeader, size_t paramCount) {
*loopHeader = nullptr;
blockDepth_++;
loopDepth_++;
if (inDeadCode()) {
return true;
}
// Create the loop header.
MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_ - 1);
*loopHeader = MBasicBlock::New(mirGraph(), info(), curBlock_,
MBasicBlock::PENDING_LOOP_HEADER);
if (!*loopHeader) {
return false;
}
(*loopHeader)->setLoopDepth(