/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
*
* Copyright 2015 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wasm/WasmIonCompile.h"
#include "mozilla/MathAlgorithms.h"
#include <algorithm>
#include "jit/ABIArgGenerator.h"
#include "jit/CodeGenerator.h"
#include "jit/CompileInfo.h"
#include "jit/Ion.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/ShuffleAnalysis.h"
#include "js/ScalarType.h" // js::Scalar::Type
#include "wasm/WasmBaselineCompile.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmIntrinsic.h"
#include "wasm/WasmOpIter.h"
#include "wasm/WasmSignalHandlers.h"
#include "wasm/WasmStubs.h"
#include "wasm/WasmValidate.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::IsPowerOfTwo;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::Some;
namespace {
using BlockVector = Vector<MBasicBlock*, 8, SystemAllocPolicy>;
using DefVector = Vector<MDefinition*, 8, SystemAllocPolicy>;
// To compile try-catch blocks, we extend the IonCompilePolicy's ControlItem
// from being just an MBasicBlock* to a Control structure collecting additional
// information.
using ControlInstructionVector =
Vector<MControlInstruction*, 8, SystemAllocPolicy>;
struct Control {
MBasicBlock* block;
// For a try-catch ControlItem, when its block's LabelKind is Try, this
// collects branches to later bind and create the try's landing pad.
ControlInstructionVector tryPadPatches;
Control() : block(nullptr) {}
explicit Control(MBasicBlock* block) : block(block) {}
public:
void setBlock(MBasicBlock* newBlock) { block = newBlock; }
};
// [SMDOC] WebAssembly Exception Handling in Ion
// =======================================================
//
// ## Throwing instructions
//
// Wasm exceptions can be thrown by either a throw instruction (local throw),
// or by a wasm call.
//
// ## The "catching try control"
//
// We know we are in try-code if there is a surrounding ControlItem with
// LabelKind::Try. The innermost such control is called the
// "catching try control".
//
// ## Throws without a catching try control
//
// Such throws are implemented with an instance call that triggers the exception
// unwinding runtime. The exception unwinding runtime will not return to the
// function.
//
// ## "landing pad" and "pre-pad" blocks
//
// When an exception is thrown, the unwinder will search for the nearest
// enclosing try block and redirect control flow to it. The code that executes
// before any catch blocks is called the 'landing pad'. The landing pad must:
// 1. Consume the pending exception state from
// Instance::pendingException(Tag)
// 2. Branch to the correct catch block, or else rethrow
//
// There is one landing pad for each try block. The immediate predecessors of
// the landing pad are called 'pre-pad' blocks. There is one pre-pad block per
// throwing instruction.
//
// ## Creating pre-pad blocks
//
// There are two possible sorts of pre-pad blocks, depending on whether we
// are branching after a local throw instruction, or after a wasm call:
//
// - If we encounter a local throw, we create the exception and tag objects,
// store them to Instance::pendingException(Tag), and then jump to the
// landing pad.
//
// - If we encounter a wasm call, we construct a MWasmCallCatchable which is a
// control instruction with either a branch to a fallthrough block or
// to a pre-pad block.
//
// The pre-pad block for a wasm call is empty except for a jump to the
// landing pad. It only exists to avoid critical edges which when split would
// violate the invariants of MWasmCallCatchable. The pending exception state
// is taken care of by the unwinder.
//
// Each pre-pad ends with a pending jump to the landing pad. The pending jumps
// to the landing pad are tracked in `tryPadPatches`. These are called
// "pad patches".
//
// ## Creating the landing pad
//
// When we exit try-code, we check if tryPadPatches has captured any control
// instructions (pad patches). If not, we don't compile any catches and we mark
// the rest as dead code.
//
// If there are pre-pad blocks, we join them to create a landing pad (or just
// "pad"). The pad's last two slots are the caught exception, and the
// exception's tag object.
//
// There are three different forms of try-catch/catch_all Wasm instructions,
// which result in different forms of landing pad.
//
// 1. A catchless try, so a Wasm instruction of the form "try ... end".
// - In this case, we end the pad by rethrowing the caught exception.
//
// 2. A single catch_all after a try.
// - If the first catch after a try is a catch_all, then there won't be
// any more catches, but we need the exception and its tag object, in
// case the code in a catch_all contains "rethrow" instructions.
// - The Wasm instruction "rethrow" gets the exception and tag object to
// rethrow from the last two slots of the landing pad which, due to
// validation, is the l'th surrounding ControlItem.
// - We immediately GoTo to a new block after the pad and pop both the
// exception and tag object, as we don't need them anymore in this case.
//
// 3. Otherwise, there are one or more catch code blocks following.
// - In this case, we construct the landing pad by creating a sequence
// of compare and branch blocks that compare the pending exception tag
// object to the tag object of the current tagged catch block. This is
// done incrementally as we visit each tagged catch block in the bytecode
// stream. At every step, we update the ControlItem's block to point to
// the next block to be created in the landing pad sequence. The final
// block will either be a rethrow, if there is no catch_all, or else a
// jump to a catch_all block.
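//
// ## Illustrative sketch
//
// As a rough sketch (not tied to any particular module):
//
//   try
//     call $f        ;; may throw: compiled as MWasmCallCatchable with a
//                    ;; fallthrough edge and an edge to an empty pre-pad
//     throw $t       ;; local throw: its pre-pad creates the exception and
//                    ;; tag objects, stores them to the pending exception
//                    ;; state, and jumps to the landing pad
//   catch $t
//     ...
//   catch_all
//     ...
//   end
//
// Both pre-pads are joined into a single landing pad that holds the caught
// exception and its tag object, compares that tag against $t's tag object,
// and branches either to the catch $t code or on to the catch_all code.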
struct IonCompilePolicy {
// We store SSA definitions in the value stack.
using Value = MDefinition*;
using ValueVector = DefVector;
// We store loop headers and then/else blocks in the control flow stack.
// In the case of try-catch control blocks, we collect additional information
// regarding the possible paths from throws and calls to a landing pad, as
// well as information on the landing pad's handlers (its catches).
using ControlItem = Control;
};
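// IonOpIter is the validating opcode iterator instantiated with the policy
// above: each value-stack entry carries the MDefinition that computes it, and
// each control-stack entry carries a Control.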
using IonOpIter = OpIter<IonCompilePolicy>;
class FunctionCompiler;
// CallCompileState describes a call that is being compiled.
class CallCompileState {
// A generator object that is passed each argument as it is compiled.
WasmABIArgGenerator abi_;
// Accumulates the register arguments while compiling arguments.
MWasmCallBase::Args regArgs_;
// Reserved argument for passing Instance* to builtin instance method calls.
ABIArg instanceArg_;
// The stack area in which the callee will write stack return values, or
// nullptr if no stack results.
MWasmStackResultArea* stackResultArea_ = nullptr;
// Only FunctionCompiler should be directly manipulating CallCompileState.
friend class FunctionCompiler;
};
// Encapsulates the compilation of a single function in a wasm (or asm.js)
// module. The function compiler handles the creation and final backend
// compilation of the MIR graph.
class FunctionCompiler {
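// A control-flow patch records a control instruction whose successor number
// `index` has not yet been bound to its target MBasicBlock; the target is
// filled in once the corresponding branch-target block is created.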
struct ControlFlowPatch {
MControlInstruction* ins;
uint32_t index;
ControlFlowPatch(MControlInstruction* ins, uint32_t index)
: ins(ins), index(index) {}
};
using ControlFlowPatchVector = Vector<ControlFlowPatch, 0, SystemAllocPolicy>;
using ControlFlowPatchVectorVector =
Vector<ControlFlowPatchVector, 0, SystemAllocPolicy>;
const ModuleEnvironment& moduleEnv_;
IonOpIter iter_;
const FuncCompileInput& func_;
const ValTypeVector& locals_;
size_t lastReadCallSite_;
TempAllocator& alloc_;
MIRGraph& graph_;
const CompileInfo& info_;
MIRGenerator& mirGen_;
MBasicBlock* curBlock_;
uint32_t maxStackArgBytes_;
uint32_t loopDepth_;
uint32_t blockDepth_;
ControlFlowPatchVectorVector blockPatches_;
// Instance pointer argument to the current function.
MWasmParameter* instancePointer_;
MWasmParameter* stackResultPointer_;
// Reference to masm.tryNotes_
WasmTryNoteVector& tryNotes_;
public:
FunctionCompiler(const ModuleEnvironment& moduleEnv, Decoder& decoder,
const FuncCompileInput& func, const ValTypeVector& locals,
MIRGenerator& mirGen, WasmTryNoteVector& tryNotes)
: moduleEnv_(moduleEnv),
iter_(moduleEnv, decoder),
func_(func),
locals_(locals),
lastReadCallSite_(0),
alloc_(mirGen.alloc()),
graph_(mirGen.graph()),
info_(mirGen.outerInfo()),
mirGen_(mirGen),
curBlock_(nullptr),
maxStackArgBytes_(0),
loopDepth_(0),
blockDepth_(0),
instancePointer_(nullptr),
stackResultPointer_(nullptr),
tryNotes_(tryNotes) {}
const ModuleEnvironment& moduleEnv() const { return moduleEnv_; }
IonOpIter& iter() { return iter_; }
TempAllocator& alloc() const { return alloc_; }
// FIXME(1401675): Replace with BlockType.
uint32_t funcIndex() const { return func_.index; }
const FuncType& funcType() const {
return *moduleEnv_.funcs[func_.index].type;
}
BytecodeOffset bytecodeOffset() const { return iter_.bytecodeOffset(); }
BytecodeOffset bytecodeIfNotAsmJS() const {
return moduleEnv_.isAsmJS() ? BytecodeOffset() : iter_.bytecodeOffset();
}
bool init() {
// Prepare the entry block for MIR generation:
const ArgTypeVector args(funcType());
if (!mirGen_.ensureBallast()) {
return false;
}
if (!newBlock(/* prev */ nullptr, &curBlock_)) {
return false;
}
for (WasmABIArgIter i(args); !i.done(); i++) {
MWasmParameter* ins = MWasmParameter::New(alloc(), *i, i.mirType());
curBlock_->add(ins);
if (args.isSyntheticStackResultPointerArg(i.index())) {
MOZ_ASSERT(stackResultPointer_ == nullptr);
stackResultPointer_ = ins;
} else {
curBlock_->initSlot(info().localSlot(args.naturalIndex(i.index())),
ins);
}
if (!mirGen_.ensureBallast()) {
return false;
}
}
// Set up a parameter that receives the hidden instance pointer argument.
instancePointer_ =
MWasmParameter::New(alloc(), ABIArg(InstanceReg), MIRType::Pointer);
curBlock_->add(instancePointer_);
if (!mirGen_.ensureBallast()) {
return false;
}
for (size_t i = args.lengthWithoutStackResults(); i < locals_.length();
i++) {
MInstruction* ins = nullptr;
switch (locals_[i].kind()) {
case ValType::I32:
ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
break;
case ValType::I64:
ins = MConstant::NewInt64(alloc(), 0);
break;
case ValType::V128:
#ifdef ENABLE_WASM_SIMD
ins =
MWasmFloatConstant::NewSimd128(alloc(), SimdConstant::SplatX4(0));
break;
#else
return iter().fail("Ion has no SIMD support yet");
#endif
case ValType::F32:
ins = MConstant::New(alloc(), Float32Value(0.f), MIRType::Float32);
break;
case ValType::F64:
ins = MConstant::New(alloc(), DoubleValue(0.0), MIRType::Double);
break;
case ValType::Rtt:
case ValType::Ref:
ins = MWasmNullConstant::New(alloc());
break;
}
curBlock_->add(ins);
curBlock_->initSlot(info().localSlot(i), ins);
if (!mirGen_.ensureBallast()) {
return false;
}
}
return true;
}
void finish() {
mirGen().initWasmMaxStackArgBytes(maxStackArgBytes_);
MOZ_ASSERT(loopDepth_ == 0);
MOZ_ASSERT(blockDepth_ == 0);
#ifdef DEBUG
for (ControlFlowPatchVector& patches : blockPatches_) {
MOZ_ASSERT(patches.empty());
}
#endif
MOZ_ASSERT(inDeadCode());
MOZ_ASSERT(done(), "all bytes must be consumed");
MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
}
/************************* Read-only interface (after local scope setup) */
MIRGenerator& mirGen() const { return mirGen_; }
MIRGraph& mirGraph() const { return graph_; }
const CompileInfo& info() const { return info_; }
MDefinition* getLocalDef(unsigned slot) {
if (inDeadCode()) {
return nullptr;
}
return curBlock_->getSlot(info().localSlot(slot));
}
const ValTypeVector& locals() const { return locals_; }
/***************************** Code generation (after local scope setup) */
MDefinition* constant(const Value& v, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
MConstant* constant = MConstant::New(alloc(), v, type);
curBlock_->add(constant);
return constant;
}
MDefinition* constant(float f) {
if (inDeadCode()) {
return nullptr;
}
auto* cst = MWasmFloatConstant::NewFloat32(alloc(), f);
curBlock_->add(cst);
return cst;
}
MDefinition* constant(double d) {
if (inDeadCode()) {
return nullptr;
}
auto* cst = MWasmFloatConstant::NewDouble(alloc(), d);
curBlock_->add(cst);
return cst;
}
MDefinition* constant(int64_t i) {
if (inDeadCode()) {
return nullptr;
}
MConstant* constant = MConstant::NewInt64(alloc(), i);
curBlock_->add(constant);
return constant;
}
#ifdef ENABLE_WASM_SIMD
MDefinition* constant(V128 v) {
if (inDeadCode()) {
return nullptr;
}
MWasmFloatConstant* constant = MWasmFloatConstant::NewSimd128(
alloc(), SimdConstant::CreateSimd128((int8_t*)v.bytes));
curBlock_->add(constant);
return constant;
}
#endif
MDefinition* nullRefConstant() {
if (inDeadCode()) {
return nullptr;
}
// MConstant has a lot of baggage so we don't use that here.
MWasmNullConstant* constant = MWasmNullConstant::New(alloc());
curBlock_->add(constant);
return constant;
}
void fence() {
if (inDeadCode()) {
return;
}
MWasmFence* ins = MWasmFence::New(alloc());
curBlock_->add(ins);
}
template <class T>
MDefinition* unary(MDefinition* op) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), op);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* unary(MDefinition* op, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), op, type);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* binary(MDefinition* lhs, MDefinition* rhs) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), lhs, rhs);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), lhs, rhs, type);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type,
MWasmBinaryBitwise::SubOpcode subOpc) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), lhs, rhs, type, subOpc);
curBlock_->add(ins);
return ins;
}
MDefinition* ursh(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MUrsh::NewWasm(alloc(), lhs, rhs, type);
curBlock_->add(ins);
return ins;
}
MDefinition* add(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MAdd::NewWasm(alloc(), lhs, rhs, type);
curBlock_->add(ins);
return ins;
}
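// Wasm (unlike asm.js) requires NaN payloads to be preserved, which rules out
// some algebraic simplifications of floating point expressions; see the
// comments in sub() and mul() below.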
bool mustPreserveNaN(MIRType type) {
return IsFloatingPointType(type) && !moduleEnv().isAsmJS();
}
MDefinition* sub(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
// wasm can't fold x - 0.0 because of NaN with custom payloads.
MSub* ins = MSub::NewWasm(alloc(), lhs, rhs, type, mustPreserveNaN(type));
curBlock_->add(ins);
return ins;
}
MDefinition* nearbyInt(MDefinition* input, RoundingMode roundingMode) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MNearbyInt::New(alloc(), input, input->type(), roundingMode);
curBlock_->add(ins);
return ins;
}
MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type,
bool isMax) {
if (inDeadCode()) {
return nullptr;
}
if (mustPreserveNaN(type)) {
// Convert signaling NaN to quiet NaNs.
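// Subtracting 0.0 quiets a signaling NaN while leaving every other value
// (including -0.0) unchanged, so MMinMax only ever sees quiet NaNs.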
MDefinition* zero = constant(DoubleValue(0.0), type);
lhs = sub(lhs, zero, type);
rhs = sub(rhs, zero, type);
}
MMinMax* ins = MMinMax::NewWasm(alloc(), lhs, rhs, type, isMax);
curBlock_->add(ins);
return ins;
}
MDefinition* mul(MDefinition* lhs, MDefinition* rhs, MIRType type,
MMul::Mode mode) {
if (inDeadCode()) {
return nullptr;
}
// wasm can't fold x * 1.0 because of NaN with custom payloads.
auto* ins =
MMul::NewWasm(alloc(), lhs, rhs, type, mode, mustPreserveNaN(type));
curBlock_->add(ins);
return ins;
}
MDefinition* div(MDefinition* lhs, MDefinition* rhs, MIRType type,
bool unsignd) {
if (inDeadCode()) {
return nullptr;
}
bool trapOnError = !moduleEnv().isAsmJS();
if (!unsignd && type == MIRType::Int32) {
// Enforce the signedness of the operation by coercing the operands
// to signed. Otherwise, operands that "look" unsigned to Ion but
// are not unsigned to Baldr (eg, unsigned right shifts) may lead to
// the operation being executed unsigned. Applies to mod() as well.
//
// Do this for Int32 only since Int64 is not subject to the same
// issues.
//
// Note the offsets passed to MWasmBuiltinTruncateToInt32 are wrong here,
// but it doesn't matter: they're not codegen'd to calls since inputs
// already are int32.
auto* lhs2 = createTruncateToInt32(lhs);
curBlock_->add(lhs2);
lhs = lhs2;
auto* rhs2 = createTruncateToInt32(rhs);
curBlock_->add(rhs2);
rhs = rhs2;
}
// For x86 and ARM we implement i64 div via a C++ builtin.
// A call to a C++ builtin requires the instance pointer.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
if (type == MIRType::Int64) {
auto* ins =
MWasmBuiltinDivI64::New(alloc(), lhs, rhs, instancePointer_, unsignd,
trapOnError, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
#endif
auto* ins = MDiv::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
bytecodeOffset(), mustPreserveNaN(type));
curBlock_->add(ins);
return ins;
}
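// Truncate a value to int32. Floating point inputs use the builtin-capable
// node, which carries the instance pointer in case the lowering has to call a
// runtime builtin.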
MInstruction* createTruncateToInt32(MDefinition* op) {
if (op->type() == MIRType::Double || op->type() == MIRType::Float32) {
return MWasmBuiltinTruncateToInt32::New(alloc(), op, instancePointer_);
}
return MTruncateToInt32::New(alloc(), op);
}
MDefinition* mod(MDefinition* lhs, MDefinition* rhs, MIRType type,
bool unsignd) {
if (inDeadCode()) {
return nullptr;
}
bool trapOnError = !moduleEnv().isAsmJS();
if (!unsignd && type == MIRType::Int32) {
// See block comment in div().
auto* lhs2 = createTruncateToInt32(lhs);
curBlock_->add(lhs2);
lhs = lhs2;
auto* rhs2 = createTruncateToInt32(rhs);
curBlock_->add(rhs2);
rhs = rhs2;
}
// For x86 and ARM we implement i64 mod via a C++ builtin.
// A call to a C++ builtin requires the instance pointer.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
if (type == MIRType::Int64) {
auto* ins =
MWasmBuiltinModI64::New(alloc(), lhs, rhs, instancePointer_, unsignd,
trapOnError, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
#endif
// The Double case is handled separately because we call the BuiltinThunk for
// it and so need to add a dependency on instancePointer.
if (type == MIRType::Double) {
auto* ins = MWasmBuiltinModD::New(alloc(), lhs, rhs, instancePointer_,
type, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
auto* ins = MMod::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
bytecodeOffset());
curBlock_->add(ins);
return ins;
}
MDefinition* bitnot(MDefinition* op) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MBitNot::New(alloc(), op);
curBlock_->add(ins);
return ins;
}
MDefinition* select(MDefinition* trueExpr, MDefinition* falseExpr,
MDefinition* condExpr) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MWasmSelect::New(alloc(), trueExpr, falseExpr, condExpr);
curBlock_->add(ins);
return ins;
}
MDefinition* extendI32(MDefinition* op, bool isUnsigned) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MExtendInt32ToInt64::New(alloc(), op, isUnsigned);
curBlock_->add(ins);
return ins;
}
MDefinition* signExtend(MDefinition* op, uint32_t srcSize,
uint32_t targetSize) {
if (inDeadCode()) {
return nullptr;
}
MInstruction* ins;
switch (targetSize) {
case 4: {
MSignExtendInt32::Mode mode;
switch (srcSize) {
case 1:
mode = MSignExtendInt32::Byte;
break;
case 2:
mode = MSignExtendInt32::Half;
break;
default:
MOZ_CRASH("Bad sign extension");
}
ins = MSignExtendInt32::New(alloc(), op, mode);
break;
}
case 8: {
MSignExtendInt64::Mode mode;
switch (srcSize) {
case 1:
mode = MSignExtendInt64::Byte;
break;
case 2:
mode = MSignExtendInt64::Half;
break;
case 4:
mode = MSignExtendInt64::Word;
break;
default:
MOZ_CRASH("Bad sign extension");
}
ins = MSignExtendInt64::New(alloc(), op, mode);
break;
}
default: {
MOZ_CRASH("Bad sign extension");
}
}
curBlock_->add(ins);
return ins;
}
MDefinition* convertI64ToFloatingPoint(MDefinition* op, MIRType type,
bool isUnsigned) {
if (inDeadCode()) {
return nullptr;
}
#if defined(JS_CODEGEN_ARM)
auto* ins = MBuiltinInt64ToFloatingPoint::New(
alloc(), op, instancePointer_, type, bytecodeOffset(), isUnsigned);
#else
auto* ins = MInt64ToFloatingPoint::New(alloc(), op, type, bytecodeOffset(),
isUnsigned);
#endif
curBlock_->add(ins);
return ins;
}
MDefinition* rotate(MDefinition* input, MDefinition* count, MIRType type,
bool left) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MRotate::New(alloc(), input, count, type, left);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* truncate(MDefinition* op, TruncFlags flags) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = T::New(alloc(), op, flags, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
#if defined(JS_CODEGEN_ARM)
MDefinition* truncateWithInstance(MDefinition* op, TruncFlags flags) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MWasmBuiltinTruncateToInt64::New(alloc(), op, instancePointer_,
flags, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
#endif
MDefinition* compare(MDefinition* lhs, MDefinition* rhs, JSOp op,
MCompare::CompareType type) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MCompare::NewWasm(alloc(), lhs, rhs, op, type);
curBlock_->add(ins);
return ins;
}
void assign(unsigned slot, MDefinition* def) {
if (inDeadCode()) {
return;
}
curBlock_->setSlot(info().localSlot(slot), def);
}
#ifdef ENABLE_WASM_SIMD
// About Wasm SIMD as supported by Ion:
//
// The expectation is that Ion will only ever support SIMD on x86 and x64,
// since Cranelift will be the optimizing compiler for Arm64, ARMv7 will cease
// to be a tier-1 platform soon, and MIPS64 will never implement SIMD.
//
// The division of the operations into MIR nodes reflects that expectation,
// and is a good fit for x86/x64. Should the expectation change we'll
// possibly want to re-architect the SIMD support to be a little more general.
//
// Most SIMD operations map directly to a single MIR node that ultimately ends
// up being expanded in the macroassembler.
//
// Some SIMD operations that do have a complete macroassembler expansion are
// open-coded into multiple MIR nodes here; in some cases that's just
// convenience, in other cases it may also allow them to benefit from Ion
// optimizations. The reason for the expansions will be documented by a
// comment.
// (v128,v128) -> v128 effect-free binary operations
MDefinition* binarySimd128(MDefinition* lhs, MDefinition* rhs,
bool commutative, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
rhs->type() == MIRType::Simd128);
auto* ins = MWasmBinarySimd128::New(alloc(), lhs, rhs, commutative, op);
curBlock_->add(ins);
return ins;
}
// (v128,i32) -> v128 effect-free shift operations
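// Wasm takes the shift count modulo the lane width; when the macroassembler
// reports that the underlying instruction does not do that itself, the count
// is first masked with the value the macroassembler provides.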
MDefinition* shiftSimd128(MDefinition* lhs, MDefinition* rhs, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
rhs->type() == MIRType::Int32);
int32_t maskBits;
if (MacroAssembler::MustMaskShiftCountSimd128(op, &maskBits)) {
MConstant* mask = MConstant::New(alloc(), Int32Value(maskBits));
curBlock_->add(mask);
auto* rhs2 = MBitAnd::New(alloc(), rhs, mask, MIRType::Int32);
curBlock_->add(rhs2);
rhs = rhs2;
}
auto* ins = MWasmShiftSimd128::New(alloc(), lhs, rhs, op);
curBlock_->add(ins);
return ins;
}
// (v128,scalar,imm) -> v128
MDefinition* replaceLaneSimd128(MDefinition* lhs, MDefinition* rhs,
uint32_t laneIndex, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(lhs->type() == MIRType::Simd128);
auto* ins = MWasmReplaceLaneSimd128::New(alloc(), lhs, rhs, laneIndex, op);
curBlock_->add(ins);
return ins;
}
// (scalar) -> v128 effect-free unary operations
MDefinition* scalarToSimd128(MDefinition* src, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MWasmScalarToSimd128::New(alloc(), src, op);
curBlock_->add(ins);
return ins;
}
// (v128) -> v128 effect-free unary operations
MDefinition* unarySimd128(MDefinition* src, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(src->type() == MIRType::Simd128);
auto* ins = MWasmUnarySimd128::New(alloc(), src, op);
curBlock_->add(ins);
return ins;
}
// (v128, imm) -> scalar effect-free unary operations
MDefinition* reduceSimd128(MDefinition* src, SimdOp op, ValType outType,
uint32_t imm = 0) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(src->type() == MIRType::Simd128);
auto* ins =
MWasmReduceSimd128::New(alloc(), src, op, ToMIRType(outType), imm);
curBlock_->add(ins);
return ins;
}
// (v128, v128, v128) -> v128 effect-free operations
MDefinition* ternarySimd128(MDefinition* v0, MDefinition* v1, MDefinition* v2,
SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(v0->type() == MIRType::Simd128 &&
v1->type() == MIRType::Simd128 &&
v2->type() == MIRType::Simd128);
auto* ins = MWasmTernarySimd128::New(alloc(), v0, v1, v2, op);
curBlock_->add(ins);
return ins;
}
// (v128, v128, imm_v128) -> v128 effect-free operations
MDefinition* shuffleSimd128(MDefinition* v1, MDefinition* v2, V128 control) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(v1->type() == MIRType::Simd128);
MOZ_ASSERT(v2->type() == MIRType::Simd128);
auto* ins = BuildWasmShuffleSimd128(
alloc(), reinterpret_cast<int8_t*>(control.bytes), v1, v2);
curBlock_->add(ins);
return ins;
}
// Also see below for SIMD memory references
#endif // ENABLE_WASM_SIMD
/************************************************ Linear memory accesses */
// For detailed information about memory accesses, see "Linear memory
// addresses and bounds checking" in WasmMemory.cpp.
private:
// If the platform does not have a HeapReg, load the memory base from
// instance.
MWasmLoadInstance* maybeLoadMemoryBase() {
MWasmLoadInstance* load = nullptr;
#ifdef JS_CODEGEN_X86
AliasSet aliases = !moduleEnv_.memory->canMovingGrow()
? AliasSet::None()
: AliasSet::Load(AliasSet::WasmHeapMeta);
load = MWasmLoadInstance::New(alloc(), instancePointer_,
wasm::Instance::offsetOfMemoryBase(),
MIRType::Pointer, aliases);
curBlock_->add(load);
#endif
return load;
}
public:
// A value holding the memory base, whether that's HeapReg or some other
// register.
MWasmHeapBase* memoryBase() {
MWasmHeapBase* base = nullptr;
AliasSet aliases = !moduleEnv_.memory->canMovingGrow()
? AliasSet::None()
: AliasSet::Load(AliasSet::WasmHeapMeta);
base = MWasmHeapBase::New(alloc(), instancePointer_, aliases);
curBlock_->add(base);
return base;
}
private:
// If the bounds checking strategy requires it, load the bounds check limit
// from the instance.
MWasmLoadInstance* maybeLoadBoundsCheckLimit(MIRType type) {
MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Int64);
if (moduleEnv_.hugeMemoryEnabled()) {
return nullptr;
}
AliasSet aliases = !moduleEnv_.memory->canMovingGrow()
? AliasSet::None()
: AliasSet::Load(AliasSet::WasmHeapMeta);
auto* load = MWasmLoadInstance::New(
alloc(), instancePointer_, wasm::Instance::offsetOfBoundsCheckLimit(),
type, aliases);
curBlock_->add(load);
return load;
}
// Return true if the access requires an alignment check. If so, sets
// *mustAdd to true if the offset must be added to the pointer before
// checking.
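// For example (illustrative numbers): an atomic 4-byte access with a constant
// base of 0x1000 and an offset of 4 has a known, aligned effective address and
// needs no check, while the same access with an unknown base and an unaligned
// offset must add the offset into the pointer before checking.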
bool needAlignmentCheck(MemoryAccessDesc* access, MDefinition* base,
bool* mustAdd) {
MOZ_ASSERT(!*mustAdd);
// asm.js accesses are always aligned and need no checks.
if (moduleEnv_.isAsmJS() || !access->isAtomic()) {
return false;
}
// If the EA is known and aligned it will need no checks.
if (base->isConstant()) {
// We only care about the low bits, so overflow is OK, as is chopping off
// the high bits of an i64 pointer.
uint32_t ptr = 0;
if (isMem64()) {
ptr = uint32_t(base->toConstant()->toInt64());
} else {
ptr = base->toConstant()->toInt32();
}
if (((ptr + access->offset64()) & (access->byteSize() - 1)) == 0) {
return false;
}
}
// If the offset is aligned then the EA is just the pointer, for
// the purposes of this check.
*mustAdd = (access->offset64() & (access->byteSize() - 1)) != 0;
return true;
}
// Fold a constant base into the offset and make the base 0, provided the
// offset stays below the guard limit. The reason for folding the base into
// the offset rather than vice versa is that a small offset can be ignored
// by both explicit bounds checking and bounds check elimination.
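// For example (illustrative numbers): with a constant base of 64, an access
// offset of 16, and a guard limit comfortably above 80, the access is
// rewritten as base 0 with offset 80; the small combined offset can still be
// ignored by the explicit bounds check.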
void foldConstantPointer(MemoryAccessDesc* access, MDefinition** base) {
uint32_t offsetGuardLimit =
GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
if ((*base)->isConstant()) {
uint64_t basePtr = 0;
if (isMem64()) {
basePtr = uint64_t((*base)->toConstant()->toInt64());
} else {
basePtr = uint64_t(int64_t((*base)->toConstant()->toInt32()));
}
uint64_t offset = access->offset64();
if (offset < offsetGuardLimit && basePtr < offsetGuardLimit - offset) {
offset += uint32_t(basePtr);
access->setOffset32(uint32_t(offset));
MConstant* ins = nullptr;
if (isMem64()) {
ins = MConstant::NewInt64(alloc(), 0);
} else {
ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
}
curBlock_->add(ins);
*base = ins;
}
}
}
// If the offset must be added because it is large or because the true EA must
// be checked, compute the effective address, trapping on overflow.
void maybeComputeEffectiveAddress(MemoryAccessDesc* access,
MDefinition** base, bool mustAddOffset) {
uint32_t offsetGuardLimit =
GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
if (access->offset64() >= offsetGuardLimit ||
access->offset64() > UINT32_MAX || mustAddOffset ||
!JitOptions.wasmFoldOffsets) {
*base = computeEffectiveAddress(*base, access);
}
}
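// Load the bounds check limit in the width required for the comparison, or
// return nullptr when no explicit bounds check is needed (huge memory).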
MWasmLoadInstance* needBoundsCheck() {
#ifdef JS_64BIT
// For 32-bit base pointers:
//
// If the bounds check uses the full 64 bits of the bounds check limit, then
// the base pointer must be zero-extended to 64 bits before checking and
// wrapped back to 32-bits after Spectre masking. (And it's important that
// the value we end up with has flowed through the Spectre mask.)
//
// If the memory's max size is known to be smaller than 64K pages exactly,
// we can use a 32-bit check and avoid extension and wrapping.
static_assert(0x100000000 % PageSize == 0);
bool mem32LimitIs64Bits = isMem32() &&
!moduleEnv_.memory->boundsCheckLimitIs32Bits() &&
MaxMemoryPages(moduleEnv_.memory->indexType()) >=
Pages(0x100000000 / PageSize);
#else
// On 32-bit platforms we have no more than 2GB memory and the limit for a
// 32-bit base pointer is never a 64-bit value.
bool mem32LimitIs64Bits = false;
#endif
return maybeLoadBoundsCheckLimit(
mem32LimitIs64Bits || isMem64() ? MIRType::Int64 : MIRType::Int32);
}
void performBoundsCheck(MDefinition** base,
MWasmLoadInstance* boundsCheckLimit) {
// At the outset, actualBase could be the result of pretty much any integer
// operation, or it could be the load of an integer constant. If its type
// is i32, we may assume the value has a canonical representation for the
// platform, see doc block in MacroAssembler.h.
MDefinition* actualBase = *base;
// Extend an i32 index value to perform a 64-bit bounds check if the memory
// can be 4GB or larger.
bool extendAndWrapIndex =
isMem32() && boundsCheckLimit->type() == MIRType::Int64;
if (extendAndWrapIndex) {
auto* extended = MWasmExtendU32Index::New(alloc(), actualBase);
curBlock_->add(extended);
actualBase = extended;
}
auto* ins =
MWasmBoundsCheck::New(alloc(), actualBase, boundsCheckLimit,
bytecodeOffset(), MWasmBoundsCheck::Memory);
curBlock_->add(ins);
actualBase = ins;
// If we're masking, then we update *base to create a dependency chain
// through the masked index. But we will first need to wrap the index
// value if it was extended above.
if (JitOptions.spectreIndexMasking) {
if (extendAndWrapIndex) {
auto* wrapped = MWasmWrapU32Index::New(alloc(), actualBase);
curBlock_->add(wrapped);
actualBase = wrapped;
}
*base = actualBase;
}
}
// Perform all necessary checking before a wasm heap access, based on the
// attributes of the access and base pointer.
//
// For 64-bit indices on platforms that are limited to indices that fit into
// 32 bits (all 32-bit platforms and mips64), this returns a bounds-checked
// `base` that has type Int32. Lowering code depends on this and will assert
// that the base has this type. See the end of this function.
void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access,
MDefinition** base) {
MOZ_ASSERT(!inDeadCode());
MOZ_ASSERT(!moduleEnv_.isAsmJS());
// Attempt to fold an offset into a constant base pointer so as to simplify
// the addressing expression. This may update *base.
foldConstantPointer(access, base);
// Determine whether an alignment check is needed and whether the offset
// must be checked too.
bool mustAddOffsetForAlignmentCheck = false;
bool alignmentCheck =
needAlignmentCheck(access, *base, &mustAddOffsetForAlignmentCheck);
// If bounds checking or alignment checking requires it, compute the
// effective address: add the offset into the pointer and trap on overflow.
// This may update *base.
maybeComputeEffectiveAddress(access, base, mustAddOffsetForAlignmentCheck);
// Emit the alignment check if necessary; it traps if it fails.
if (alignmentCheck) {
curBlock_->add(MWasmAlignmentCheck::New(
alloc(), *base, access->byteSize(), bytecodeOffset()));
}
// Emit the bounds check if necessary; it traps if it fails. This may
// update *base.
MWasmLoadInstance* boundsCheckLimit = needBoundsCheck();
if (boundsCheckLimit) {
performBoundsCheck(base, boundsCheckLimit);
}
#ifndef JS_64BIT
if (isMem64()) {
// We must have had an explicit bounds check (or one was elided if it was
// proved redundant), and on 32-bit systems the index will for sure fit in
// 32 bits: the max memory is 2GB. So chop the index down to 32-bit to
// simplify the back-end.
MOZ_ASSERT((*base)->type() == MIRType::Int64);
MOZ_ASSERT(!moduleEnv_.hugeMemoryEnabled());
auto* chopped = MWasmWrapU32Index::New(alloc(), *base);
MOZ_ASSERT(chopped->type() == MIRType::Int32);
curBlock_->add(chopped);
*base = chopped;
}
#endif
}
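// An i64 result produced by an access narrower than 8 bytes (e.g. the
// zero-extending i64.atomic.rmw8.* operations) is computed as a 32-bit
// operation and then zero-extended to 64 bits by the callers below.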
bool isSmallerAccessForI64(ValType result, const MemoryAccessDesc* access) {
if (result == ValType::I64 && access->byteSize() <= 4) {
// These smaller accesses should all be zero-extending.
MOZ_ASSERT(!isSignedIntType(access->type()));
return true;
}
return false;
}
public:
bool isMem32() { return moduleEnv_.memory->indexType() == IndexType::I32; }
bool isMem64() { return moduleEnv_.memory->indexType() == IndexType::I64; }
// Sometimes we need to determine the memory's index type before the opcode
// reader (which rejects memory opcodes when there is no memory) has had a
// chance to do so. This predicate is safe to call in that situation.
bool isNoMemOrMem32() {
return !moduleEnv_.usesMemory() ||
moduleEnv_.memory->indexType() == IndexType::I32;
}
// Add the offset into the pointer to yield the EA; trap on overflow.
MDefinition* computeEffectiveAddress(MDefinition* base,
MemoryAccessDesc* access) {
if (inDeadCode()) {
return nullptr;
}
uint64_t offset = access->offset64();
if (offset == 0) {
return base;
}
auto* ins = MWasmAddOffset::New(alloc(), base, offset, bytecodeOffset());
curBlock_->add(ins);
access->clearOffset();
return ins;
}
MDefinition* load(MDefinition* base, MemoryAccessDesc* access,
ValType result) {
if (inDeadCode()) {
return nullptr;
}
MWasmLoadInstance* memoryBase = maybeLoadMemoryBase();
MInstruction* load = nullptr;
if (moduleEnv_.isAsmJS()) {
MOZ_ASSERT(access->offset64() == 0);
MWasmLoadInstance* boundsCheckLimit =
maybeLoadBoundsCheckLimit(MIRType::Int32);
load = MAsmJSLoadHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
access->type());
} else {
checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
load =
MWasmLoad::New(alloc(), memoryBase, base, *access, ToMIRType(result));
}
if (!load) {
return nullptr;
}
curBlock_->add(load);
return load;
}
void store(MDefinition* base, MemoryAccessDesc* access, MDefinition* v) {
if (inDeadCode()) {
return;
}
MWasmLoadInstance* memoryBase = maybeLoadMemoryBase();
MInstruction* store = nullptr;
if (moduleEnv_.isAsmJS()) {
MOZ_ASSERT(access->offset64() == 0);
MWasmLoadInstance* boundsCheckLimit =
maybeLoadBoundsCheckLimit(MIRType::Int32);
store = MAsmJSStoreHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
access->type(), v);
} else {
checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
store = MWasmStore::New(alloc(), memoryBase, base, *access, v);
}
if (!store) {
return;
}
curBlock_->add(store);
}
MDefinition* atomicCompareExchangeHeap(MDefinition* base,
MemoryAccessDesc* access,
ValType result, MDefinition* oldv,
MDefinition* newv) {
if (inDeadCode()) {
return nullptr;
}
checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
if (isSmallerAccessForI64(result, access)) {
auto* cvtOldv =
MWrapInt64ToInt32::New(alloc(), oldv, /*bottomHalf=*/true);
curBlock_->add(cvtOldv);
oldv = cvtOldv;
auto* cvtNewv =
MWrapInt64ToInt32::New(alloc(), newv, /*bottomHalf=*/true);
curBlock_->add(cvtNewv);
newv = cvtNewv;
}
MWasmLoadInstance* memoryBase = maybeLoadMemoryBase();
MInstruction* cas = MWasmCompareExchangeHeap::New(
alloc(), bytecodeOffset(), memoryBase, base, *access, oldv, newv,
instancePointer_);
if (!cas) {
return nullptr;
}
curBlock_->add(cas);
if (isSmallerAccessForI64(result, access)) {
cas = MExtendInt32ToInt64::New(alloc(), cas, true);
curBlock_->add(cas);
}
return cas;
}
MDefinition* atomicExchangeHeap(MDefinition* base, MemoryAccessDesc* access,
ValType result, MDefinition* value) {
if (inDeadCode()) {
return nullptr;
}
checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
if (isSmallerAccessForI64(result, access)) {
auto* cvtValue =
MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
curBlock_->add(cvtValue);
value = cvtValue;
}
MWasmLoadInstance* memoryBase = maybeLoadMemoryBase();
MInstruction* xchg =
MWasmAtomicExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
base, *access, value, instancePointer_);
if (!xchg) {
return nullptr;
}
curBlock_->add(xchg);
if (isSmallerAccessForI64(result, access)) {
xchg = MExtendInt32ToInt64::New(alloc(), xchg, true);
curBlock_->add(xchg);
}
return xchg;
}
MDefinition* atomicBinopHeap(AtomicOp op, MDefinition* base,
MemoryAccessDesc* access, ValType result,
MDefinition* value) {
if (inDeadCode()) {
return nullptr;
}
checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
if (isSmallerAccessForI64(result, access)) {
auto* cvtValue =
MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
curBlock_->add(cvtValue);
value = cvtValue;
}
MWasmLoadInstance* memoryBase = maybeLoadMemoryBase();
MInstruction* binop =
MWasmAtomicBinopHeap::New(alloc(), bytecodeOffset(), op, memoryBase,
base, *access, value, instancePointer_);
if (!binop) {
return nullptr;
}
curBlock_->add(binop);
if (isSmallerAccessForI64(result, access)) {
binop = MExtendInt32ToInt64::New(alloc(), binop, true);
curBlock_->add(binop);
}
return binop;
}
#ifdef ENABLE_WASM_SIMD
MDefinition* loadSplatSimd128(Scalar::Type viewType,
const LinearMemoryAddress<MDefinition*>& addr,
wasm::SimdOp splatOp) {
if (inDeadCode()) {
return nullptr;
}
MemoryAccessDesc access(viewType, addr.align, addr.offset,
bytecodeIfNotAsmJS());
// Generate better code (on x86) when the splat can be folded into the load.
// If AVX2 is enabled, more broadcast operators are available.
if (viewType == Scalar::Float64
# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
|| (js::jit::CPUInfo::IsAVX2Present() &&
(viewType == Scalar::Uint8 || viewType == Scalar::Uint16 ||
viewType == Scalar::Float32))
# endif
) {
access.setSplatSimd128Load();
return load(addr.base, &access, ValType::V128);
}
ValType resultType = ValType::I32;
if (viewType == Scalar::Float32) {
resultType = ValType::F32;
splatOp = wasm::SimdOp::F32x4Splat;
}
auto* scalar = load(addr.base, &access, resultType);
if (!inDeadCode() && !scalar) {
return nullptr;
}
return scalarToSimd128(scalar, splatOp);
}
MDefinition* loadExtendSimd128(const LinearMemoryAddress<MDefinition*>& addr,
wasm::SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
// Generate better code (on x86) by loading as a double with an
// operation that sign extends directly.
MemoryAccessDesc access(Scalar::Float64, addr.align, addr.offset,
bytecodeIfNotAsmJS());
access.setWidenSimd128Load(op);
return load(addr.base, &access, ValType::V128);
}
MDefinition* loadZeroSimd128(Scalar::Type viewType, size_t numBytes,
const LinearMemoryAddress<MDefinition*>& addr) {
if (inDeadCode()) {
return nullptr;
}
MemoryAccessDesc access(viewType, addr.align, addr.offset,
bytecodeIfNotAsmJS());
access.setZeroExtendSimd128Load();
return load(addr.base, &access, ValType::V128);
}
MDefinition* loadLaneSimd128(uint32_t laneSize,
const LinearMemoryAddress<MDefinition*>& addr,
uint32_t laneIndex, MDefinition* src) {
if (inDeadCode()) {
return nullptr;
}
MemoryAccessDesc access(Scalar::Simd128, addr.align, addr.offset,
bytecodeIfNotAsmJS());
MWasmLoadInstance* memoryBase = maybeLoadMemoryBase();
MDefinition* base = addr.base;
MOZ_ASSERT(!moduleEnv_.isAsmJS());
checkOffsetAndAlignmentAndBounds(&access, &base);
# ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
# endif
MInstruction* load = MWasmLoadLaneSimd128::New(
alloc(), memoryBase, base, access, laneSize, laneIndex, src);
if (!load) {
return nullptr;
}
curBlock_->add(load);
return load;
}
void storeLaneSimd128(uint32_t laneSize,
const LinearMemoryAddress<MDefinition*>& addr,
uint32_t laneIndex, MDefinition* src) {
if (inDeadCode()) {
return;
}
MemoryAccessDesc access(Scalar::Simd128, addr.align, addr.offset,
bytecodeIfNotAsmJS());
MWasmLoadInstance* memoryBase = maybeLoadMemoryBase();
MDefinition* base = addr.base;
MOZ_ASSERT(!moduleEnv_.isAsmJS());
checkOffsetAndAlignmentAndBounds(&access, &base);
# ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
# endif
MInstruction* store = MWasmStoreLaneSimd128::