/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
*
* Copyright 2015 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wasm/WasmIonCompile.h"
#include "mozilla/MathAlgorithms.h"
#include <algorithm>
#include "jit/ABIArgGenerator.h"
#include "jit/CodeGenerator.h"
#include "jit/CompileInfo.h"
#include "jit/Ion.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/ShuffleAnalysis.h"
#include "js/ScalarType.h" // js::Scalar::Type
#include "wasm/WasmBaselineCompile.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmIntrinsic.h"
#include "wasm/WasmOpIter.h"
#include "wasm/WasmSignalHandlers.h"
#include "wasm/WasmStubs.h"
#include "wasm/WasmValidate.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::IsPowerOfTwo;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::Some;
namespace {
using BlockVector = Vector<MBasicBlock*, 8, SystemAllocPolicy>;
using DefVector = Vector<MDefinition*, 8, SystemAllocPolicy>;
struct IonCompilePolicy {
// We store SSA definitions in the value stack.
using Value = MDefinition*;
using ValueVector = DefVector;
// We store loop headers and then/else blocks in the control flow stack.
using ControlItem = MBasicBlock*;
};
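// With this policy, IonOpIter (defined just below) keeps an MDefinition* on
// its value stack for each wasm operand and an MBasicBlock* in each control
// frame, so FunctionCompiler can read SSA values and control-flow targets
// directly off the iterator.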
using IonOpIter = OpIter<IonCompilePolicy>;
class FunctionCompiler;
// CallCompileState describes a call that is being compiled.
class CallCompileState {
// A generator object that is passed each argument as it is compiled.
WasmABIArgGenerator abi_;
// Accumulates the register arguments while compiling arguments.
MWasmCall::Args regArgs_;
// Reserved argument for passing Instance* to builtin instance method calls.
ABIArg instanceArg_;
// The stack area in which the callee will write stack return values, or
// nullptr if no stack results.
MWasmStackResultArea* stackResultArea_ = nullptr;
// Only FunctionCompiler should be directly manipulating CallCompileState.
friend class FunctionCompiler;
};
// Encapsulates the compilation of a single function in a wasm or asm.js
// module. The function compiler handles the creation and final backend
// compilation of the MIR graph.
class FunctionCompiler {
struct ControlFlowPatch {
MControlInstruction* ins;
uint32_t index;
ControlFlowPatch(MControlInstruction* ins, uint32_t index)
: ins(ins), index(index) {}
};
using ControlFlowPatchVector = Vector<ControlFlowPatch, 0, SystemAllocPolicy>;
using ControlFlowPatchVectorVector =
Vector<ControlFlowPatchVector, 0, SystemAllocPolicy>;
const ModuleEnvironment& moduleEnv_;
IonOpIter iter_;
const FuncCompileInput& func_;
const ValTypeVector& locals_;
size_t lastReadCallSite_;
TempAllocator& alloc_;
MIRGraph& graph_;
const CompileInfo& info_;
MIRGenerator& mirGen_;
MBasicBlock* curBlock_;
uint32_t maxStackArgBytes_;
uint32_t loopDepth_;
uint32_t blockDepth_;
ControlFlowPatchVectorVector blockPatches_;
// TLS pointer argument to the current function.
MWasmParameter* tlsPointer_;
MWasmParameter* stackResultPointer_;
public:
FunctionCompiler(const ModuleEnvironment& moduleEnv, Decoder& decoder,
const FuncCompileInput& func, const ValTypeVector& locals,
MIRGenerator& mirGen)
: moduleEnv_(moduleEnv),
iter_(moduleEnv, decoder),
func_(func),
locals_(locals),
lastReadCallSite_(0),
alloc_(mirGen.alloc()),
graph_(mirGen.graph()),
info_(mirGen.outerInfo()),
mirGen_(mirGen),
curBlock_(nullptr),
maxStackArgBytes_(0),
loopDepth_(0),
blockDepth_(0),
tlsPointer_(nullptr),
stackResultPointer_(nullptr) {}
const ModuleEnvironment& moduleEnv() const { return moduleEnv_; }
IonOpIter& iter() { return iter_; }
TempAllocator& alloc() const { return alloc_; }
// FIXME(1401675): Replace with BlockType.
uint32_t funcIndex() const { return func_.index; }
const FuncType& funcType() const {
return *moduleEnv_.funcs[func_.index].type;
}
BytecodeOffset bytecodeOffset() const { return iter_.bytecodeOffset(); }
BytecodeOffset bytecodeIfNotAsmJS() const {
return moduleEnv_.isAsmJS() ? BytecodeOffset() : iter_.bytecodeOffset();
}
bool init() {
// Prepare the entry block for MIR generation:
const ArgTypeVector args(funcType());
if (!mirGen_.ensureBallast()) {
return false;
}
if (!newBlock(/* prev */ nullptr, &curBlock_)) {
return false;
}
for (WasmABIArgIter i(args); !i.done(); i++) {
MWasmParameter* ins = MWasmParameter::New(alloc(), *i, i.mirType());
curBlock_->add(ins);
if (args.isSyntheticStackResultPointerArg(i.index())) {
MOZ_ASSERT(stackResultPointer_ == nullptr);
stackResultPointer_ = ins;
} else {
curBlock_->initSlot(info().localSlot(args.naturalIndex(i.index())),
ins);
}
if (!mirGen_.ensureBallast()) {
return false;
}
}
// Set up a parameter that receives the hidden TLS pointer argument.
tlsPointer_ =
MWasmParameter::New(alloc(), ABIArg(WasmTlsReg), MIRType::Pointer);
curBlock_->add(tlsPointer_);
if (!mirGen_.ensureBallast()) {
return false;
}
for (size_t i = args.lengthWithoutStackResults(); i < locals_.length();
i++) {
MInstruction* ins = nullptr;
switch (locals_[i].kind()) {
case ValType::I32:
ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
break;
case ValType::I64:
ins = MConstant::NewInt64(alloc(), 0);
break;
case ValType::V128:
#ifdef ENABLE_WASM_SIMD
ins =
MWasmFloatConstant::NewSimd128(alloc(), SimdConstant::SplatX4(0));
break;
#else
return iter().fail("Ion has no SIMD support yet");
#endif
case ValType::F32:
ins = MConstant::New(alloc(), Float32Value(0.f), MIRType::Float32);
break;
case ValType::F64:
ins = MConstant::New(alloc(), DoubleValue(0.0), MIRType::Double);
break;
case ValType::Rtt:
case ValType::Ref:
ins = MWasmNullConstant::New(alloc());
break;
}
curBlock_->add(ins);
curBlock_->initSlot(info().localSlot(i), ins);
if (!mirGen_.ensureBallast()) {
return false;
}
}
return true;
}
void finish() {
mirGen().initWasmMaxStackArgBytes(maxStackArgBytes_);
MOZ_ASSERT(loopDepth_ == 0);
MOZ_ASSERT(blockDepth_ == 0);
#ifdef DEBUG
for (ControlFlowPatchVector& patches : blockPatches_) {
MOZ_ASSERT(patches.empty());
}
#endif
MOZ_ASSERT(inDeadCode());
MOZ_ASSERT(done(), "all bytes must be consumed");
MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
}
/************************* Read-only interface (after local scope setup) */
MIRGenerator& mirGen() const { return mirGen_; }
MIRGraph& mirGraph() const { return graph_; }
const CompileInfo& info() const { return info_; }
MDefinition* getLocalDef(unsigned slot) {
if (inDeadCode()) {
return nullptr;
}
return curBlock_->getSlot(info().localSlot(slot));
}
const ValTypeVector& locals() const { return locals_; }
/***************************** Code generation (after local scope setup) */
MDefinition* constant(const Value& v, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
MConstant* constant = MConstant::New(alloc(), v, type);
curBlock_->add(constant);
return constant;
}
MDefinition* constant(float f) {
if (inDeadCode()) {
return nullptr;
}
auto* cst = MWasmFloatConstant::NewFloat32(alloc(), f);
curBlock_->add(cst);
return cst;
}
MDefinition* constant(double d) {
if (inDeadCode()) {
return nullptr;
}
auto* cst = MWasmFloatConstant::NewDouble(alloc(), d);
curBlock_->add(cst);
return cst;
}
MDefinition* constant(int64_t i) {
if (inDeadCode()) {
return nullptr;
}
MConstant* constant = MConstant::NewInt64(alloc(), i);
curBlock_->add(constant);
return constant;
}
#ifdef ENABLE_WASM_SIMD
MDefinition* constant(V128 v) {
if (inDeadCode()) {
return nullptr;
}
MWasmFloatConstant* constant = MWasmFloatConstant::NewSimd128(
alloc(), SimdConstant::CreateSimd128((int8_t*)v.bytes));
curBlock_->add(constant);
return constant;
}
#endif
MDefinition* nullRefConstant() {
if (inDeadCode()) {
return nullptr;
}
// MConstant has a lot of baggage so we don't use that here.
MWasmNullConstant* constant = MWasmNullConstant::New(alloc());
curBlock_->add(constant);
return constant;
}
void fence() {
if (inDeadCode()) {
return;
}
MWasmFence* ins = MWasmFence::New(alloc());
curBlock_->add(ins);
}
template <class T>
MDefinition* unary(MDefinition* op) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), op);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* unary(MDefinition* op, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), op, type);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* binary(MDefinition* lhs, MDefinition* rhs) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), lhs, rhs);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), lhs, rhs, type);
curBlock_->add(ins);
return ins;
}
MDefinition* ursh(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MUrsh::NewWasm(alloc(), lhs, rhs, type);
curBlock_->add(ins);
return ins;
}
MDefinition* add(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MAdd::NewWasm(alloc(), lhs, rhs, type);
curBlock_->add(ins);
return ins;
}
bool mustPreserveNaN(MIRType type) {
return IsFloatingPointType(type) && !moduleEnv().isAsmJS();
}
MDefinition* sub(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
// wasm can't fold x - 0.0 because of NaN with custom payloads.
MSub* ins = MSub::NewWasm(alloc(), lhs, rhs, type, mustPreserveNaN(type));
curBlock_->add(ins);
return ins;
}
MDefinition* nearbyInt(MDefinition* input, RoundingMode roundingMode) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MNearbyInt::New(alloc(), input, input->type(), roundingMode);
curBlock_->add(ins);
return ins;
}
MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type,
bool isMax) {
if (inDeadCode()) {
return nullptr;
}
if (mustPreserveNaN(type)) {
// Convert signaling NaNs to quiet NaNs: subtracting 0.0 quiets an sNaN, so
// MMinMax below only ever sees quiet NaNs.
MDefinition* zero = constant(DoubleValue(0.0), type);
lhs = sub(lhs, zero, type);
rhs = sub(rhs, zero, type);
}
MMinMax* ins = MMinMax::NewWasm(alloc(), lhs, rhs, type, isMax);
curBlock_->add(ins);
return ins;
}
MDefinition* mul(MDefinition* lhs, MDefinition* rhs, MIRType type,
MMul::Mode mode) {
if (inDeadCode()) {
return nullptr;
}
// wasm can't fold x * 1.0 because of NaN with custom payloads.
auto* ins =
MMul::NewWasm(alloc(), lhs, rhs, type, mode, mustPreserveNaN(type));
curBlock_->add(ins);
return ins;
}
MDefinition* div(MDefinition* lhs, MDefinition* rhs, MIRType type,
bool unsignd) {
if (inDeadCode()) {
return nullptr;
}
bool trapOnError = !moduleEnv().isAsmJS();
if (!unsignd && type == MIRType::Int32) {
// Enforce the signedness of the operation by coercing the operands
// to signed. Otherwise, operands that "look" unsigned to Ion but
// are not unsigned to Baldr (e.g., unsigned right shifts) may lead to
// the operation being executed unsigned. Applies to mod() as well.
//
// Do this for Int32 only since Int64 is not subject to the same
// issues.
//
// Note the offsets passed to MWasmBuiltinTruncateToInt32 are wrong here,
// but it doesn't matter: they're not codegen'd to calls since inputs
// already are int32.
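//
// For example (illustrative): in
//   (i32.div_s (i32.shr_u (local.get 0) (i32.const 1)) (local.get 1))
// the shr_u result may "look" unsigned inside Ion; routing both operands
// through createTruncateToInt32() below re-establishes signed int32 operands
// so the division is emitted as a signed operation.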
auto* lhs2 = createTruncateToInt32(lhs);
curBlock_->add(lhs2);
lhs = lhs2;
auto* rhs2 = createTruncateToInt32(rhs);
curBlock_->add(rhs2);
rhs = rhs2;
}
// On x86 and ARM we implement i64 division via a C++ builtin.
// A call to a C++ builtin requires the TLS pointer.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
if (type == MIRType::Int64) {
auto* ins =
MWasmBuiltinDivI64::New(alloc(), lhs, rhs, tlsPointer_, unsignd,
trapOnError, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
#endif
auto* ins = MDiv::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
bytecodeOffset(), mustPreserveNaN(type));
curBlock_->add(ins);
return ins;
}
MInstruction* createTruncateToInt32(MDefinition* op) {
if (op->type() == MIRType::Double || op->type() == MIRType::Float32) {
return MWasmBuiltinTruncateToInt32::New(alloc(), op, tlsPointer_);
}
return MTruncateToInt32::New(alloc(), op);
}
MDefinition* mod(MDefinition* lhs, MDefinition* rhs, MIRType type,
bool unsignd) {
if (inDeadCode()) {
return nullptr;
}
bool trapOnError = !moduleEnv().isAsmJS();
if (!unsignd && type == MIRType::Int32) {
// See block comment in div().
auto* lhs2 = createTruncateToInt32(lhs);
curBlock_->add(lhs2);
lhs = lhs2;
auto* rhs2 = createTruncateToInt32(rhs);
curBlock_->add(rhs2);
rhs = rhs2;
}
// On x86 and ARM we implement i64 modulo via a C++ builtin.
// A call to a C++ builtin requires the TLS pointer.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
if (type == MIRType::Int64) {
auto* ins =
MWasmBuiltinModI64::New(alloc(), lhs, rhs, tlsPointer_, unsignd,
trapOnError, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
#endif
// The double case is handled separately because we call a builtin thunk for
// it and so need to add a dependency on the TLS pointer.
if (type == MIRType::Double) {
auto* ins = MWasmBuiltinModD::New(alloc(), lhs, rhs, tlsPointer_, type,
bytecodeOffset());
curBlock_->add(ins);
return ins;
}
auto* ins = MMod::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
bytecodeOffset());
curBlock_->add(ins);
return ins;
}
MDefinition* bitnot(MDefinition* op) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MBitNot::New(alloc(), op);
curBlock_->add(ins);
return ins;
}
MDefinition* select(MDefinition* trueExpr, MDefinition* falseExpr,
MDefinition* condExpr) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MWasmSelect::New(alloc(), trueExpr, falseExpr, condExpr);
curBlock_->add(ins);
return ins;
}
MDefinition* extendI32(MDefinition* op, bool isUnsigned) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MExtendInt32ToInt64::New(alloc(), op, isUnsigned);
curBlock_->add(ins);
return ins;
}
MDefinition* signExtend(MDefinition* op, uint32_t srcSize,
uint32_t targetSize) {
if (inDeadCode()) {
return nullptr;
}
MInstruction* ins;
switch (targetSize) {
case 4: {
MSignExtendInt32::Mode mode;
switch (srcSize) {
case 1:
mode = MSignExtendInt32::Byte;
break;
case 2:
mode = MSignExtendInt32::Half;
break;
default:
MOZ_CRASH("Bad sign extension");
}
ins = MSignExtendInt32::New(alloc(), op, mode);
break;
}
case 8: {
MSignExtendInt64::Mode mode;
switch (srcSize) {
case 1:
mode = MSignExtendInt64::Byte;
break;
case 2:
mode = MSignExtendInt64::Half;
break;
case 4:
mode = MSignExtendInt64::Word;
break;
default:
MOZ_CRASH("Bad sign extension");
}
ins = MSignExtendInt64::New(alloc(), op, mode);
break;
}
default: {
MOZ_CRASH("Bad sign extension");
}
}
curBlock_->add(ins);
return ins;
}
MDefinition* convertI64ToFloatingPoint(MDefinition* op, MIRType type,
bool isUnsigned) {
if (inDeadCode()) {
return nullptr;
}
#if defined(JS_CODEGEN_ARM)
auto* ins = MBuiltinInt64ToFloatingPoint::New(
alloc(), op, tlsPointer_, type, bytecodeOffset(), isUnsigned);
#else
auto* ins = MInt64ToFloatingPoint::New(alloc(), op, type, bytecodeOffset(),
isUnsigned);
#endif
curBlock_->add(ins);
return ins;
}
MDefinition* rotate(MDefinition* input, MDefinition* count, MIRType type,
bool left) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MRotate::New(alloc(), input, count, type, left);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* truncate(MDefinition* op, TruncFlags flags) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = T::New(alloc(), op, flags, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
#if defined(JS_CODEGEN_ARM)
MDefinition* truncateWithTls(MDefinition* op, TruncFlags flags) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MWasmBuiltinTruncateToInt64::New(alloc(), op, tlsPointer_,
flags, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
#endif
MDefinition* compare(MDefinition* lhs, MDefinition* rhs, JSOp op,
MCompare::CompareType type) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MCompare::NewWasm(alloc(), lhs, rhs, op, type);
curBlock_->add(ins);
return ins;
}
void assign(unsigned slot, MDefinition* def) {
if (inDeadCode()) {
return;
}
curBlock_->setSlot(info().localSlot(slot), def);
}
#ifdef ENABLE_WASM_SIMD
// About Wasm SIMD as supported by Ion:
//
// The expectation is that Ion will only ever support SIMD on x86 and x64,
// since Cranelift will be the optimizing compiler for Arm64, ARMv7 will cease
// to be a tier-1 platform soon, and MIPS64 will never implement SIMD.
//
// The division of the operations into MIR nodes reflects that expectation,
// and is a good fit for x86/x64. Should the expectation change we'll
// possibly want to re-architect the SIMD support to be a little more general.
//
// Most SIMD operations map directly to a single MIR node that ultimately ends
// up being expanded in the macroassembler.
//
// Some SIMD operations that do have a complete macroassembler expansion are
// open-coded into multiple MIR nodes here; in some cases that's just
// convenience, in other cases it may also allow them to benefit from Ion
// optimizations. The reason for the expansions will be documented by a
// comment.
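//
// For example, a plain (v128,v128) -> v128 operation such as i32x4.add maps
// to a single MWasmBinarySimd128 node (see binarySimd128() below), while
// shuffles go through BuildWasmShuffleSimd128() from jit/ShuffleAnalysis.h,
// which inspects the control bytes to decide how the shuffle is lowered.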
// (v128,v128) -> v128 effect-free binary operations
MDefinition* binarySimd128(MDefinition* lhs, MDefinition* rhs,
bool commutative, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
rhs->type() == MIRType::Simd128);
auto* ins = MWasmBinarySimd128::New(alloc(), lhs, rhs, commutative, op);
curBlock_->add(ins);
return ins;
}
// (v128,i32) -> v128 effect-free shift operations
MDefinition* shiftSimd128(MDefinition* lhs, MDefinition* rhs, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
rhs->type() == MIRType::Int32);
auto* ins = MWasmShiftSimd128::New(alloc(), lhs, rhs, op);
curBlock_->add(ins);
return ins;
}
// (v128,scalar,imm) -> v128
MDefinition* replaceLaneSimd128(MDefinition* lhs, MDefinition* rhs,
uint32_t laneIndex, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(lhs->type() == MIRType::Simd128);
auto* ins = MWasmReplaceLaneSimd128::New(alloc(), lhs, rhs, laneIndex, op);
curBlock_->add(ins);
return ins;
}
// (scalar) -> v128 effect-free unary operations
MDefinition* scalarToSimd128(MDefinition* src, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MWasmScalarToSimd128::New(alloc(), src, op);
curBlock_->add(ins);
return ins;
}
// (v128) -> v128 effect-free unary operations
MDefinition* unarySimd128(MDefinition* src, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(src->type() == MIRType::Simd128);
auto* ins = MWasmUnarySimd128::New(alloc(), src, op);
curBlock_->add(ins);
return ins;
}
// (v128, imm) -> scalar effect-free unary operations
MDefinition* reduceSimd128(MDefinition* src, SimdOp op, ValType outType,
uint32_t imm = 0) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(src->type() == MIRType::Simd128);
auto* ins =
MWasmReduceSimd128::New(alloc(), src, op, ToMIRType(outType), imm);
curBlock_->add(ins);
return ins;
}
// (v128, v128, v128) -> v128 effect-free operations
MDefinition* ternarySimd128(MDefinition* v0, MDefinition* v1, MDefinition* v2,
SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(v0->type() == MIRType::Simd128 &&
v1->type() == MIRType::Simd128 &&
v2->type() == MIRType::Simd128);
auto* ins = MWasmTernarySimd128::New(alloc(), v0, v1, v2, op);
curBlock_->add(ins);
return ins;
}
// (v128, v128, imm_v128) -> v128 effect-free operations
MDefinition* shuffleSimd128(MDefinition* v1, MDefinition* v2, V128 control) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(v1->type() == MIRType::Simd128);
MOZ_ASSERT(v2->type() == MIRType::Simd128);
auto* ins = BuildWasmShuffleSimd128(
alloc(), reinterpret_cast<int8_t*>(control.bytes), v1, v2);
curBlock_->add(ins);
return ins;
}
// Also see below for SIMD memory references
#endif // ENABLE_WASM_SIMD
/************************************************ Linear memory accesses */
// For detailed information about memory accesses, see "Linear memory
// addresses and bounds checking" in WasmMemory.cpp.
private:
// If the platform does not have a HeapReg, load the memory base from Tls.
MWasmLoadTls* maybeLoadMemoryBase() {
MWasmLoadTls* load = nullptr;
#ifdef JS_CODEGEN_X86
AliasSet aliases = !moduleEnv_.memory->canMovingGrow()
? AliasSet::None()
: AliasSet::Load(AliasSet::WasmHeapMeta);
load = MWasmLoadTls::New(alloc(), tlsPointer_,
offsetof(wasm::TlsData, memoryBase),
MIRType::Pointer, aliases);
curBlock_->add(load);
#endif
return load;
}
public:
// A value holding the memory base, whether that's HeapReg or some other
// register.
MWasmHeapBase* memoryBase() {
MWasmHeapBase* base = nullptr;
AliasSet aliases = !moduleEnv_.memory->canMovingGrow()
? AliasSet::None()
: AliasSet::Load(AliasSet::WasmHeapMeta);
base = MWasmHeapBase::New(alloc(), tlsPointer_, aliases);
curBlock_->add(base);
return base;
}
private:
// If the bounds checking strategy requires it, load the bounds check limit
// from the Tls.
MWasmLoadTls* maybeLoadBoundsCheckLimit(MIRType type) {
MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Int64);
if (moduleEnv_.hugeMemoryEnabled()) {
return nullptr;
}
AliasSet aliases = !moduleEnv_.memory->canMovingGrow()
? AliasSet::None()
: AliasSet::Load(AliasSet::WasmHeapMeta);
auto* load = MWasmLoadTls::New(alloc(), tlsPointer_,
offsetof(wasm::TlsData, boundsCheckLimit),
type, aliases);
curBlock_->add(load);
return load;
}
// Return true if the access requires an alignment check. If so, sets
// *mustAdd to true if the offset must be added to the pointer before
// checking.
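//
// For example (illustrative values): an atomic 4-byte access with a constant
// base of 8 and an offset of 2 has effective address 10, which is misaligned,
// so a check is required, and because the offset itself is misaligned it must
// first be added to the pointer (*mustAdd is set to true). With the same base
// and an offset of 4 the effective address is 12, which is aligned, so no
// check is emitted.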
bool needAlignmentCheck(MemoryAccessDesc* access, MDefinition* base,
bool* mustAdd) {
MOZ_ASSERT(!*mustAdd);
// asm.js accesses are always aligned, and only atomic accesses need checks.
if (moduleEnv_.isAsmJS() || !access->isAtomic()) {
return false;
}
// If the EA is known and aligned it will need no checks.
if (base->isConstant()) {
// We only care about the low bits, so overflow is OK, as is chopping off
// the high bits of an i64 pointer.
uint32_t ptr = 0;
if (isMem64()) {
ptr = uint32_t(base->toConstant()->toInt64());
} else {
ptr = base->toConstant()->toInt32();
}
if (((ptr + access->offset()) & (access->byteSize() - 1)) == 0) {
return false;
}
}
// If the offset is aligned then the EA is just the pointer, for
// the purposes of this check.
*mustAdd = (access->offset() & (access->byteSize() - 1)) != 0;
return true;
}
// Fold a constant base into the offset and make the base 0, provided the
// offset stays below the guard limit. The reason for folding the base into
// the offset rather than vice versa is that a small offset can be ignored
// by both explicit bounds checking and bounds check elimination.
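//
// For example (illustrative values): a constant base of 12 with an access
// offset of 4 is rewritten to a base of 0 and an offset of 16, provided 16 is
// still below the offset guard limit; the small folded offset can then be
// ignored by the bounds check machinery as described above.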
void foldConstantPointer(MemoryAccessDesc* access, MDefinition** base) {
uint32_t offsetGuardLimit =
GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
if ((*base)->isConstant()) {
uint64_t basePtr = 0;
if (isMem64()) {
basePtr = uint64_t((*base)->toConstant()->toInt64());
} else {
basePtr = uint64_t(int64_t((*base)->toConstant()->toInt32()));
}
uint32_t offset = access->offset();
if (offset < offsetGuardLimit && basePtr < offsetGuardLimit - offset) {
offset += uint32_t(basePtr);
access->setOffset(offset);
MConstant* ins = nullptr;
if (isMem64()) {
ins = MConstant::NewInt64(alloc(), 0);
} else {
ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
}
curBlock_->add(ins);
*base = ins;
}
}
}
// If the offset must be added because it is large or because the true EA must
// be checked, compute the effective address, trapping on overflow.
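//
// For example, when JitOptions.wasmFoldOffsets is disabled, any nonzero
// offset is folded into the base here: computeEffectiveAddress() emits an
// MWasmAddOffset (which traps on overflow) and clears access->offset(), so
// later stages see a zero offset.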
void maybeComputeEffectiveAddress(MemoryAccessDesc* access,
MDefinition** base, bool mustAddOffset) {
uint32_t offsetGuardLimit =
GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
if (access->offset() >= offsetGuardLimit || mustAddOffset ||
!JitOptions.wasmFoldOffsets) {
*base = computeEffectiveAddress(*base, access);
}
}
MWasmLoadTls* needBoundsCheck() {
#ifdef JS_64BIT
// For 32-bit base pointers:
//
// If the bounds check uses the full 64 bits of the bounds check limit, then
// the base pointer must be zero-extended to 64 bits before checking and
// wrapped back to 32-bits after Spectre masking. (And it's important that
// the value we end up with has flowed through the Spectre mask.)
//
// If the memory's max size is known to be smaller than 64K pages exactly,
// we can use a 32-bit check and avoid extension and wrapping.
bool mem32LimitIs64Bits =
isMem32() && !moduleEnv_.memory->boundsCheckLimitIs32Bits() &&
ArrayBufferObject::maxBufferByteLength() >= 0x100000000;
#else
// On 32-bit platforms we have no more than 2GB memory and the limit for a
// 32-bit base pointer is never a 64-bit value.
bool mem32LimitIs64Bits = false;
#endif
return maybeLoadBoundsCheckLimit(
mem32LimitIs64Bits || isMem64() ? MIRType::Int64 : MIRType::Int32);
}
void performBoundsCheck(MDefinition** base, MWasmLoadTls* boundsCheckLimit) {
// At the outset, actualBase could be the result of pretty much any integer
// operation, or it could be the load of an integer constant. If its type
// is i32, we may assume the value has a canonical representation for the
// platform, see doc block in MacroAssembler.h.
MDefinition* actualBase = *base;
// Extend an i32 index value to perform a 64-bit bounds check if the memory
// can be 4GB or larger.
bool extendAndWrapIndex =
isMem32() && boundsCheckLimit->type() == MIRType::Int64;
if (extendAndWrapIndex) {
auto* extended = MWasmExtendU32Index::New(alloc(), actualBase);
curBlock_->add(extended);
actualBase = extended;
}
auto* ins = MWasmBoundsCheck::New(alloc(), actualBase, boundsCheckLimit,
bytecodeOffset());
curBlock_->add(ins);
actualBase = ins;
// If we're masking, then we update *base to create a dependency chain
// through the masked index. But we will first need to wrap the index
// value if it was extended above.
if (JitOptions.spectreIndexMasking) {
if (extendAndWrapIndex) {
auto* wrapped = MWasmWrapU32Index::New(alloc(), actualBase);
curBlock_->add(wrapped);
actualBase = wrapped;
}
*base = actualBase;
}
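// For the extend-and-wrap case above, the chain that is emitted is,
// schematically:
//
//   extended = MWasmExtendU32Index(*base)
//   checked  = MWasmBoundsCheck(extended, boundsCheckLimit)
//   *base    = MWasmWrapU32Index(checked)   // only with spectreIndexMasking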
}
// Perform all necessary checking before a wasm heap access, based on the
// attributes of the access and base pointer.
//
// For 64-bit indices on platforms that are limited to indices that fit into
// 32 bits (all 32-bit platforms and mips64), this returns a bounds-checked
// `base` that has type Int32. Lowering code depends on this and will assert
// that the base has this type. See the end of this function.
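//
// In outline: (1) fold a constant base pointer into the offset, (2) decide
// whether an alignment check is needed, (3) add the offset into the pointer
// if the alignment check or the offset's size requires it, (4) emit the
// alignment check, (5) emit the bounds check, and (6) on 32-bit targets wrap
// a 64-bit index down to 32 bits.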
void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access,
MDefinition** base) {
MOZ_ASSERT(!inDeadCode());
MOZ_ASSERT(!moduleEnv_.isAsmJS());
// Attempt to fold an offset into a constant base pointer so as to simplify
// the addressing expression. This may update *base.
foldConstantPointer(access, base);
// Determine whether an alignment check is needed and whether the offset
// must be checked too.
bool mustAddOffsetForAlignmentCheck = false;
bool alignmentCheck =
needAlignmentCheck(access, *base, &mustAddOffsetForAlignmentCheck);
// If bounds checking or alignment checking requires it, compute the
// effective address: add the offset into the pointer and trap on overflow.
// This may update *base.
maybeComputeEffectiveAddress(access, base, mustAddOffsetForAlignmentCheck);
// Emit the alignment check if necessary; it traps if it fails.
if (alignmentCheck) {
curBlock_->add(MWasmAlignmentCheck::New(
alloc(), *base, access->byteSize(), bytecodeOffset()));
}
// Emit the bounds check if necessary; it traps if it fails. This may
// update *base.
MWasmLoadTls* boundsCheckLimit = needBoundsCheck();
if (boundsCheckLimit) {
performBoundsCheck(base, boundsCheckLimit);
}
#ifndef JS_64BIT
if (isMem64()) {
// We must have had an explicit bounds check (or one was elided if it was
// proved redundant), and on 32-bit systems the index will for sure fit in
// 32 bits: the max memory is 2GB. So chop the index down to 32-bit to
// simplify the back-end.
MOZ_ASSERT((*base)->type() == MIRType::Int64);
MOZ_ASSERT(!moduleEnv_.hugeMemoryEnabled());
auto* chopped = MWasmWrapU32Index::New(alloc(), *base);
MOZ_ASSERT(chopped->type() == MIRType::Int32);
curBlock_->add(chopped);
*base = chopped;
}
#endif
}
bool isSmallerAccessForI64(ValType result, const MemoryAccessDesc* access) {
if (result == ValType::I64 && access->byteSize() <= 4) {
// These smaller accesses should all be zero-extending.
MOZ_ASSERT(!isSignedIntType(access->type()));
return true;
}
return false;
}
public:
bool isMem32() { return moduleEnv_.memory->indexType() == IndexType::I32; }
bool isMem64() { return moduleEnv_.memory->indexType() == IndexType::I64; }
// Add the offset into the pointer to yield the EA; trap on overflow.
MDefinition* computeEffectiveAddress(MDefinition* base,
MemoryAccessDesc* access) {
if (inDeadCode()) {
return nullptr;
}
if (!access->offset()) {
return base;
}
auto* ins =
MWasmAddOffset::New(alloc(), base, access->offset(), bytecodeOffset());
curBlock_->add(ins);
access->clearOffset();
return ins;
}
MDefinition* load(MDefinition* base, MemoryAccessDesc* access,
ValType result) {
if (inDeadCode()) {
return nullptr;
}
MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
MInstruction* load = nullptr;
if (moduleEnv_.isAsmJS()) {
MOZ_ASSERT(access->offset() == 0);
MWasmLoadTls* boundsCheckLimit =
maybeLoadBoundsCheckLimit(MIRType::Int32);
load = MAsmJSLoadHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
access->type());
} else {
checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
load =
MWasmLoad::New(alloc(), memoryBase, base, *access, ToMIRType(result));
}
if (!load) {
return nullptr;
}
curBlock_->add(load);
return load;
}
void store(MDefinition* base, MemoryAccessDesc* access, MDefinition* v) {
if (inDeadCode()) {
return;
}
MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
MInstruction* store = nullptr;
if (moduleEnv_.isAsmJS()) {
MOZ_ASSERT(access->offset() == 0);
MWasmLoadTls* boundsCheckLimit =
maybeLoadBoundsCheckLimit(MIRType::Int32);
store = MAsmJSStoreHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
access->type(), v);
} else {
checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
store = MWasmStore::New(alloc(), memoryBase, base, *access, v);
}
if (!store) {
return;
}
curBlock_->add(store);
}
MDefinition* atomicCompareExchangeHeap(MDefinition* base,
MemoryAccessDesc* access,
ValType result, MDefinition* oldv,
MDefinition* newv) {
if (inDeadCode()) {
return nullptr;
}
checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
if (isSmallerAccessForI64(result, access)) {
auto* cvtOldv =
MWrapInt64ToInt32::New(alloc(), oldv, /*bottomHalf=*/true);
curBlock_->add(cvtOldv);
oldv = cvtOldv;
auto* cvtNewv =
MWrapInt64ToInt32::New(alloc(), newv, /*bottomHalf=*/true);
curBlock_->add(cvtNewv);
newv = cvtNewv;
}
MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
MInstruction* cas =
MWasmCompareExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
base, *access, oldv, newv, tlsPointer_);
if (!cas) {
return nullptr;
}
curBlock_->add(cas);
if (isSmallerAccessForI64(result, access)) {
cas = MExtendInt32ToInt64::New(alloc(), cas, true);
curBlock_->add(cas);
}
return cas;
}
MDefinition* atomicExchangeHeap(MDefinition* base, MemoryAccessDesc* access,
ValType result, MDefinition* value) {
if (inDeadCode()) {
return nullptr;
}
checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
if (isSmallerAccessForI64(result, access)) {
auto* cvtValue =
MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
curBlock_->add(cvtValue);
value = cvtValue;
}
MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
MInstruction* xchg =
MWasmAtomicExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
base, *access, value, tlsPointer_);
if (!xchg) {
return nullptr;
}
curBlock_->add(xchg);
if (isSmallerAccessForI64(result, access)) {
xchg = MExtendInt32ToInt64::New(alloc(), xchg, true);
curBlock_->add(xchg);
}
return xchg;
}
MDefinition* atomicBinopHeap(AtomicOp op, MDefinition* base,
MemoryAccessDesc* access, ValType result,
MDefinition* value) {
if (inDeadCode()) {
return nullptr;
}
checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
if (isSmallerAccessForI64(result, access)) {
auto* cvtValue =
MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
curBlock_->add(cvtValue);
value = cvtValue;
}
MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
MInstruction* binop =
MWasmAtomicBinopHeap::New(alloc(), bytecodeOffset(), op, memoryBase,
base, *access, value, tlsPointer_);
if (!binop) {
return nullptr;
}
curBlock_->add(binop);
if (isSmallerAccessForI64(result, access)) {
binop = MExtendInt32ToInt64::New(alloc(), binop, true);
curBlock_->add(binop);
}
return binop;
}
#ifdef ENABLE_WASM_SIMD
MDefinition* loadSplatSimd128(Scalar::Type viewType,
const LinearMemoryAddress<MDefinition*>& addr,
wasm::SimdOp splatOp) {
if (inDeadCode()) {
return nullptr;
}
MemoryAccessDesc access(viewType, addr.align, addr.offset,
bytecodeIfNotAsmJS());
// Generate better code (on x86)
if (viewType == Scalar::Float64) {
access.setSplatSimd128Load();
return load(addr.base, &access, ValType::V128);
}
ValType resultType = ValType::I32;
if (viewType == Scalar::Float32) {
resultType = ValType::F32;
splatOp = wasm::SimdOp::F32x4Splat;
}
auto* scalar = load(addr.base, &access, resultType);
if (!inDeadCode() && !scalar) {
return nullptr;
}
return scalarToSimd128(scalar, splatOp);
}
MDefinition* loadExtendSimd128(const LinearMemoryAddress<MDefinition*>& addr,
wasm::SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
// Generate better code (on x86) by loading as a double with an
// operation that sign extends directly.
MemoryAccessDesc access(Scalar::Float64, addr.align, addr.offset,
bytecodeIfNotAsmJS());
access.setWidenSimd128Load(op);
return load(addr.base, &access, ValType::V128);
}
MDefinition* loadZeroSimd128(Scalar::Type viewType, size_t numBytes,
const LinearMemoryAddress<MDefinition*>& addr) {
if (inDeadCode()) {
return nullptr;
}
MemoryAccessDesc access(viewType, addr.align, addr.offset,
bytecodeIfNotAsmJS());
access.setZeroExtendSimd128Load();
return load(addr.base, &access, ValType::V128);
}
MDefinition* loadLaneSimd128(uint32_t laneSize,
const LinearMemoryAddress<MDefinition*>& addr,
uint32_t laneIndex, MDefinition* src) {
if (inDeadCode()) {
return nullptr;
}
MemoryAccessDesc access(Scalar::Simd128, addr.align, addr.offset,
bytecodeIfNotAsmJS());
MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
MDefinition* base = addr.base;
MOZ_ASSERT(!moduleEnv_.isAsmJS());
checkOffsetAndAlignmentAndBounds(&access, &base);
# ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
# endif
MInstruction* load = MWasmLoadLaneSimd128::New(
alloc(), memoryBase, base, access, laneSize, laneIndex, src);
if (!load) {
return nullptr;
}
curBlock_->add(load);
return load;
}
void storeLaneSimd128(uint32_t laneSize,
const LinearMemoryAddress<MDefinition*>& addr,
uint32_t laneIndex, MDefinition* src) {
if (inDeadCode()) {
return;
}
MemoryAccessDesc access(Scalar::Simd128, addr.align, addr.offset,
bytecodeIfNotAsmJS());
MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
MDefinition* base = addr.base;
MOZ_ASSERT(!moduleEnv_.isAsmJS());
checkOffsetAndAlignmentAndBounds(&access, &base);
# ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
# endif
MInstruction* store = MWasmStoreLaneSimd128::New(
alloc(), memoryBase, base, access, laneSize, laneIndex, src);
if (!store) {
return;
}
curBlock_->add(store);
}
#endif // ENABLE_WASM_SIMD
/************************************************ Global variable accesses */
MDefinition* loadGlobalVar(unsigned globalDataOffset, bool isConst,
bool isIndirect, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
MInstruction* load;
if (isIndirect) {
// Pull a pointer to the value out of TlsData::globalArea, then
// load from that pointer. Note that the pointer is immutable
// even though the value it points at may change, hence the use of
// |true| for the first node's |isConst| value, irrespective of
// the |isConst| formal parameter to this method. The latter
// applies to the denoted value as a whole.
auto* cellPtr =
MWasmLoadGlobalVar::New(alloc(), MIRType::Pointer, globalDataOffset,
/*isConst=*/true, tlsPointer_);
curBlock_->add(cellPtr);
load = MWasmLoadGlobalCell::New(alloc(), type, cellPtr);
} else {
// Pull the value directly out of TlsData::globalArea.
load = MWasmLoadGlobalVar::New(alloc(), type, globalDataOffset, isConst,
tlsPointer_);
}
curBlock_->add(load);
return load;
}
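// Store |v| into the global whose payload lives at |globalDataOffset| in
// TlsData::globalArea, either directly or through a cell pointer when
// |isIndirect| is true. Returns the address of the cell that was written when
// |v| is a reference (MIRType::RefOrNull), and nullptr otherwise.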
MInstruction* storeGlobalVar(uint32_t globalDataOffset, bool isIndirect,
MDefinition* v) {
if (inDeadCode()) {
return nullptr;
}
MInstruction* store;
MInstruction* valueAddr = nullptr;
if (isIndirect) {
// Pull a pointer to the value out of TlsData::globalArea, then
// store through that pointer.
auto* cellPtr =
MWasmLoadGlobalVar::New(alloc(), MIRType::Pointer, globalDataOffset,
/*isConst=*/true, tlsPointer_);
curBlock_->add(cellPtr);
if (v->type() == MIRType::RefOrNull) {
valueAddr = cellPtr;
store = MWasmStoreRef::New(alloc(), tlsPointer_, valueAddr, v,
AliasSet::WasmGlobalCell);
} else {
store = MWasmStoreGlobalCell::New(alloc(), v, cellPtr);
}
} else {
// Store the value directly in TlsData::globalArea.
if (v->type() == MIRType::RefOrNull) {
valueAddr = MWasmDerivedPointer::New(
alloc(), tlsPointer_,
offsetof(wasm::TlsData, globalArea) + globalDataOffset);
curBlock_->add(valueAddr);
store = MWasmStoreRef::New(alloc(), tlsPointer_, valueAddr, v,
AliasSet::WasmGlobalVar);
} else {
store =
MWasmStoreGlobalVar::New(alloc(), globalDataOffset, v, tlsPointer_);
}
}
curBlock_->add(store);
return valueAddr;
}
void addInterruptCheck() {
if (inDeadCode()) {
return;
}
curBlock_->add(
MWasmInterruptCheck::New(alloc(), tlsPointer_, bytecodeOffset()));
}
/***************************************************************** Calls */
// The IonMonkey backend maintains a single stack offset (from the stack
// pointer to the base of the frame) by adding the total amount of spill
// space required plus the maximum stack required for argument passing.
// Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
// manually accumulate, for the entire function, the maximum required stack
// space for argument passing. (This is passed to the CodeGenerator via
// MIRGenerator::maxWasmStackArgBytes.) This is just the maximum of the
// stack space required for each individual call (as determined by the call
// ABI).
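//
// For example (illustrative numbers): if one call in the function needs 32
// bytes of outgoing stack arguments and another needs 16, maxStackArgBytes_
// ends up as 32, and finish() hands that single value to the MIRGenerator.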