/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
*
* Copyright 2015 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wasm/WasmIonCompile.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"
#include <algorithm>
#include "jit/ABIArgGenerator.h"
#include "jit/CodeGenerator.h"
#include "jit/CompileInfo.h"
#include "jit/Ion.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/MIR.h"
#include "jit/ShuffleAnalysis.h"
#include "js/ScalarType.h" // js::Scalar::Type
#include "wasm/WasmBaselineCompile.h"
#include "wasm/WasmBuiltinModule.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmGcObject.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmOpIter.h"
#include "wasm/WasmSignalHandlers.h"
#include "wasm/WasmStubs.h"
#include "wasm/WasmValidate.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::IsPowerOfTwo;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::Some;
namespace {
using BlockVector = Vector<MBasicBlock*, 8, SystemAllocPolicy>;
using DefVector = Vector<MDefinition*, 8, SystemAllocPolicy>;
// To compile try-catch blocks, we extend the IonCompilePolicy's ControlItem
// from being just an MBasicBlock* to a Control structure collecting additional
// information.
using ControlInstructionVector =
Vector<MControlInstruction*, 8, SystemAllocPolicy>;
// Extra state tracked for a `try`/`try_table` construct while it is being
// compiled.
struct TryControl {
  // Branches to bind to the try's landing pad.
  ControlInstructionVector landingPadPatches;
  // For `try_table`, the list of tagged catches and labels to branch to.
  TryTableCatchVector catches;
  // Whether this try is in the body and should catch any thrown exception.
  bool inBody = false;

  TryControl() = default;

  // Return this object to a freshly-constructed state, so that it may be
  // cached and reused by FunctionCompiler.
  void reset() {
    inBody = false;
    catches.clearAndFree();
    landingPadPatches.clearAndFree();
  }
};
using UniqueTryControl = UniquePtr<TryControl>;
using VectorUniqueTryControl = Vector<UniqueTryControl, 2, SystemAllocPolicy>;
// One entry of the compiler's control-flow stack: the construct's basic
// block, plus (for try constructs only) the exception-handling state.
struct Control {
  MBasicBlock* block = nullptr;
  UniqueTryControl tryControl = nullptr;

  Control() = default;
  Control(Control&&) = default;
  Control(const Control&) = delete;
};
// [SMDOC] WebAssembly Exception Handling in Ion
// =======================================================
//
// ## Throwing instructions
//
// Wasm exceptions can be thrown by either a throw instruction (local throw),
// or by a wasm call.
//
// ## The "catching try control"
//
// We know we are in try-code if there is a surrounding ControlItem with
// LabelKind::Try. The innermost such control is called the
// "catching try control".
//
// ## Throws without a catching try control
//
// Such throws are implemented with an instance call that triggers the exception
// unwinding runtime. The exception unwinding runtime will not return to the
// function.
//
// ## "landing pad" and "pre-pad" blocks
//
// When an exception is thrown, the unwinder will search for the nearest
// enclosing try block and redirect control flow to it. The code that executes
// before any catch blocks is called the 'landing pad'. The 'landing pad' is
// responsible to:
// 1. Consume the pending exception state from
// Instance::pendingException(Tag)
// 2. Branch to the correct catch block, or else rethrow
//
// There is one landing pad for each try block. The immediate predecessors of
// the landing pad are called 'pre-pad' blocks. There is one pre-pad block per
// throwing instruction.
//
// ## Creating pre-pad blocks
//
// There are two possible sorts of pre-pad blocks, depending on whether we
// are branching after a local throw instruction, or after a wasm call:
//
// - If we encounter a local throw, we create the exception and tag objects,
// store them to Instance::pendingException(Tag), and then jump to the
// landing pad.
//
// - If we encounter a wasm call, we construct a MWasmCallCatchable which is a
// control instruction with either a branch to a fallthrough block or
// to a pre-pad block.
//
// The pre-pad block for a wasm call is empty except for a jump to the
// landing pad. It only exists to avoid critical edges which when split would
// violate the invariants of MWasmCallCatchable. The pending exception state
// is taken care of by the unwinder.
//
// Each pre-pad ends with a pending jump to the landing pad. The pending jumps
// to the landing pad are tracked in `tryPadPatches`. These are called
// "pad patches".
//
// ## Creating the landing pad
//
// When we exit try-code, we check if tryPadPatches has captured any control
// instructions (pad patches). If not, we don't compile any catches and we mark
// the rest as dead code.
//
// If there are pre-pad blocks, we join them to create a landing pad (or just
// "pad"). The pad's last two slots are the caught exception, and the
// exception's tag object.
//
// There are three different forms of try-catch/catch_all Wasm instructions,
// which result in different form of landing pad.
//
// 1. A catchless try, so a Wasm instruction of the form "try ... end".
// - In this case, we end the pad by rethrowing the caught exception.
//
// 2. A single catch_all after a try.
// - If the first catch after a try is a catch_all, then there won't be
// any more catches, but we need the exception and its tag object, in
// case the code in a catch_all contains "rethrow" instructions.
// - The Wasm instruction "rethrow", gets the exception and tag object to
// rethrow from the last two slots of the landing pad which, due to
// validation, is the l'th surrounding ControlItem.
// - We immediately GoTo to a new block after the pad and pop both the
// exception and tag object, as we don't need them anymore in this case.
//
// 3. Otherwise, there is one or more catch code blocks following.
// - In this case, we construct the landing pad by creating a sequence
// of compare and branch blocks that compare the pending exception tag
// object to the tag object of the current tagged catch block. This is
// done incrementally as we visit each tagged catch block in the bytecode
// stream. At every step, we update the ControlItem's block to point to
// the next block to be created in the landing pad sequence. The final
// block will either be a rethrow, if there is no catch_all, or else a
// jump to a catch_all block.
// Policy plugged into OpIter below: fixes the types that the validating
// opcode iterator carries on its value stack and control-flow stack while
// this compiler generates MIR.
struct IonCompilePolicy {
// We store SSA definitions in the value stack.
using Value = MDefinition*;
using ValueVector = DefVector;
// We store loop headers and then/else blocks in the control flow stack.
// In the case of try-catch control blocks, we collect additional information
// regarding the possible paths from throws and calls to a landing pad, as
// well as information on the landing pad's handlers (its catches).
using ControlItem = Control;
};
using IonOpIter = OpIter<IonCompilePolicy>;
class FunctionCompiler;
// CallCompileState describes a call that is being compiled.
class CallCompileState {
// A generator object that is passed each argument as it is compiled.
WasmABIArgGenerator abi_;
// Accumulates the register arguments while compiling arguments.
MWasmCallBase::Args regArgs_;
// Reserved argument for passing Instance* to builtin instance method calls.
ABIArg instanceArg_;
// The stack area in which the callee will write stack return values, or
// nullptr if no stack results.
MWasmStackResultArea* stackResultArea_ = nullptr;
// Indicates that the call is a return/tail call.
bool returnCall = false;
// Only FunctionCompiler should be directly manipulating CallCompileState.
friend class FunctionCompiler;
};
// Encapsulates the compilation of a single function in a wasm (or asm.js)
// module. The function compiler handles the creation and final backend
// compilation of the MIR graph.
class FunctionCompiler {
// A control instruction (e.g. a branch) whose successor `index` must later
// be bound to the block created for the branch target.
struct ControlFlowPatch {
MControlInstruction* ins;
uint32_t index;
ControlFlowPatch(MControlInstruction* ins, uint32_t index)
: ins(ins), index(index) {}
};
using ControlFlowPatchVector = Vector<ControlFlowPatch, 0, SystemAllocPolicy>;
// All pending patches for one branch-target depth, plus an optional branch
// hint to apply when the target block is created.
struct PendingBlockTarget {
ControlFlowPatchVector patches;
BranchHint hint = BranchHint::Invalid;
};
using PendingBlockTargetVector =
Vector<PendingBlockTarget, 0, SystemAllocPolicy>;
const ModuleEnvironment& moduleEnv_;
IonOpIter iter_;
// Offset in the bytecode stream where this function's body begins.
uint32_t functionBodyOffset_;
const FuncCompileInput& func_;
const ValTypeVector& locals_;
size_t lastReadCallSite_;
TempAllocator& alloc_;
MIRGraph& graph_;
const CompileInfo& info_;
MIRGenerator& mirGen_;
// Block currently receiving generated MIR; nullptr means dead code.
MBasicBlock* curBlock_;
uint32_t maxStackArgBytes_;
uint32_t loopDepth_;
uint32_t blockDepth_;
// Pending branch targets, indexed by relative block depth.
PendingBlockTargetVector pendingBlocks_;
// Control flow patches created by `delegate` instructions that target the
// outermost label of this function. These will be bound to a pad that will
// do a rethrow in `emitBodyDelegateThrowPad`.
ControlInstructionVector bodyDelegatePadPatches_;
// Instance pointer argument to the current function.
MWasmParameter* instancePointer_;
MWasmParameter* stackResultPointer_;
// Reference to masm.tryNotes_
wasm::TryNoteVector& tryNotes_;
// Cache of TryControl to minimize heap allocations
VectorUniqueTryControl tryControlCache_;
public:
// Capture everything needed to compile one function; no MIR is created
// until init() is called. Initializer order follows member declaration
// order above.
FunctionCompiler(const ModuleEnvironment& moduleEnv, Decoder& decoder,
const FuncCompileInput& func, const ValTypeVector& locals,
MIRGenerator& mirGen, TryNoteVector& tryNotes)
: moduleEnv_(moduleEnv),
iter_(moduleEnv, decoder),
functionBodyOffset_(decoder.beginOffset()),
func_(func),
locals_(locals),
lastReadCallSite_(0),
alloc_(mirGen.alloc()),
graph_(mirGen.graph()),
info_(mirGen.outerInfo()),
mirGen_(mirGen),
curBlock_(nullptr),
maxStackArgBytes_(0),
loopDepth_(0),
blockDepth_(0),
instancePointer_(nullptr),
stackResultPointer_(nullptr),
tryNotes_(tryNotes) {}
const ModuleEnvironment& moduleEnv() const { return moduleEnv_; }
IonOpIter& iter() { return iter_; }
// Current bytecode offset relative to the start of the function body.
uint32_t relativeBytecodeOffset() {
return readBytecodeOffset() - functionBodyOffset_;
}
TempAllocator& alloc() const { return alloc_; }
// FIXME(1401675): Replace with BlockType.
uint32_t funcIndex() const { return func_.index; }
const FuncType& funcType() const {
return *moduleEnv_.funcs[func_.index].type;
}
MBasicBlock* getCurBlock() const { return curBlock_; }
BytecodeOffset bytecodeOffset() const { return iter_.bytecodeOffset(); }
// asm.js code reports no bytecode offsets; return an invalid offset there.
BytecodeOffset bytecodeIfNotAsmJS() const {
return moduleEnv_.isAsmJS() ? BytecodeOffset() : iter_.bytecodeOffset();
}
FeatureUsage featureUsage() const { return iter_.featureUsage(); }
// Try to get a free TryControl from the cache, or allocate a new one.
[[nodiscard]] UniqueTryControl newTryControl() {
  if (!tryControlCache_.empty()) {
    // Reuse the most recently released entry.
    UniqueTryControl cached = std::move(tryControlCache_.back());
    tryControlCache_.popBack();
    return cached;
  }
  return UniqueTryControl(js_new<TryControl>());
}
// Release the TryControl to the cache.
void freeTryControl(UniqueTryControl&& tryControl) {
// Ensure that it's in a consistent state
tryControl->reset();
// Ignore any OOM, as we'll fail later
(void)tryControlCache_.append(std::move(tryControl));
}
// Create the entry block and populate it: one MWasmParameter per ABI
// argument (plus the hidden instance pointer), and a typed zero for every
// non-argument local. Returns false on OOM.
[[nodiscard]] bool init() {
// Prepare the entry block for MIR generation:
const ArgTypeVector args(funcType());
if (!mirGen_.ensureBallast()) {
return false;
}
if (!newBlock(/* prev */ nullptr, &curBlock_)) {
return false;
}
for (WasmABIArgIter i(args); !i.done(); i++) {
MWasmParameter* ins = MWasmParameter::New(alloc(), *i, i.mirType());
curBlock_->add(ins);
if (args.isSyntheticStackResultPointerArg(i.index())) {
// The synthetic stack-result pointer is not a wasm local; stash it.
MOZ_ASSERT(stackResultPointer_ == nullptr);
stackResultPointer_ = ins;
} else {
curBlock_->initSlot(info().localSlot(args.naturalIndex(i.index())),
ins);
}
if (!mirGen_.ensureBallast()) {
return false;
}
}
// Set up a parameter that receives the hidden instance pointer argument.
instancePointer_ =
MWasmParameter::New(alloc(), ABIArg(InstanceReg), MIRType::Pointer);
curBlock_->add(instancePointer_);
if (!mirGen_.ensureBallast()) {
return false;
}
// Remaining locals (beyond the arguments) start out zero-initialized.
for (size_t i = args.lengthWithoutStackResults(); i < locals_.length();
i++) {
ValType slotValType = locals_[i];
#ifndef ENABLE_WASM_SIMD
if (slotValType == ValType::V128) {
return iter().fail("Ion has no SIMD support yet");
}
#endif
MDefinition* zero = constantZeroOfValType(slotValType);
curBlock_->initSlot(info().localSlot(i), zero);
if (!mirGen_.ensureBallast()) {
return false;
}
}
return true;
}
// Finalize compilation of the function body: record the maximum outgoing
// stack-argument size and assert that all control flow, branch patches,
// bytecode, and call sites have been fully consumed.
void finish() {
mirGen().initWasmMaxStackArgBytes(maxStackArgBytes_);
MOZ_ASSERT(loopDepth_ == 0);
MOZ_ASSERT(blockDepth_ == 0);
#ifdef DEBUG
for (PendingBlockTarget& targets : pendingBlocks_) {
MOZ_ASSERT(targets.patches.empty());
}
#endif
MOZ_ASSERT(inDeadCode());
MOZ_ASSERT(done(), "all bytes must be consumed");
MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
}
/************************* Read-only interface (after local scope setup) */
MIRGenerator& mirGen() const { return mirGen_; }
MIRGraph& mirGraph() const { return graph_; }
const CompileInfo& info() const { return info_; }
// Current SSA definition of local `slot`, or nullptr in dead code.
MDefinition* getLocalDef(unsigned slot) {
if (inDeadCode()) {
return nullptr;
}
return curBlock_->getSlot(info().localSlot(slot));
}
const ValTypeVector& locals() const { return locals_; }
/*********************************************************** Constants ***/
// Each builder below emits a constant node into the current block, or
// returns nullptr in dead code. The deleted template overloads forbid any
// implicit conversion of the argument type.
MDefinition* constantF32(float f) {
if (inDeadCode()) {
return nullptr;
}
auto* cst = MWasmFloatConstant::NewFloat32(alloc(), f);
curBlock_->add(cst);
return cst;
}
// Hide all other overloads, to guarantee no implicit argument conversion.
template <typename T>
MDefinition* constantF32(T) = delete;
MDefinition* constantF64(double d) {
if (inDeadCode()) {
return nullptr;
}
auto* cst = MWasmFloatConstant::NewDouble(alloc(), d);
curBlock_->add(cst);
return cst;
}
template <typename T>
MDefinition* constantF64(T) = delete;
MDefinition* constantI32(int32_t i) {
if (inDeadCode()) {
return nullptr;
}
MConstant* constant =
MConstant::New(alloc(), Int32Value(i), MIRType::Int32);
curBlock_->add(constant);
return constant;
}
template <typename T>
MDefinition* constantI32(T) = delete;
MDefinition* constantI64(int64_t i) {
if (inDeadCode()) {
return nullptr;
}
MConstant* constant = MConstant::NewInt64(alloc(), i);
curBlock_->add(constant);
return constant;
}
template <typename T>
MDefinition* constantI64(T) = delete;
// Produce an MConstant of the machine's target int type (Int32 or Int64).
MDefinition* constantTargetWord(intptr_t n) {
return targetIs64Bit() ? constantI64(int64_t(n)) : constantI32(int32_t(n));
}
template <typename T>
MDefinition* constantTargetWord(T) = delete;
#ifdef ENABLE_WASM_SIMD
MDefinition* constantV128(V128 v) {
if (inDeadCode()) {
return nullptr;
}
MWasmFloatConstant* constant = MWasmFloatConstant::NewSimd128(
alloc(), SimdConstant::CreateSimd128((int8_t*)v.bytes));
curBlock_->add(constant);
return constant;
}
template <typename T>
MDefinition* constantV128(T) = delete;
#endif
// Emit a null reference constant into the current block (nullptr in dead
// code).
MDefinition* constantNullRef() {
  if (inDeadCode()) {
    return nullptr;
  }
  // MConstant has a lot of baggage so we don't use that here.
  auto* nullConst = MWasmNullConstant::New(alloc());
  curBlock_->add(nullConst);
  return nullConst;
}
// Produce a zero constant for the specified ValType.
MDefinition* constantZeroOfValType(ValType valType) {
switch (valType.kind()) {
case ValType::I32:
return constantI32(0);
case ValType::I64:
return constantI64(int64_t(0));
#ifdef ENABLE_WASM_SIMD
case ValType::V128:
return constantV128(V128(0));
#endif
case ValType::F32:
return constantF32(0.0f);
case ValType::F64:
return constantF64(0.0);
case ValType::Ref:
// Reference types zero-initialize to null.
return constantNullRef();
default:
MOZ_CRASH();
}
}
/***************************** Code generation (after local scope setup) */
// Emit a memory fence instruction (no-op in dead code).
void fence() {
if (inDeadCode()) {
return;
}
MWasmFence* ins = MWasmFence::New(alloc());
curBlock_->add(ins);
}
// Generic emitters: construct a T node with the given operands, add it to
// the current block, and return it (nullptr in dead code).
template <class T>
MDefinition* unary(MDefinition* op) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), op);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* unary(MDefinition* op, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), op, type);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* binary(MDefinition* lhs, MDefinition* rhs) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), lhs, rhs);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), lhs, rhs, type);
curBlock_->add(ins);
return ins;
}
template <class T>
MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type,
MWasmBinaryBitwise::SubOpcode subOpc) {
if (inDeadCode()) {
return nullptr;
}
T* ins = T::New(alloc(), lhs, rhs, type, subOpc);
curBlock_->add(ins);
return ins;
}
// Unsigned right shift (wasm variant).
MDefinition* ursh(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MUrsh::NewWasm(alloc(), lhs, rhs, type);
curBlock_->add(ins);
return ins;
}
// Addition (wasm variant).
MDefinition* add(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MAdd::NewWasm(alloc(), lhs, rhs, type);
curBlock_->add(ins);
return ins;
}
// NaN bit patterns must be preserved for proper wasm (but not asm.js), and
// only floating-point types carry NaNs at all.
bool mustPreserveNaN(MIRType type) {
  if (!IsFloatingPointType(type)) {
    return false;
  }
  return !moduleEnv().isAsmJS();
}
MDefinition* sub(MDefinition* lhs, MDefinition* rhs, MIRType type) {
if (inDeadCode()) {
return nullptr;
}
// wasm can't fold x - 0.0 because of NaN with custom payloads.
MSub* ins = MSub::NewWasm(alloc(), lhs, rhs, type, mustPreserveNaN(type));
curBlock_->add(ins);
return ins;
}
// Round `input` to an integer value using the given rounding mode.
MDefinition* nearbyInt(MDefinition* input, RoundingMode roundingMode) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MNearbyInt::New(alloc(), input, input->type(), roundingMode);
curBlock_->add(ins);
return ins;
}
MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type,
bool isMax) {
if (inDeadCode()) {
return nullptr;
}
if (mustPreserveNaN(type)) {
// Convert signaling NaN to quiet NaNs.
MDefinition* zero = constantZeroOfValType(ValType::fromMIRType(type));
lhs = sub(lhs, zero, type);
rhs = sub(rhs, zero, type);
}
MMinMax* ins = MMinMax::NewWasm(alloc(), lhs, rhs, type, isMax);
curBlock_->add(ins);
return ins;
}
MDefinition* mul(MDefinition* lhs, MDefinition* rhs, MIRType type,
MMul::Mode mode) {
if (inDeadCode()) {
return nullptr;
}
// wasm can't fold x * 1.0 because of NaN with custom payloads.
auto* ins =
MMul::NewWasm(alloc(), lhs, rhs, type, mode, mustPreserveNaN(type));
curBlock_->add(ins);
return ins;
}
// Division; traps on error (divide-by-zero/overflow) for wasm, but not for
// asm.js.
MDefinition* div(MDefinition* lhs, MDefinition* rhs, MIRType type,
bool unsignd) {
if (inDeadCode()) {
return nullptr;
}
bool trapOnError = !moduleEnv().isAsmJS();
if (!unsignd && type == MIRType::Int32) {
// Enforce the signedness of the operation by coercing the operands
// to signed. Otherwise, operands that "look" unsigned to Ion but
// are not unsigned to Baldr (eg, unsigned right shifts) may lead to
// the operation being executed unsigned. Applies to mod() as well.
//
// Do this for Int32 only since Int64 is not subject to the same
// issues.
//
// Note the offsets passed to MWasmBuiltinTruncateToInt32 are wrong here,
// but it doesn't matter: they're not codegen'd to calls since inputs
// already are int32.
auto* lhs2 = createTruncateToInt32(lhs);
curBlock_->add(lhs2);
lhs = lhs2;
auto* rhs2 = createTruncateToInt32(rhs);
curBlock_->add(rhs2);
rhs = rhs2;
}
// For x86 and arm we implement i64 div via c++ builtin.
// A call to c++ builtin requires instance pointer.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
if (type == MIRType::Int64) {
auto* ins =
MWasmBuiltinDivI64::New(alloc(), lhs, rhs, instancePointer_, unsignd,
trapOnError, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
#endif
auto* ins = MDiv::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
bytecodeOffset(), mustPreserveNaN(type));
curBlock_->add(ins);
return ins;
}
// Create (but do not insert) a truncation-to-int32 node; float inputs use
// the builtin-call variant that needs the instance pointer.
MInstruction* createTruncateToInt32(MDefinition* op) {
if (op->type() == MIRType::Double || op->type() == MIRType::Float32) {
return MWasmBuiltinTruncateToInt32::New(alloc(), op, instancePointer_);
}
return MTruncateToInt32::New(alloc(), op);
}
// Remainder; mirrors div() above, including the signedness coercion.
MDefinition* mod(MDefinition* lhs, MDefinition* rhs, MIRType type,
bool unsignd) {
if (inDeadCode()) {
return nullptr;
}
bool trapOnError = !moduleEnv().isAsmJS();
if (!unsignd && type == MIRType::Int32) {
// See block comment in div().
auto* lhs2 = createTruncateToInt32(lhs);
curBlock_->add(lhs2);
lhs = lhs2;
auto* rhs2 = createTruncateToInt32(rhs);
curBlock_->add(rhs2);
rhs = rhs2;
}
// For x86 and arm we implement i64 mod via c++ builtin.
// A call to c++ builtin requires instance pointer.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
if (type == MIRType::Int64) {
auto* ins =
MWasmBuiltinModI64::New(alloc(), lhs, rhs, instancePointer_, unsignd,
trapOnError, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
#endif
// Should be handled separately because we call BuiltinThunk for this case
// and so, need to add the dependency from instancePointer.
if (type == MIRType::Double) {
auto* ins = MWasmBuiltinModD::New(alloc(), lhs, rhs, instancePointer_,
type, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
auto* ins = MMod::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
bytecodeOffset());
curBlock_->add(ins);
return ins;
}
// Bitwise NOT.
MDefinition* bitnot(MDefinition* op) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MBitNot::New(alloc(), op);
curBlock_->add(ins);
return ins;
}
// Wasm `select`: yields trueExpr if condExpr is nonzero, else falseExpr.
MDefinition* select(MDefinition* trueExpr, MDefinition* falseExpr,
MDefinition* condExpr) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MWasmSelect::New(alloc(), trueExpr, falseExpr, condExpr);
curBlock_->add(ins);
return ins;
}
// Widen an i32 to i64, zero- or sign-extending per `isUnsigned`.
MDefinition* extendI32(MDefinition* op, bool isUnsigned) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MExtendInt32ToInt64::New(alloc(), op, isUnsigned);
curBlock_->add(ins);
return ins;
}
// Sign-extend the low `srcSize` bytes of `op` to a `targetSize`-byte
// integer (sizes in bytes: 1/2 -> 4, or 1/2/4 -> 8). Any other combination
// is a compiler bug and crashes.
MDefinition* signExtend(MDefinition* op, uint32_t srcSize,
uint32_t targetSize) {
if (inDeadCode()) {
return nullptr;
}
MInstruction* ins;
switch (targetSize) {
case 4: {
MSignExtendInt32::Mode mode;
switch (srcSize) {
case 1:
mode = MSignExtendInt32::Byte;
break;
case 2:
mode = MSignExtendInt32::Half;
break;
default:
MOZ_CRASH("Bad sign extension");
}
ins = MSignExtendInt32::New(alloc(), op, mode);
break;
}
case 8: {
MSignExtendInt64::Mode mode;
switch (srcSize) {
case 1:
mode = MSignExtendInt64::Byte;
break;
case 2:
mode = MSignExtendInt64::Half;
break;
case 4:
mode = MSignExtendInt64::Word;
break;
default:
MOZ_CRASH("Bad sign extension");
}
ins = MSignExtendInt64::New(alloc(), op, mode);
break;
}
default: {
MOZ_CRASH("Bad sign extension");
}
}
curBlock_->add(ins);
return ins;
}
// Convert an i64 to f32/f64; on ARM this requires a C++ builtin call (hence
// the instance pointer).
MDefinition* convertI64ToFloatingPoint(MDefinition* op, MIRType type,
bool isUnsigned) {
if (inDeadCode()) {
return nullptr;
}
#if defined(JS_CODEGEN_ARM)
auto* ins = MBuiltinInt64ToFloatingPoint::New(
alloc(), op, instancePointer_, type, bytecodeOffset(), isUnsigned);
#else
auto* ins = MInt64ToFloatingPoint::New(alloc(), op, type, bytecodeOffset(),
isUnsigned);
#endif
curBlock_->add(ins);
return ins;
}
// Bit rotation; `left` selects rotl vs rotr.
MDefinition* rotate(MDefinition* input, MDefinition* count, MIRType type,
bool left) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MRotate::New(alloc(), input, count, type, left);
curBlock_->add(ins);
return ins;
}
// Emit a float-to-int truncation node of type T with the given flags.
template <class T>
MDefinition* truncate(MDefinition* op, TruncFlags flags) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = T::New(alloc(), op, flags, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
#if defined(JS_CODEGEN_ARM)
// ARM-only: truncation to i64 is done via a builtin call that needs the
// instance pointer.
MDefinition* truncateWithInstance(MDefinition* op, TruncFlags flags) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MWasmBuiltinTruncateToInt64::New(alloc(), op, instancePointer_,
flags, bytecodeOffset());
curBlock_->add(ins);
return ins;
}
#endif
// Comparison producing an i32 boolean.
MDefinition* compare(MDefinition* lhs, MDefinition* rhs, JSOp op,
MCompare::CompareType type) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MCompare::NewWasm(alloc(), lhs, rhs, op, type);
curBlock_->add(ins);
return ins;
}
// Update the SSA definition of local `slot` in the current block.
void assign(unsigned slot, MDefinition* def) {
if (inDeadCode()) {
return;
}
curBlock_->setSlot(info().localSlot(slot), def);
}
// Compare `ref` against the null reference with the given compare op
// (JSOp::Eq for "is null", JSOp::Ne for "is not null").
MDefinition* compareIsNull(MDefinition* ref, JSOp compareOp) {
MDefinition* nullVal = constantNullRef();
if (!nullVal) {
return nullptr;
}
return compare(ref, nullVal, compareOp, MCompare::Compare_WasmAnyRef);
}
// Emit a trap-if-null check on `ref` (wasm `ref.as_non_null` semantics).
[[nodiscard]] bool refAsNonNull(MDefinition* ref) {
if (inDeadCode()) {
return true;
}
auto* ins = MWasmTrapIfNull::New(
alloc(), ref, wasm::Trap::NullPointerDereference, bytecodeOffset());
curBlock_->add(ins);
return true;
}
#ifdef ENABLE_WASM_GC
// `br_on_null`: branch to `relativeDepth` if `condition` is null, passing
// `values`; otherwise fall through. The branch edge is recorded as a
// control-flow patch to be bound when the target block exists.
[[nodiscard]] bool brOnNull(uint32_t relativeDepth, const DefVector& values,
const ResultType& type, MDefinition* condition) {
if (inDeadCode()) {
return true;
}
MBasicBlock* fallthroughBlock = nullptr;
if (!newBlock(curBlock_, &fallthroughBlock)) {
return false;
}
MDefinition* check = compareIsNull(condition, JSOp::Eq);
if (!check) {
return false;
}
MTest* test = MTest::New(alloc(), check, nullptr, fallthroughBlock);
if (!test ||
!addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
return false;
}
if (!pushDefs(values)) {
return false;
}
curBlock_->end(test);
curBlock_ = fallthroughBlock;
return true;
}
// `br_on_non_null`: as above, but branches when `condition` is non-null.
[[nodiscard]] bool brOnNonNull(uint32_t relativeDepth,
const DefVector& values,
const ResultType& type,
MDefinition* condition) {
if (inDeadCode()) {
return true;
}
MBasicBlock* fallthroughBlock = nullptr;
if (!newBlock(curBlock_, &fallthroughBlock)) {
return false;
}
MDefinition* check = compareIsNull(condition, JSOp::Ne);
if (!check) {
return false;
}
MTest* test = MTest::New(alloc(), check, nullptr, fallthroughBlock);
if (!test ||
!addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
return false;
}
if (!pushDefs(values)) {
return false;
}
curBlock_->end(test);
curBlock_ = fallthroughBlock;
return true;
}
#endif // ENABLE_WASM_GC
#ifdef ENABLE_WASM_GC
// `ref.i31`: wrap an i32 into an i31 reference.
MDefinition* refI31(MDefinition* input) {
auto* ins = MWasmNewI31Ref::New(alloc(), input);
curBlock_->add(ins);
return ins;
}
// `i31.get_s` / `i31.get_u`: unwrap an i31 reference to an i32, widening
// per `wideningOp`.
MDefinition* i31Get(MDefinition* input, FieldWideningOp wideningOp) {
auto* ins = MWasmI31RefGet::New(alloc(), input, wideningOp);
curBlock_->add(ins);
return ins;
}
#endif // ENABLE_WASM_GC
#ifdef ENABLE_WASM_SIMD
// About Wasm SIMD as supported by Ion:
//
// The expectation is that Ion will only ever support SIMD on x86 and x64,
// since ARMv7 will cease to be a tier-1 platform soon, and MIPS64 will never
// implement SIMD.
//
// The division of the operations into MIR nodes reflects that expectation,
// and is a good fit for x86/x64. Should the expectation change we'll
// possibly want to re-architect the SIMD support to be a little more general.
//
// Most SIMD operations map directly to a single MIR node that ultimately ends
// up being expanded in the macroassembler.
//
// Some SIMD operations that do have a complete macroassembler expansion are
// open-coded into multiple MIR nodes here; in some cases that's just
// convenience, in other cases it may also allow them to benefit from Ion
// optimizations. The reason for the expansions will be documented by a
// comment.
// (v128,v128) -> v128 effect-free binary operations
MDefinition* binarySimd128(MDefinition* lhs, MDefinition* rhs,
bool commutative, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
rhs->type() == MIRType::Simd128);
auto* ins = MWasmBinarySimd128::New(alloc(), lhs, rhs, commutative, op);
curBlock_->add(ins);
return ins;
}
// (v128,i32) -> v128 effect-free shift operations
MDefinition* shiftSimd128(MDefinition* lhs, MDefinition* rhs, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
rhs->type() == MIRType::Int32);
// Some targets require the shift count to be masked to the lane width;
// the macroassembler tells us the mask to apply, if any.
int32_t maskBits;
if (MacroAssembler::MustMaskShiftCountSimd128(op, &maskBits)) {
MDefinition* mask = constantI32(maskBits);
auto* rhs2 = MBitAnd::New(alloc(), rhs, mask, MIRType::Int32);
curBlock_->add(rhs2);
rhs = rhs2;
}
auto* ins = MWasmShiftSimd128::New(alloc(), lhs, rhs, op);
curBlock_->add(ins);
return ins;
}
// (v128,scalar,imm) -> v128
MDefinition* replaceLaneSimd128(MDefinition* lhs, MDefinition* rhs,
uint32_t laneIndex, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(lhs->type() == MIRType::Simd128);
auto* ins = MWasmReplaceLaneSimd128::New(alloc(), lhs, rhs, laneIndex, op);
curBlock_->add(ins);
return ins;
}
// (scalar) -> v128 effect-free unary operations
MDefinition* scalarToSimd128(MDefinition* src, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
auto* ins = MWasmScalarToSimd128::New(alloc(), src, op);
curBlock_->add(ins);
return ins;
}
// (v128) -> v128 effect-free unary operations
MDefinition* unarySimd128(MDefinition* src, SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(src->type() == MIRType::Simd128);
auto* ins = MWasmUnarySimd128::New(alloc(), src, op);
curBlock_->add(ins);
return ins;
}
// (v128, imm) -> scalar effect-free unary operations
MDefinition* reduceSimd128(MDefinition* src, SimdOp op, ValType outType,
uint32_t imm = 0) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(src->type() == MIRType::Simd128);
auto* ins =
MWasmReduceSimd128::New(alloc(), src, op, outType.toMIRType(), imm);
curBlock_->add(ins);
return ins;
}
// (v128, v128, v128) -> v128 effect-free operations
MDefinition* ternarySimd128(MDefinition* v0, MDefinition* v1, MDefinition* v2,
SimdOp op) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(v0->type() == MIRType::Simd128 &&
v1->type() == MIRType::Simd128 &&
v2->type() == MIRType::Simd128);
auto* ins = MWasmTernarySimd128::New(alloc(), v0, v1, v2, op);
curBlock_->add(ins);
return ins;
}
// (v128, v128, imm_v128) -> v128 effect-free operations
MDefinition* shuffleSimd128(MDefinition* v1, MDefinition* v2, V128 control) {
if (inDeadCode()) {
return nullptr;
}
MOZ_ASSERT(v1->type() == MIRType::Simd128);
MOZ_ASSERT(v2->type() == MIRType::Simd128);
auto* ins = BuildWasmShuffleSimd128(
alloc(), reinterpret_cast<int8_t*>(control.bytes), v1, v2);
curBlock_->add(ins);
return ins;
}
// Also see below for SIMD memory references
#endif // ENABLE_WASM_SIMD
/************************************************ Linear memory accesses */
// For detailed information about memory accesses, see "Linear memory
// addresses and bounds checking" in WasmMemory.cpp.
private:
// If the platform does not have a HeapReg, load the memory base from
// instance.
MDefinition* maybeLoadMemoryBase(uint32_t memoryIndex) {
#ifdef WASM_HAS_HEAPREG
if (memoryIndex == 0) {
return nullptr;
}
#endif
return memoryBase(memoryIndex);
}
public:
// A value holding the memory base, whether that's HeapReg or some other
// register.
MDefinition* memoryBase(uint32_t memoryIndex) {
AliasSet aliases = !moduleEnv_.memories[memoryIndex].canMovingGrow()
? AliasSet::None()
: AliasSet::Load(AliasSet::WasmHeapMeta);
#ifdef WASM_HAS_HEAPREG
if (memoryIndex == 0) {
MWasmHeapReg* base = MWasmHeapReg::New(alloc(), aliases);
curBlock_->add(base);
return base;
}
#endif
uint32_t offset =
memoryIndex == 0
? Instance::offsetOfMemory0Base()
: (Instance::offsetInData(
moduleEnv_.offsetOfMemoryInstanceData(memoryIndex) +
offsetof(MemoryInstanceData, base)));
MWasmLoadInstance* base = MWasmLoadInstance::New(
alloc(), instancePointer_, offset, MIRType::Pointer, aliases);
curBlock_->add(base);
return base;
}
private:
// If the bounds checking strategy requires it, load the bounds check limit
// from the instance.
MWasmLoadInstance* maybeLoadBoundsCheckLimit(uint32_t memoryIndex,
                                             MIRType type) {
  MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Int64);
  // Huge memories rely on guard pages rather than explicit limit checks,
  // so no limit value is needed.
  if (moduleEnv_.hugeMemoryEnabled(memoryIndex)) {
    return nullptr;
  }
  // A growable, movable memory's limit can change, so the load must alias
  // the heap metadata; otherwise the limit is effectively constant.
  AliasSet aliases = moduleEnv_.memories[memoryIndex].canMovingGrow()
                         ? AliasSet::Load(AliasSet::WasmHeapMeta)
                         : AliasSet::None();
  // Memory zero's limit has a dedicated instance slot; other memories keep
  // theirs in the per-memory instance data area.
  uint32_t offset;
  if (memoryIndex == 0) {
    offset = Instance::offsetOfMemory0BoundsCheckLimit();
  } else {
    offset = Instance::offsetInData(
        moduleEnv_.offsetOfMemoryInstanceData(memoryIndex) +
        offsetof(MemoryInstanceData, boundsCheckLimit));
  }
  auto* limit = MWasmLoadInstance::New(alloc(), instancePointer_, offset,
                                       type, aliases);
  curBlock_->add(limit);
  return limit;
}
// Return true if the access requires an alignment check. If so, sets
// *mustAdd to true if the offset must be added to the pointer before
// checking.
bool needAlignmentCheck(MemoryAccessDesc* access, MDefinition* base,
                        bool* mustAdd) {
  MOZ_ASSERT(!*mustAdd);
  // Only atomic wasm accesses ever need a dynamic check: asm.js accesses
  // are always aligned, and plain wasm accesses tolerate misalignment.
  if (moduleEnv_.isAsmJS() || !access->isAtomic()) {
    return false;
  }
  uint64_t alignMask = access->byteSize() - 1;
  // A statically-known, aligned effective address needs no check.  Only the
  // low bits matter for alignment, so wrapping on overflow and truncating
  // an i64 pointer to 32 bits are both harmless here.
  if (base->isConstant()) {
    uint32_t ptr = isMem64(access->memoryIndex())
                       ? uint32_t(base->toConstant()->toInt64())
                       : uint32_t(base->toConstant()->toInt32());
    if (((ptr + access->offset64()) & alignMask) == 0) {
      return false;
    }
  }
  // If the offset itself is aligned, checking just the pointer is the same
  // as checking the full EA; otherwise the offset must be added into the
  // pointer before the check.
  *mustAdd = (access->offset64() & alignMask) != 0;
  return true;
}
// Fold a constant base into the offset and make the base 0, provided the
// offset stays below the guard limit. The reason for folding the base into
// the offset rather than vice versa is that a small offset can be ignored
// by both explicit bounds checking and bounds check elimination.
void foldConstantPointer(MemoryAccessDesc* access, MDefinition** base) {
  // The guard region covers offsets below this limit, so a folded offset
  // that stays below it needs no extra bounds-checking work.
  uint32_t offsetGuardLimit = GetMaxOffsetGuardLimit(
      moduleEnv_.hugeMemoryEnabled(access->memoryIndex()));
  if ((*base)->isConstant()) {
    uint64_t basePtr = 0;
    if (isMem64(access->memoryIndex())) {
      basePtr = uint64_t((*base)->toConstant()->toInt64());
    } else {
      // Sign-extend the i32 constant to i64 before the unsigned conversion.
      basePtr = uint64_t(int64_t((*base)->toConstant()->toInt32()));
    }
    uint64_t offset = access->offset64();
    // Fold only if basePtr + offset provably stays below the guard limit.
    // The two comparisons together establish this without any risk of
    // overflow in the arithmetic.
    if (offset < offsetGuardLimit && basePtr < offsetGuardLimit - offset) {
      // basePtr < offsetGuardLimit here, so it fits in 32 bits, as does the
      // sum; zero the base so the offset carries the whole address.
      offset += uint32_t(basePtr);
      access->setOffset32(uint32_t(offset));
      *base = isMem64(access->memoryIndex()) ? constantI64(int64_t(0))
                                             : constantI32(0);
    }
  }
}
// If the offset must be added because it is large or because the true EA must
// be checked, compute the effective address, trapping on overflow.
void maybeComputeEffectiveAddress(MemoryAccessDesc* access,
                                  MDefinition** base, bool mustAddOffset) {
  uint32_t offsetGuardLimit = GetMaxOffsetGuardLimit(
      moduleEnv_.hugeMemoryEnabled(access->memoryIndex()));
  uint64_t offset = access->offset64();
  // The offset may stay folded into the access only when it is covered by
  // the guard region, fits in 32 bits, no alignment check requires the
  // true EA to be materialized, and offset folding is enabled.  Otherwise
  // add it into the pointer now, trapping on overflow.
  bool keepFolded = offset < offsetGuardLimit && offset <= UINT32_MAX &&
                    !mustAddOffset && JitOptions.wasmFoldOffsets;
  if (!keepFolded) {
    *base = computeEffectiveAddress(*base, access);
  }
}
MWasmLoadInstance* needBoundsCheck(uint32_t memoryIndex) {
  // For 32-bit base pointers:
  //
  // If the bounds check uses the full 64 bits of the bounds check limit,
  // then the base pointer must be zero-extended to 64 bits before checking
  // and wrapped back to 32 bits after Spectre masking.  (And it's important
  // that the value we end up with has flowed through the Spectre mask.)
  //
  // If the memory's max size is known to be smaller than 64K pages exactly,
  // we can use a 32-bit check and avoid extension and wrapping.
#ifdef JS_64BIT
  static_assert(0x100000000 % PageSize == 0);
  bool check32BitIndexAgainst64BitLimit =
      isMem32(memoryIndex) &&
      !moduleEnv_.memories[memoryIndex].boundsCheckLimitIs32Bits() &&
      MaxMemoryPages(moduleEnv_.memories[memoryIndex].indexType()) >=
          Pages(0x100000000 / PageSize);
#else
  // 32-bit platforms cap memories at 2GB, so a 32-bit index is never
  // checked against a 64-bit limit.
  bool check32BitIndexAgainst64BitLimit = false;
#endif
  MIRType limitType = check32BitIndexAgainst64BitLimit || isMem64(memoryIndex)
                          ? MIRType::Int64
                          : MIRType::Int32;
  return maybeLoadBoundsCheckLimit(memoryIndex, limitType);
}
void performBoundsCheck(uint32_t memoryIndex, MDefinition** base,
MWasmLoadInstance* boundsCheckLimit) {
// At the outset, actualBase could be the result of pretty much any integer
// operation, or it could be the load of an integer constant. If its type
// is i32, we may assume the value has a canonical representation for the
// platform, see doc block in MacroAssembler.h.
MDefinition* actualBase = *base;
// Extend an i32 index value to perform a 64-bit bounds check if the memory
// can be 4GB or larger.
bool extendAndWrapIndex =
isMem32(memoryIndex) && boundsCheckLimit->type() == MIRType::Int64;
if (extendAndWrapIndex) {
auto* extended = MWasmExtendU32Index::New(alloc(), actualBase);
curBlock_->add(extended);
actualBase = extended;
}
auto target = memoryIndex == 0 ? MWasmBoundsCheck::Memory0
: MWasmBoundsCheck::Unknown;
auto* ins = MWasmBoundsCheck::New(alloc(), actualBase, boundsCheckLimit,
bytecodeOffset(), target);
curBlock_->add(ins);
actualBase = ins;
// If we're masking, then we update *base to create a dependency chain
// through the masked index. But we will first need to wrap the index
// value if it was extended above.
if (JitOptions.spectreIndexMasking) {
if (extendAndWrapIndex) {
auto* wrapped = MWasmWrapU32Index::New(alloc(), actualBase);
curBlock_->add(wrapped);
actualBase = wrapped;
}
*base = actualBase;
}
}
// Perform all necessary checking before a wasm heap access, based on the
// attributes of the access and base pointer.
//
// For 64-bit indices on platforms that are limited to indices that fit into
// 32 bits (all 32-bit platforms and mips64), this returns a bounds-checked
// `base` that has type Int32. Lowering code depends on this and will assert
// that the base has this type. See the end of this function.
void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access,
                                      MDefinition** base) {
  MOZ_ASSERT(!inDeadCode());
  // asm.js accesses go through a separate path (see load()/store()).
  MOZ_ASSERT(!moduleEnv_.isAsmJS());
  // Attempt to fold an offset into a constant base pointer so as to simplify
  // the addressing expression. This may update *base.
  foldConstantPointer(access, base);
  // Determine whether an alignment check is needed and whether the offset
  // must be added to the pointer before that check can be performed.
  bool mustAddOffsetForAlignmentCheck = false;
  bool alignmentCheck =
      needAlignmentCheck(access, *base, &mustAddOffsetForAlignmentCheck);
  // If bounds checking or alignment checking requires it, compute the
  // effective address: add the offset into the pointer and trap on overflow.
  // This may update *base.
  maybeComputeEffectiveAddress(access, base, mustAddOffsetForAlignmentCheck);
  // Emit the alignment check if necessary; it traps if it fails.
  if (alignmentCheck) {
    curBlock_->add(MWasmAlignmentCheck::New(
        alloc(), *base, access->byteSize(), bytecodeOffset()));
  }
  // Emit the bounds check if necessary; it traps if it fails. This may
  // update *base (e.g. to the Spectre-masked index).
  MWasmLoadInstance* boundsCheckLimit =
      needBoundsCheck(access->memoryIndex());
  if (boundsCheckLimit) {
    performBoundsCheck(access->memoryIndex(), base, boundsCheckLimit);
  }
#ifndef JS_64BIT
  if (isMem64(access->memoryIndex())) {
    // We must have had an explicit bounds check (or one was elided if it was
    // proved redundant), and on 32-bit systems the index will for sure fit in
    // 32 bits: the max memory is 2GB. So chop the index down to 32-bit to
    // simplify the back-end.
    MOZ_ASSERT((*base)->type() == MIRType::Int64);
    MOZ_ASSERT(!moduleEnv_.hugeMemoryEnabled(access->memoryIndex()));
    auto* chopped = MWasmWrapU32Index::New(alloc(), *base);
    MOZ_ASSERT(chopped->type() == MIRType::Int32);
    curBlock_->add(chopped);
    *base = chopped;
  }
#endif
}
bool isSmallerAccessForI64(ValType result, const MemoryAccessDesc* access) {
  // An i64 result produced by a sub-word (<= 32-bit) access is computed on
  // i32 values and widened afterwards.
  bool narrow = result == ValType::I64 && access->byteSize() <= 4;
  if (narrow) {
    // Such narrow accesses are all zero-extending.
    MOZ_ASSERT(!isSignedIntType(access->type()));
  }
  return narrow;
}
public:
bool isMem32(uint32_t memoryIndex) {
  // True iff the given memory uses 32-bit (i32) addresses.
  return moduleEnv_.memories[memoryIndex].indexType() == IndexType::I32;
}
bool isMem64(uint32_t memoryIndex) {
  // True iff the given memory uses 64-bit (i64) addresses.
  return moduleEnv_.memories[memoryIndex].indexType() == IndexType::I64;
}
bool hugeMemoryEnabled(uint32_t memoryIndex) {
  // True iff this memory uses the "huge memory" scheme, where guard pages
  // stand in for explicit bounds checks.
  return moduleEnv_.hugeMemoryEnabled(memoryIndex);
}
// Add the offset into the pointer to yield the EA; trap on overflow.
MDefinition* computeEffectiveAddress(MDefinition* base,
                                     MemoryAccessDesc* access) {
  if (inDeadCode()) {
    return nullptr;
  }
  // With no offset, the base already is the effective address.
  if (access->offset64() == 0) {
    return base;
  }
  // MWasmAddOffset traps if base + offset overflows.
  auto* ea = MWasmAddOffset::New(alloc(), base, access->offset64(),
                                 bytecodeOffset());
  curBlock_->add(ea);
  // The offset is now baked into the EA, so the access must not apply it a
  // second time.
  access->clearOffset();
  return ea;
}
MDefinition* load(MDefinition* base, MemoryAccessDesc* access,
                  ValType result) {
  if (inDeadCode()) {
    return nullptr;
  }
  MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
  MInstruction* ins;
  if (moduleEnv_.isAsmJS()) {
    // asm.js accesses never fold an offset and check against the limit
    // directly in the load node itself.
    MOZ_ASSERT(access->offset64() == 0);
    MWasmLoadInstance* limit =
        maybeLoadBoundsCheckLimit(access->memoryIndex(), MIRType::Int32);
    ins = MAsmJSLoadHeap::New(alloc(), memoryBase, base, limit,
                              access->type());
  } else {
    // Wasm accesses get the full offset/alignment/bounds treatment, which
    // may rewrite `base`.
    checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
    MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
    ins = MWasmLoad::New(alloc(), memoryBase, base, *access,
                         result.toMIRType());
  }
  if (!ins) {
    return nullptr;
  }
  curBlock_->add(ins);
  return ins;
}
void store(MDefinition* base, MemoryAccessDesc* access, MDefinition* v) {
  if (inDeadCode()) {
    return;
  }
  MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
  MInstruction* ins;
  if (moduleEnv_.isAsmJS()) {
    // asm.js accesses never fold an offset and check against the limit
    // directly in the store node itself.
    MOZ_ASSERT(access->offset64() == 0);
    MWasmLoadInstance* limit =
        maybeLoadBoundsCheckLimit(access->memoryIndex(), MIRType::Int32);
    ins = MAsmJSStoreHeap::New(alloc(), memoryBase, base, limit,
                               access->type(), v);
  } else {
    // Wasm accesses get the full offset/alignment/bounds treatment, which
    // may rewrite `base`.
    checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
    MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
    ins = MWasmStore::New(alloc(), memoryBase, base, *access, v);
  }
  if (!ins) {
    return;
  }
  curBlock_->add(ins);
}
MDefinition* atomicCompareExchangeHeap(MDefinition* base,
                                       MemoryAccessDesc* access,
                                       ValType result, MDefinition* oldv,
                                       MDefinition* newv) {
  if (inDeadCode()) {
    return nullptr;
  }
  checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
  MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
  // A sub-word access with an i64 result operates on i32 values: chop both
  // operands down before the CAS and widen the result afterwards.
  bool narrowToI32 = isSmallerAccessForI64(result, access);
  if (narrowToI32) {
    auto* oldv32 = MWrapInt64ToInt32::New(alloc(), oldv, /*bottomHalf=*/true);
    curBlock_->add(oldv32);
    oldv = oldv32;
    auto* newv32 = MWrapInt64ToInt32::New(alloc(), newv, /*bottomHalf=*/true);
    curBlock_->add(newv32);
    newv = newv32;
  }
  MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
  MInstruction* cas = MWasmCompareExchangeHeap::New(
      alloc(), bytecodeOffset(), memoryBase, base, *access, oldv, newv,
      instancePointer_);
  if (!cas) {
    return nullptr;
  }
  curBlock_->add(cas);
  if (narrowToI32) {
    // Zero-extend the 32-bit CAS result back to i64.
    cas = MExtendInt32ToInt64::New(alloc(), cas, true);
    curBlock_->add(cas);
  }
  return cas;
}
MDefinition* atomicExchangeHeap(MDefinition* base, MemoryAccessDesc* access,
ValType result, MDefinition* value) {
if (inDeadCode()) {
return nullptr;
}
checkOffsetAndAlignmentAndBounds(access, &base);
#ifndef JS_64BIT
MOZ_ASSERT(base->type() == MIRType::Int32);
#endif
if (isSmallerAccessForI64(result, access)) {
auto* cvtValue =
MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
curBlock_->add(cvtValue);
value = cvtValue;
}
MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
MInstruction* xchg =
MWasmAtomicExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
base, *access, value, instancePointer_);
if (!xchg) {
return nullptr;
}
curBlock_->add(xchg);
if (isSmallerAccessForI64(result, access)) {
xchg = MExtendInt32ToInt64::New(alloc(), xchg, true);