/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
*
* Copyright 2015 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wasm/WasmStubs.h"
#include <algorithm>
#include <iterator>
#include <type_traits>
#include "jit/ABIArgGenerator.h"
#include "jit/JitFrames.h"
#include "jit/RegisterAllocator.h"
#include "js/Printf.h"
#include "util/Memory.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmInstance.h"
#include "jit/MacroAssembler-inl.h"
#include "wasm/WasmInstance-inl.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::DebugOnly;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::Some;
using MIRTypeVector = Vector<jit::MIRType, 8, SystemAllocPolicy>;
using ABIArgMIRTypeIter = jit::ABIArgIter<MIRTypeVector>;
/*****************************************************************************/
// ABIResultIter implementation
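//
// ABIResult describes where a single wasm result lives: in the one register
// result or at an offset in the stack result area. ABIResultIter (declared in
// WasmStubs.h) walks a ResultType and settles each ABIResult in turn. A sketch
// of the typical traversal, mirroring StoreRegisterResult further below:
//
//   for (ABIResultIter iter(resultType); !iter.done(); iter.next()) {
//     const ABIResult& result = iter.cur();
//     // result.inRegister() ? use result.gpr()/result.fpr()
//     //                     : use the result's stack offset
//   }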
static uint32_t ResultStackSize(ValType type) {
switch (type.kind()) {
case ValType::I32:
return ABIResult::StackSizeOfInt32;
case ValType::I64:
return ABIResult::StackSizeOfInt64;
case ValType::F32:
return ABIResult::StackSizeOfFloat;
case ValType::F64:
return ABIResult::StackSizeOfDouble;
#ifdef ENABLE_WASM_SIMD
case ValType::V128:
return ABIResult::StackSizeOfV128;
#endif
case ValType::Ref:
return ABIResult::StackSizeOfPtr;
default:
MOZ_CRASH("Unexpected result type");
}
}
// Compute the size of the stack slot that the wasm ABI requires be allocated
// for a particular MIRType. Note that this sometimes differs from the
// MIRType's natural size. See also ResultStackSize above and ABIResult::size()
// and ABIResultIter below.
uint32_t js::wasm::MIRTypeToABIResultSize(jit::MIRType type) {
switch (type) {
case MIRType::Int32:
return ABIResult::StackSizeOfInt32;
case MIRType::Int64:
return ABIResult::StackSizeOfInt64;
case MIRType::Float32:
return ABIResult::StackSizeOfFloat;
case MIRType::Double:
return ABIResult::StackSizeOfDouble;
#ifdef ENABLE_WASM_SIMD
case MIRType::Simd128:
return ABIResult::StackSizeOfV128;
#endif
case MIRType::Pointer:
case MIRType::WasmAnyRef:
return ABIResult::StackSizeOfPtr;
default:
MOZ_CRASH("MIRTypeToABIResultSize - unhandled case");
}
}
uint32_t ABIResult::size() const { return ResultStackSize(type()); }
void ABIResultIter::settleRegister(ValType type) {
MOZ_ASSERT(!done());
MOZ_ASSERT_IF(direction_ == Next, index() < MaxRegisterResults);
MOZ_ASSERT_IF(direction_ == Prev, index() >= count_ - MaxRegisterResults);
static_assert(MaxRegisterResults == 1, "expected a single register result");
switch (type.kind()) {
case ValType::I32:
cur_ = ABIResult(type, ReturnReg);
break;
case ValType::I64:
cur_ = ABIResult(type, ReturnReg64);
break;
case ValType::F32:
cur_ = ABIResult(type, ReturnFloat32Reg);
break;
case ValType::F64:
cur_ = ABIResult(type, ReturnDoubleReg);
break;
case ValType::Ref:
cur_ = ABIResult(type, ReturnReg);
break;
#ifdef ENABLE_WASM_SIMD
case ValType::V128:
cur_ = ABIResult(type, ReturnSimd128Reg);
break;
#endif
default:
MOZ_CRASH("Unexpected result type");
}
}
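// settleNext and settlePrev index type_ in reverse: in the Next direction the
// iterator yields the last result first, and that first-visited result is the
// one assigned a register (with MaxRegisterResults == 1, the function's final
// result). Stack offsets then grow (Next) or shrink (Prev) accordingly.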
void ABIResultIter::settleNext() {
MOZ_ASSERT(direction_ == Next);
MOZ_ASSERT(!done());
uint32_t typeIndex = count_ - index_ - 1;
ValType type = type_[typeIndex];
if (index_ < MaxRegisterResults) {
settleRegister(type);
return;
}
cur_ = ABIResult(type, nextStackOffset_);
nextStackOffset_ += ResultStackSize(type);
}
void ABIResultIter::settlePrev() {
MOZ_ASSERT(direction_ == Prev);
MOZ_ASSERT(!done());
uint32_t typeIndex = index_;
ValType type = type_[typeIndex];
if (count_ - index_ - 1 < MaxRegisterResults) {
settleRegister(type);
return;
}
uint32_t size = ResultStackSize(type);
MOZ_ASSERT(nextStackOffset_ >= size);
nextStackOffset_ -= size;
cur_ = ABIResult(type, nextStackOffset_);
}
#ifdef WASM_CODEGEN_DEBUG
template <class Closure>
static void GenPrint(DebugChannel channel, MacroAssembler& masm,
const Maybe<Register>& taken, Closure passArgAndCall) {
if (!IsCodegenDebugEnabled(channel)) {
return;
}
AllocatableRegisterSet regs(RegisterSet::All());
LiveRegisterSet save(regs.asLiveSet());
masm.PushRegsInMask(save);
if (taken) {
regs.take(taken.value());
}
Register temp = regs.takeAnyGeneral();
{
MOZ_ASSERT(MaybeGetJitContext(),
"codegen debug checks require a jit context");
masm.setupUnalignedABICall(temp);
passArgAndCall(IsCompilingWasm(), temp);
}
masm.PopRegsInMask(save);
}
static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
const char* fmt, ...) {
va_list ap;
va_start(ap, fmt);
UniqueChars str = JS_vsmprintf(fmt, ap);
va_end(ap);
GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
// If we've gone this far, it means we're actually using the debugging
// strings. In this case, we leak them! This is only for debugging, and
    // doing the right thing is cumbersome (in Ion, it'd mean adding a vec of
    // strings to the IonScript; in wasm, it'd mean adding them to the current
    // Module and serializing them properly).
const char* text = str.release();
masm.movePtr(ImmPtr((void*)text, ImmPtr::NoCheckToken()), temp);
masm.passABIArg(temp);
if (inWasm) {
masm.callDebugWithABI(SymbolicAddress::PrintText);
} else {
using Fn = void (*)(const char* output);
masm.callWithABI<Fn, PrintText>(ABIType::General,
CheckUnsafeCallWithABI::DontCheckOther);
}
});
}
static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
const Register& src) {
GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
masm.passABIArg(src);
if (inWasm) {
masm.callDebugWithABI(SymbolicAddress::PrintI32);
} else {
using Fn = void (*)(int32_t val);
masm.callWithABI<Fn, PrintI32>(ABIType::General,
CheckUnsafeCallWithABI::DontCheckOther);
}
});
}
static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
const Register& src) {
GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
masm.passABIArg(src);
if (inWasm) {
masm.callDebugWithABI(SymbolicAddress::PrintPtr);
} else {
using Fn = void (*)(uint8_t* val);
masm.callWithABI<Fn, PrintPtr>(ABIType::General,
CheckUnsafeCallWithABI::DontCheckOther);
}
});
}
static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
const Register64& src) {
# if JS_BITS_PER_WORD == 64
GenPrintf(channel, masm, "i64 ");
GenPrintIsize(channel, masm, src.reg);
# else
GenPrintf(channel, masm, "i64(");
GenPrintIsize(channel, masm, src.low);
GenPrintIsize(channel, masm, src.high);
GenPrintf(channel, masm, ") ");
# endif
}
static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
const FloatRegister& src) {
GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
masm.passABIArg(src, ABIType::Float32);
if (inWasm) {
masm.callDebugWithABI(SymbolicAddress::PrintF32);
} else {
using Fn = void (*)(float val);
masm.callWithABI<Fn, PrintF32>(ABIType::General,
CheckUnsafeCallWithABI::DontCheckOther);
}
});
}
static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
const FloatRegister& src) {
GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
masm.passABIArg(src, ABIType::Float64);
if (inWasm) {
masm.callDebugWithABI(SymbolicAddress::PrintF64);
} else {
using Fn = void (*)(double val);
masm.callWithABI<Fn, PrintF64>(ABIType::General,
CheckUnsafeCallWithABI::DontCheckOther);
}
});
}
# ifdef ENABLE_WASM_SIMD
static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
const FloatRegister& src) {
// TODO: We might try to do something meaningful here once SIMD data are
// aligned and hence C++-ABI compliant. For now, just make ourselves visible.
GenPrintf(channel, masm, "v128");
}
# endif
#else
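// Without WASM_CODEGEN_DEBUG, the printers below compile to empty inline
// stubs and cost nothing in generated code.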
static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
const char* fmt, ...) {}
static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
const Register& src) {}
static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
const Register& src) {}
static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
const Register64& src) {}
static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
const FloatRegister& src) {}
static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
const FloatRegister& src) {}
# ifdef ENABLE_WASM_SIMD
static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
const FloatRegister& src) {}
# endif
#endif
static bool FinishOffsets(MacroAssembler& masm, Offsets* offsets) {
// On old ARM hardware, constant pools could be inserted and they need to
// be flushed before considering the size of the masm.
masm.flushBuffer();
offsets->end = masm.size();
return !masm.oom();
}
static void AssertStackAlignment(MacroAssembler& masm, uint32_t alignment,
uint32_t addBeforeAssert = 0) {
MOZ_ASSERT(
(sizeof(Frame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
masm.assertStackAlignment(alignment, addBeforeAssert);
}
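// Measure how many bytes of stack an argument list consumes by running an ABI
// argument iterator over it to completion. The native-ABI and wasm-ABI
// variants below can disagree, since wasm's internal calling convention may
// assign registers and stack slots differently from the platform C++ ABI.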
template <class VectorT, template <class VecT> class ABIArgIterT>
static unsigned StackArgBytesHelper(const VectorT& args) {
ABIArgIterT<VectorT> iter(args);
while (!iter.done()) {
iter++;
}
return iter.stackBytesConsumedSoFar();
}
template <class VectorT>
static unsigned StackArgBytesForNativeABI(const VectorT& args) {
return StackArgBytesHelper<VectorT, ABIArgIter>(args);
}
template <class VectorT>
static unsigned StackArgBytesForWasmABI(const VectorT& args) {
return StackArgBytesHelper<VectorT, WasmABIArgIter>(args);
}
static unsigned StackArgBytesForWasmABI(const FuncType& funcType) {
ArgTypeVector args(funcType);
return StackArgBytesForWasmABI(args);
}
static void SetupABIArguments(MacroAssembler& masm, const FuncExport& fe,
const FuncType& funcType, Register argv,
Register scratch) {
// Copy parameters out of argv and into the registers/stack-slots specified by
// the wasm ABI.
//
  // SetupABIArguments is only used for C++ -> wasm calls through callExport(),
  // and V128 and Ref types (other than externref) are not currently allowed.
ArgTypeVector args(funcType);
for (WasmABIArgIter iter(args); !iter.done(); iter++) {
unsigned argOffset = iter.index() * sizeof(ExportArg);
Address src(argv, argOffset);
MIRType type = iter.mirType();
switch (iter->kind()) {
case ABIArg::GPR:
if (type == MIRType::Int32) {
masm.load32(src, iter->gpr());
} else if (type == MIRType::Int64) {
masm.load64(src, iter->gpr64());
} else if (type == MIRType::WasmAnyRef) {
masm.loadPtr(src, iter->gpr());
} else if (type == MIRType::StackResults) {
MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
masm.loadPtr(src, iter->gpr());
} else {
MOZ_CRASH("unknown GPR type");
}
break;
#ifdef JS_CODEGEN_REGISTER_PAIR
case ABIArg::GPR_PAIR:
if (type == MIRType::Int64) {
masm.load64(src, iter->gpr64());
} else {
MOZ_CRASH("wasm uses hardfp for function calls.");
}
break;
#endif
case ABIArg::FPU: {
static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
"ExportArg must be big enough to store SIMD values");
switch (type) {
case MIRType::Double:
masm.loadDouble(src, iter->fpu());
break;
case MIRType::Float32:
masm.loadFloat32(src, iter->fpu());
break;
case MIRType::Simd128:
#ifdef ENABLE_WASM_SIMD
// This is only used by the testing invoke path,
// wasmLosslessInvoke, and is guarded against in normal JS-API
// call paths.
masm.loadUnalignedSimd128(src, iter->fpu());
break;
#else
MOZ_CRASH("V128 not supported in SetupABIArguments");
#endif
default:
MOZ_CRASH("unexpected FPU type");
break;
}
break;
}
case ABIArg::Stack:
switch (type) {
case MIRType::Int32:
masm.load32(src, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase()));
break;
case MIRType::Int64: {
RegisterOrSP sp = masm.getStackPointer();
masm.copy64(src, Address(sp, iter->offsetFromArgBase()), scratch);
break;
}
case MIRType::WasmAnyRef:
masm.loadPtr(src, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase()));
break;
case MIRType::Double: {
ScratchDoubleScope fpscratch(masm);
masm.loadDouble(src, fpscratch);
masm.storeDouble(fpscratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase()));
break;
}
case MIRType::Float32: {
ScratchFloat32Scope fpscratch(masm);
masm.loadFloat32(src, fpscratch);
masm.storeFloat32(fpscratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase()));
break;
}
case MIRType::Simd128: {
#ifdef ENABLE_WASM_SIMD
// This is only used by the testing invoke path,
// wasmLosslessInvoke, and is guarded against in normal JS-API
// call paths.
ScratchSimd128Scope fpscratch(masm);
masm.loadUnalignedSimd128(src, fpscratch);
masm.storeUnalignedSimd128(
fpscratch,
Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
#else
MOZ_CRASH("V128 not supported in SetupABIArguments");
#endif
}
case MIRType::StackResults: {
MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
masm.loadPtr(src, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase()));
break;
}
default:
MOZ_CRASH("unexpected stack arg type");
}
break;
case ABIArg::Uninitialized:
MOZ_CRASH("Uninitialized ABIArg kind");
}
}
}
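// Store the single register result of a wasm call, if any, to *loc. Stack
// results, when present, are written by the callee through the stack result
// area (passed as a synthetic pointer argument) and are not handled here.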
static void StoreRegisterResult(MacroAssembler& masm, const FuncExport& fe,
const FuncType& funcType, Register loc) {
ResultType results = ResultType::Vector(funcType.results());
DebugOnly<bool> sawRegisterResult = false;
for (ABIResultIter iter(results); !iter.done(); iter.next()) {
const ABIResult& result = iter.cur();
if (result.inRegister()) {
MOZ_ASSERT(!sawRegisterResult);
sawRegisterResult = true;
switch (result.type().kind()) {
case ValType::I32:
masm.store32(result.gpr(), Address(loc, 0));
break;
case ValType::I64:
masm.store64(result.gpr64(), Address(loc, 0));
break;
case ValType::V128:
#ifdef ENABLE_WASM_SIMD
masm.storeUnalignedSimd128(result.fpr(), Address(loc, 0));
break;
#else
MOZ_CRASH("V128 not supported in StoreABIReturn");
#endif
case ValType::F32:
masm.storeFloat32(result.fpr(), Address(loc, 0));
break;
case ValType::F64:
masm.storeDouble(result.fpr(), Address(loc, 0));
break;
case ValType::Ref:
masm.storePtr(result.gpr(), Address(loc, 0));
break;
}
}
}
MOZ_ASSERT(sawRegisterResult == (results.length() > 0));
}
#if defined(JS_CODEGEN_ARM)
// The ARM system ABI also includes d15 & s31 in the non-volatile float
// registers. Also exclude lr (a.k.a. r14), which we preserve manually.
static const LiveRegisterSet NonVolatileRegs = LiveRegisterSet(
GeneralRegisterSet(Registers::NonVolatileMask &
~(Registers::SetType(1) << Registers::lr)),
FloatRegisterSet(FloatRegisters::NonVolatileMask |
(FloatRegisters::SetType(1) << FloatRegisters::d15) |
(FloatRegisters::SetType(1) << FloatRegisters::s31)));
#elif defined(JS_CODEGEN_ARM64)
// Exclude the Link Register (x30) because it is preserved manually.
//
// Include x16 (scratch) to pad the saved integer registers to a 16-byte
// aligned size. Include d31 (scratch) to do the same for the float registers.
static const LiveRegisterSet NonVolatileRegs = LiveRegisterSet(
GeneralRegisterSet((Registers::NonVolatileMask &
~(Registers::SetType(1) << Registers::lr)) |
(Registers::SetType(1) << Registers::x16)),
FloatRegisterSet(FloatRegisters::NonVolatileMask |
FloatRegisters::NonAllocatableMask));
#else
static const LiveRegisterSet NonVolatileRegs =
LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
FloatRegisterSet(FloatRegisters::NonVolatileMask));
#endif
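// Bytes reserved per WasmPush: the ARM64 hardware SP must remain 16-byte
// aligned at all times, so each push there consumes a full 16-byte unit (see
// the pad word in WasmPush below).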
#ifdef JS_CODEGEN_ARM64
static const unsigned WasmPushSize = 16;
#else
static const unsigned WasmPushSize = sizeof(void*);
#endif
static void AssertExpectedSP(MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
MOZ_ASSERT(sp.Is(masm.GetStackPointer64()));
# ifdef DEBUG
// Since we're asserting that SP is the currently active stack pointer,
// let's also in effect assert that PSP is dead -- by setting it to 1, so as
  // to cause any attempts to use it to segfault in an easily identifiable
  // way.
masm.asVIXL().Mov(PseudoStackPointer64, 1);
# endif
#endif
}
template <class Operand>
static void WasmPush(MacroAssembler& masm, const Operand& op) {
#ifdef JS_CODEGEN_ARM64
// Allocate a pad word so that SP can remain properly aligned. |op| will be
// written at the lower-addressed of the two words pushed here.
masm.reserveStack(WasmPushSize);
masm.storePtr(op, Address(masm.getStackPointer(), 0));
#else
masm.Push(op);
#endif
}
static void WasmPop(MacroAssembler& masm, Register r) {
#ifdef JS_CODEGEN_ARM64
// Also pop the pad word allocated by WasmPush.
masm.loadPtr(Address(masm.getStackPointer(), 0), r);
masm.freeStack(WasmPushSize);
#else
masm.Pop(r);
#endif
}
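// JIT code on ARM64 addresses stack slots through the pseudo stack pointer
// (PSP) rather than the hardware SP, so mirror SP into PSP before handing
// control to code that follows the JIT ABI. A no-op elsewhere.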
static void MoveSPForJitABI(MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
masm.moveStackPtrTo(PseudoStackPointer);
#endif
}
static void CallFuncExport(MacroAssembler& masm, const FuncExport& fe,
const Maybe<ImmPtr>& funcPtr) {
MOZ_ASSERT(fe.hasEagerStubs() == !funcPtr);
MoveSPForJitABI(masm);
if (funcPtr) {
masm.call(*funcPtr);
} else {
masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
}
}
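// For orientation, the native signature adapted by GenerateInterpEntry is, in
// sketch form (see Module::ExportFuncPtr for the authoritative definition):
//
//   using ExportFuncPtr = int32_t (*)(ExportArg* args, Instance* instance);
//
// The two arguments are read out according to the native ABI below, and the
// boolean call status is returned in ReturnReg.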
// Generate a stub that enters wasm from a C++ caller via the native ABI. The
// signature of the entry point is Module::ExportFuncPtr. The exported wasm
// function has an ABI derived from its specific signature, so this function
// must map from the ABI of ExportFuncPtr to the export's signature's ABI.
static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe,
const FuncType& funcType,
const Maybe<ImmPtr>& funcPtr,
Offsets* offsets) {
AutoCreatedBy acb(masm, "GenerateInterpEntry");
AssertExpectedSP(masm);
// UBSAN expects that the word before a C++ function pointer is readable for
// some sort of generated assertion.
//
// These interp entry points can sometimes be output at the beginning of a
// code page allocation, which will cause access violations when called with
// UBSAN enabled.
//
// Insert some padding in this case by inserting a breakpoint before we align
// our code. This breakpoint will misalign the code buffer (which was aligned
// due to being at the beginning of the buffer), which will then be aligned
// and have at least one word of padding before this entry point.
if (masm.currentOffset() == 0) {
masm.breakpoint();
}
masm.haltingAlign(CodeAlignment);
// Double check that the first word is available for UBSAN; see above.
static_assert(CodeAlignment >= sizeof(uintptr_t));
MOZ_ASSERT_IF(!masm.oom(), masm.currentOffset() >= sizeof(uintptr_t));
offsets->begin = masm.currentOffset();
// Save the return address if it wasn't already saved by the call insn.
#ifdef JS_USE_LINK_REGISTER
# if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS64) || \
defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
masm.pushReturnAddress();
# elif defined(JS_CODEGEN_ARM64)
// WasmPush updates framePushed() unlike pushReturnAddress(), but that's
// cancelled by the setFramePushed() below.
WasmPush(masm, lr);
# else
MOZ_CRASH("Implement this");
# endif
#endif
// Save all caller non-volatile registers before we clobber them here and in
// the wasm callee (which does not preserve non-volatile registers).
masm.setFramePushed(0);
masm.PushRegsInMask(NonVolatileRegs);
const unsigned nonVolatileRegsPushSize =
MacroAssembler::PushRegsInMaskSizeInBytes(NonVolatileRegs);
MOZ_ASSERT(masm.framePushed() == nonVolatileRegsPushSize);
// Put the 'argv' argument into a non-argument/return/instance register so
// that we can use 'argv' while we fill in the arguments for the wasm callee.
// Use a second non-argument/return register as temporary scratch.
Register argv = ABINonArgReturnReg0;
Register scratch = ABINonArgReturnReg1;
// scratch := SP
masm.moveStackPtrTo(scratch);
// Dynamically align the stack since ABIStackAlignment is not necessarily
// WasmStackAlignment. Preserve SP so it can be restored after the call.
#ifdef JS_CODEGEN_ARM64
static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
#else
masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
#endif
masm.assertStackAlignment(WasmStackAlignment);
// Create a fake frame: just previous RA and an FP.
const size_t FakeFrameSize = 2 * sizeof(void*);
#ifdef JS_CODEGEN_ARM64
masm.Ldr(ARMRegister(ABINonArgReturnReg0, 64),
MemOperand(ARMRegister(scratch, 64), nonVolatileRegsPushSize));
#else
masm.Push(Address(scratch, nonVolatileRegsPushSize));
#endif
// Store fake wasm register state. Ensure the frame pointer passed by the C++
// caller doesn't have the ExitFPTag bit set to not confuse frame iterators.
// This bit shouldn't be set if C++ code is using frame pointers, so this has
// no effect on native stack unwinders.
masm.andPtr(Imm32(int32_t(~ExitFPTag)), FramePointer);
#ifdef JS_CODEGEN_ARM64
masm.asVIXL().Push(ARMRegister(ABINonArgReturnReg0, 64),
ARMRegister(FramePointer, 64));
#else
masm.Push(FramePointer);
#endif
masm.moveStackPtrTo(FramePointer);
masm.setFramePushed(0);
#ifdef JS_CODEGEN_ARM64
DebugOnly<size_t> fakeFramePushed = 0;
#else
DebugOnly<size_t> fakeFramePushed = sizeof(void*);
masm.Push(scratch);
#endif
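  // On non-ARM64 targets the pre-alignment SP saved in scratch is pushed here
  // so it can be restored with PopStackPtr after the call; ARM64 skips the
  // dynamic alignment entirely (see the static_assert above), so there is
  // nothing to restore.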
// Read the arguments of wasm::ExportFuncPtr according to the native ABI.
// The entry stub's frame is 1 word.
const unsigned argBase = sizeof(void*) + nonVolatileRegsPushSize;
ABIArgGenerator abi;
ABIArg arg;
// arg 1: ExportArg*
arg = abi.next(MIRType::Pointer);
if (arg.kind() == ABIArg::GPR) {
masm.movePtr(arg.gpr(), argv);
} else {
masm.loadPtr(Address(scratch, argBase + arg.offsetFromArgBase()), argv);
}
// Arg 2: Instance*
arg = abi.next(MIRType::Pointer);
if (arg.kind() == ABIArg::GPR) {
masm.movePtr(arg.gpr(), InstanceReg);
} else {
masm.loadPtr(Address(scratch, argBase + arg.offsetFromArgBase()),
InstanceReg);
}
WasmPush(masm, InstanceReg);
// Save 'argv' on the stack so that we can recover it after the call.
WasmPush(masm, argv);
MOZ_ASSERT(masm.framePushed() == 2 * WasmPushSize + fakeFramePushed,
"expected instance, argv, and fake frame");
uint32_t frameSizeBeforeCall = masm.framePushed();
  // Align the (missing) results area to a WasmStackAlignment boundary. Return
  // calls expect arguments to not overlap with results or other slots.
unsigned aligned =
AlignBytes(masm.framePushed() + FakeFrameSize, WasmStackAlignment);
masm.reserveStack(aligned - masm.framePushed() + FakeFrameSize);
// Reserve stack space for the wasm call.
unsigned argDecrement = StackDecrementForCall(
WasmStackAlignment, aligned, StackArgBytesForWasmABI(funcType));
masm.reserveStack(argDecrement);
// Copy parameters out of argv and into the wasm ABI registers/stack-slots.
SetupABIArguments(masm, fe, funcType, argv, scratch);
masm.loadWasmPinnedRegsFromInstance();
masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
WasmCalleeInstanceOffsetBeforeCall));
// Call into the real function. Note that, due to the throw stub, fp, instance
// and pinned registers may be clobbered.
masm.assertStackAlignment(WasmStackAlignment);
CallFuncExport(masm, fe, funcPtr);
masm.assertStackAlignment(WasmStackAlignment);
// Set the return value based on whether InstanceReg is the
// InterpFailInstanceReg magic value (set by the exception handler).
Label success, join;
masm.branchPtr(Assembler::NotEqual, InstanceReg, Imm32(InterpFailInstanceReg),
&success);
masm.move32(Imm32(false), scratch);
masm.jump(&join);
masm.bind(&success);
masm.move32(Imm32(true), scratch);
masm.bind(&join);
// Pop the arguments pushed after the dynamic alignment.
masm.setFramePushed(frameSizeBeforeCall);
masm.freeStackTo(frameSizeBeforeCall);
// Recover the 'argv' pointer which was saved before aligning the stack.
WasmPop(masm, argv);
WasmPop(masm, InstanceReg);
// Pop the stack pointer to its value right before dynamic alignment.
#ifdef JS_CODEGEN_ARM64
static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
masm.setFramePushed(FakeFrameSize);
masm.freeStack(FakeFrameSize);
#else
masm.PopStackPtr();
#endif
// Store the register result, if any, in argv[0].
// No widening is required, as the value leaves ReturnReg.
StoreRegisterResult(masm, fe, funcType, argv);
masm.move32(scratch, ReturnReg);
// Restore clobbered non-volatile registers of the caller.
masm.setFramePushed(nonVolatileRegsPushSize);
masm.PopRegsInMask(NonVolatileRegs);
MOZ_ASSERT(masm.framePushed() == 0);
#if defined(JS_CODEGEN_ARM64)
masm.setFramePushed(WasmPushSize);
WasmPop(masm, lr);
masm.abiret();
#else
masm.ret();
#endif
return FinishOffsets(masm, offsets);
}
#ifdef JS_PUNBOX64
static const ValueOperand ScratchValIonEntry = ValueOperand(ABINonArgReg0);
#else
static const ValueOperand ScratchValIonEntry =
ValueOperand(ABINonArgReg0, ABINonArgReg1);
#endif
static const Register ScratchIonEntry = ABINonArgReg2;
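// Call a C++ helper either through an absolute immediate pointer or through a
// patchable SymbolicAddress call. Callers pass isAbsolute =
// !fe.hasEagerStubs(); lazily generated stubs are emitted after the module is
// linked, hence the absolute call.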
static void CallSymbolicAddress(MacroAssembler& masm, bool isAbsolute,
SymbolicAddress sym) {
if (isAbsolute) {
masm.call(ImmPtr(SymbolicAddressTarget(sym), ImmPtr::NoCheckToken()));
} else {
masm.call(sym);
}
}
// Load the callee JSFunction's wasm Instance* from its extended slot.
static void GenerateJitEntryLoadInstance(MacroAssembler& masm) {
// ScratchIonEntry := callee => JSFunction*
unsigned offset = JitFrameLayout::offsetOfCalleeToken();
masm.loadFunctionFromCalleeToken(Address(FramePointer, offset),
ScratchIonEntry);
// ScratchIonEntry := callee->getExtendedSlot(WASM_INSTANCE_SLOT)->toPrivate()
// => Instance*
offset = FunctionExtended::offsetOfExtendedSlot(
FunctionExtended::WASM_INSTANCE_SLOT);
masm.loadPrivate(Address(ScratchIonEntry, offset), InstanceReg);
}
// Creates a JS fake exit frame for wasm, so the frame iterators just use
// JSJit frame iteration.
//
// Note: the caller must ensure InstanceReg is valid.
static void GenerateJitEntryThrow(MacroAssembler& masm, unsigned frameSize) {
AssertExpectedSP(masm);
MOZ_ASSERT(masm.framePushed() == frameSize);
masm.freeStack(frameSize);
MoveSPForJitABI(masm);
masm.loadPtr(Address(InstanceReg, Instance::offsetOfCx()), ScratchIonEntry);
masm.enterFakeExitFrameForWasm(ScratchIonEntry, ScratchIonEntry,
ExitFrameType::WasmGenericJitEntry);
masm.loadPtr(Address(InstanceReg, Instance::offsetOfJSJitExceptionHandler()),
ScratchIonEntry);
masm.jump(ScratchIonEntry);
}
// Helper function for allocating a BigInt and initializing it from an I64 in
// GenerateJitEntry. The resulting BigInt* is written to scratch.
//
// Note that this will create a new frame and must not - in its current form -
// be called from a context where there is already another stub frame on the
// stack, as that confuses unwinding during profiling. This was a problem for
// its use from GenerateJitExit, so that is no longer allowed;
// FuncType::canHaveJitExit prevents the present function from being called
// for exits.
static void GenerateBigIntInitialization(MacroAssembler& masm,
unsigned bytesPushedByPrologue,
Register64 input, Register scratch,
const FuncExport& fe, Label* fail) {
#if JS_BITS_PER_WORD == 32
MOZ_ASSERT(input.low != scratch);
MOZ_ASSERT(input.high != scratch);
#else
MOZ_ASSERT(input.reg != scratch);
#endif
// We need to avoid clobbering other argument registers and the input.
AllocatableRegisterSet regs(RegisterSet::Volatile());
LiveRegisterSet save(regs.asLiveSet());
masm.PushRegsInMask(save);
unsigned frameSize = StackDecrementForCall(
ABIStackAlignment, masm.framePushed() + bytesPushedByPrologue, 0);
masm.reserveStack(frameSize);
masm.assertStackAlignment(ABIStackAlignment);
CallSymbolicAddress(masm, !fe.hasEagerStubs(),
SymbolicAddress::AllocateBigInt);
masm.storeCallPointerResult(scratch);
masm.assertStackAlignment(ABIStackAlignment);
masm.freeStack(frameSize);
LiveRegisterSet ignore;
ignore.add(scratch);
masm.PopRegsInMaskIgnore(save, ignore);
masm.branchTest32(Assembler::Zero, scratch, scratch, fail);
masm.initializeBigInt64(Scalar::BigInt64, scratch, input);
}
// Generate a stub that enters wasm from a jit code caller via the jit ABI.
//
// ARM64 note: This does not save the PseudoStackPointer so we must be sure to
// recompute it on every return path, be it normal return or exception return.
// The JIT code we return to assumes it is correct.
static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
const FuncExport& fe, const FuncType& funcType,
const Maybe<ImmPtr>& funcPtr,
CallableOffsets* offsets) {
AutoCreatedBy acb(masm, "GenerateJitEntry");
AssertExpectedSP(masm);
RegisterOrSP sp = masm.getStackPointer();
GenerateJitEntryPrologue(masm, offsets);
// The jit caller has set up the following stack layout (sp grows to the
// left):
// <-- retAddr | descriptor | callee | argc | this | arg1..N
//
// GenerateJitEntryPrologue has additionally pushed the caller's frame
// pointer. The stack pointer is now JitStackAlignment-aligned.
//
// We initialize an ExitFooterFrame (with ExitFrameType::WasmGenericJitEntry)
// immediately below the frame pointer to ensure FP is a valid JS JIT exit
// frame.
MOZ_ASSERT(masm.framePushed() == 0);
unsigned normalBytesNeeded =
ExitFooterFrame::Size() + StackArgBytesForWasmABI(funcType);
MIRTypeVector coerceArgTypes;
MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Int32));
MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
unsigned oolBytesNeeded =
ExitFooterFrame::Size() + StackArgBytesForWasmABI(coerceArgTypes);
unsigned bytesNeeded = std::max(normalBytesNeeded, oolBytesNeeded);
// Note the jit caller ensures the stack is aligned *after* the call
// instruction.
unsigned frameSize = StackDecrementForCall(WasmStackAlignment,
masm.framePushed(), bytesNeeded);
// Reserve stack space for wasm ABI arguments, set up like this:
// <-- ABI args | padding
masm.reserveStack(frameSize);
MOZ_ASSERT(masm.framePushed() == frameSize);
// Initialize the ExitFooterFrame.
static_assert(ExitFooterFrame::Size() == sizeof(uintptr_t));
masm.storePtr(ImmWord(uint32_t(ExitFrameType::WasmGenericJitEntry)),
Address(FramePointer, -int32_t(ExitFooterFrame::Size())));
GenerateJitEntryLoadInstance(masm);
if (funcType.hasUnexposableArgOrRet()) {
CallSymbolicAddress(masm, !fe.hasEagerStubs(),
SymbolicAddress::ReportV128JSCall);
GenerateJitEntryThrow(masm, frameSize);
return FinishOffsets(masm, offsets);
}
FloatRegister scratchF = ABINonArgDoubleReg;
Register scratchG = ScratchIonEntry;
ValueOperand scratchV = ScratchValIonEntry;
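  // These scratch registers are all outside the argument-passing register
  // sets, so the conversion loops below may clobber them freely.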
GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
fe.funcIndex());
// We do two loops:
// - one loop up-front will make sure that all the Value tags fit the
// expected signature argument types. If at least one inline conversion
  // fails, we just jump to the OOL path, which will call into C++. Inline
  // conversions are ordered from most to least frequently expected.
// - the second loop will unbox the arguments into the right registers.
Label oolCall;
for (size_t i = 0; i < funcType.args().length(); i++) {
Address jitArgAddr(FramePointer, JitFrameLayout::offsetOfActualArg(i));
masm.loadValue(jitArgAddr, scratchV);
Label next;
switch (funcType.args()[i].kind()) {
case ValType::I32: {
Label isDouble, isUndefinedOrNull, isBoolean;
{
ScratchTagScope tag(masm, scratchV);
masm.splitTagForTest(scratchV, tag);
// For int32 inputs, just skip.
masm.branchTestInt32(Assembler::Equal, tag, &next);
masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
masm.branchTestUndefined(Assembler::Equal, tag, &isUndefinedOrNull);
masm.branchTestNull(Assembler::Equal, tag, &isUndefinedOrNull);
masm.branchTestBoolean(Assembler::Equal, tag, &isBoolean);
// Other types (symbol, object, strings) go to the C++ call.
masm.jump(&oolCall);
}
Label storeBack;
// For double inputs, unbox, truncate and store back.
masm.bind(&isDouble);
{
masm.unboxDouble(scratchV, scratchF);
masm.branchTruncateDoubleMaybeModUint32(scratchF, scratchG, &oolCall);
masm.jump(&storeBack);
}
// For null or undefined, store 0.
masm.bind(&isUndefinedOrNull);
{
masm.storeValue(Int32Value(0), jitArgAddr);
masm.jump(&next);
}
// For booleans, store the number value back.
masm.bind(&isBoolean);
masm.unboxBoolean(scratchV, scratchG);
// fallthrough:
masm.bind(&storeBack);
masm.storeValue(JSVAL_TYPE_INT32, scratchG, jitArgAddr);
break;
}
case ValType::I64: {
// For BigInt inputs, just skip. Otherwise go to C++ for other
// types that require creating a new BigInt or erroring.
masm.branchTestBigInt(Assembler::NotEqual, scratchV, &oolCall);
break;
}
case ValType::F32:
case ValType::F64: {
// Note we can reuse the same code for f32/f64 here, since for the
// case of f32, the conversion of f64 to f32 will happen in the
// second loop.
Label isInt32OrBoolean, isUndefined, isNull;
{
ScratchTagScope tag(masm, scratchV);
masm.splitTagForTest(scratchV, tag);
// For double inputs, just skip.
masm.branchTestDouble(Assembler::Equal, tag, &next);
masm.branchTestInt32(Assembler::Equal, tag, &isInt32OrBoolean);
masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
masm.branchTestNull(Assembler::Equal, tag, &isNull);
masm.branchTestBoolean(Assembler::Equal, tag, &isInt32OrBoolean);
// Other types (symbol, object, strings) go to the C++ call.
masm.jump(&oolCall);
}
// For int32 and boolean inputs, convert and rebox.
masm.bind(&isInt32OrBoolean);
{
masm.convertInt32ToDouble(scratchV.payloadOrValueReg(), scratchF);
masm.boxDouble(scratchF, jitArgAddr);
masm.jump(&next);
}
// For undefined (missing argument), store NaN.
masm.bind(&isUndefined);
{
masm.storeValue(DoubleValue(JS::GenericNaN()), jitArgAddr);
masm.jump(&next);
}
// +null is 0.
masm.bind(&isNull);
{ masm.storeValue(DoubleValue(0.), jitArgAddr); }
break;
}
case ValType::Ref: {
// Guarded against by temporarilyUnsupportedReftypeForEntry()
MOZ_RELEASE_ASSERT(funcType.args()[i].refType().isExtern());
masm.branchValueConvertsToWasmAnyRefInline(scratchV, scratchG, scratchF,
&next);
masm.jump(&oolCall);
break;
}
case ValType::V128: {
// Guarded against by hasUnexposableArgOrRet()
MOZ_CRASH("unexpected argument type when calling from the jit");
}
default: {
MOZ_CRASH("unexpected argument type when calling from the jit");
}
}
masm.nopAlign(CodeAlignment);
masm.bind(&next);
}
Label rejoinBeforeCall;
masm.bind(&rejoinBeforeCall);
// Convert all the expected values to unboxed values on the stack.
ArgTypeVector args(funcType);
for (WasmABIArgIter iter(args); !iter.done(); iter++) {
Address argv(FramePointer, JitFrameLayout::offsetOfActualArg(iter.index()));
bool isStackArg = iter->kind() == ABIArg::Stack;
switch (iter.mirType()) {
case MIRType::Int32: {
Register target = isStackArg ? ScratchIonEntry : iter->gpr();
masm.unboxInt32(argv, target);
GenPrintIsize(DebugChannel::Function, masm, target);
if (isStackArg) {
masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
}
break;
}
case MIRType::Int64: {
// The coercion has provided a BigInt value by this point, which
// we need to convert to an I64 here.
if (isStackArg) {
Address dst(sp, iter->offsetFromArgBase());
Register src = scratchV.payloadOrValueReg();
#if JS_BITS_PER_WORD == 64
Register64 scratch64(scratchG);
#else
Register64 scratch64(scratchG, ABINonArgReg3);
#endif
masm.unboxBigInt(argv, src);
masm.loadBigInt64(src, scratch64);
GenPrintI64(DebugChannel::Function, masm, scratch64);
masm.store64(scratch64, dst);
} else {
Register src = scratchG;
Register64 target = iter->gpr64();
masm.unboxBigInt(argv, src);
masm.loadBigInt64(src, target);
GenPrintI64(DebugChannel::Function, masm, target);
}
break;
}
case MIRType::Float32: {
FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
masm.unboxDouble(argv, ABINonArgDoubleReg);
masm.convertDoubleToFloat32(ABINonArgDoubleReg, target);
GenPrintF32(DebugChannel::Function, masm, target.asSingle());
if (isStackArg) {
masm.storeFloat32(target, Address(sp, iter->offsetFromArgBase()));
}
break;
}
case MIRType::Double: {
FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
masm.unboxDouble(argv, target);
GenPrintF64(DebugChannel::Function, masm, target);
if (isStackArg) {
masm.storeDouble(target, Address(sp, iter->offsetFromArgBase()));
}
break;
}
case MIRType::WasmAnyRef: {
ValueOperand src = ScratchValIonEntry;
Register target = isStackArg ? ScratchIonEntry : iter->gpr();
masm.loadValue(argv, src);
// The loop before should ensure that all values that require boxing
// have been taken care of.
Label join;
Label fail;
masm.convertValueToWasmAnyRef(src, target, scratchF, &fail);
masm.jump(&join);
masm.bind(&fail);
masm.breakpoint();
masm.bind(&join);
GenPrintPtr(DebugChannel::Function, masm, target);
if (isStackArg) {
masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
}
break;
}
default: {
MOZ_CRASH("unexpected input argument when calling from jit");
}
}
}
GenPrintf(DebugChannel::Function, masm, "\n");
// Setup wasm register state.
masm.loadWasmPinnedRegsFromInstance();
masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
WasmCalleeInstanceOffsetBeforeCall));
// Call into the real function.
masm.assertStackAlignment(WasmStackAlignment);
CallFuncExport(masm, fe, funcPtr);
masm.assertStackAlignment(WasmStackAlignment);
GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
fe.funcIndex());
// Store the return value in the JSReturnOperand.
Label exception;
const ValTypeVector& results = funcType.results();
if (results.length() == 0) {
GenPrintf(DebugChannel::Function, masm, "void");
masm.moveValue(UndefinedValue(), JSReturnOperand);
} else {
MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
switch (results[0].kind()) {
case ValType::I32:
GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
// No widening is required, as the value is boxed.
masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand);
break;
case ValType::F32: {
masm.canonicalizeFloat(ReturnFloat32Reg);
masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
ScratchDoubleScope fpscratch(masm);
masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
break;
}
case ValType::F64: {
masm.canonicalizeDouble(ReturnDoubleReg);
GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
ScratchDoubleScope fpscratch(masm);
masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
break;
}
case ValType::I64: {
GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
MOZ_ASSERT(masm.framePushed() == frameSize);
GenerateBigIntInitialization(masm, 0, ReturnReg64, scratchG, fe,