/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
*
* Copyright 2015 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wasm/WasmStubs.h"
#include "mozilla/ArrayUtils.h"
#include <algorithm>
#include "jit/JitScript.h"
#include "jit/RegisterAllocator.h"
#include "js/Printf.h"
#include "util/Memory.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmInstance.h"
#include "jit/MacroAssembler-inl.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::ArrayLength;
typedef Vector<jit::MIRType, 8, SystemAllocPolicy> MIRTypeVector;
using ABIArgMIRTypeIter = jit::ABIArgIter<MIRTypeVector>;
/*****************************************************************************/
// ABIResultIter implementation
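//
// An ABIResultIter enumerates the ABI locations (return registers or offsets
// in the stack result area) of a function's results. In the Next direction it
// visits result types in reverse order, assigning the first
// MaxRegisterResults results visited to return registers and the rest to
// increasing stack offsets; the Prev direction walks the same assignment from
// the other end.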
static uint32_t ResultStackSize(ValType type) {
switch (type.kind()) {
case ValType::I32:
return ABIResult::StackSizeOfInt32;
case ValType::I64:
return ABIResult::StackSizeOfInt64;
case ValType::F32:
return ABIResult::StackSizeOfFloat;
case ValType::F64:
return ABIResult::StackSizeOfDouble;
#ifdef ENABLE_WASM_SIMD
case ValType::V128:
return ABIResult::StackSizeOfV128;
#endif
case ValType::Ref:
return ABIResult::StackSizeOfPtr;
default:
MOZ_CRASH("Unexpected result type");
}
}
uint32_t ABIResult::size() const { return ResultStackSize(type()); }
void ABIResultIter::settleRegister(ValType type) {
MOZ_ASSERT(!done());
MOZ_ASSERT_IF(direction_ == Next, index() < MaxRegisterResults);
MOZ_ASSERT_IF(direction_ == Prev, index() >= count_ - MaxRegisterResults);
static_assert(MaxRegisterResults == 1, "expected a single register result");
switch (type.kind()) {
case ValType::I32:
cur_ = ABIResult(type, ReturnReg);
break;
case ValType::I64:
cur_ = ABIResult(type, ReturnReg64);
break;
case ValType::F32:
cur_ = ABIResult(type, ReturnFloat32Reg);
break;
case ValType::F64:
cur_ = ABIResult(type, ReturnDoubleReg);
break;
case ValType::Ref:
cur_ = ABIResult(type, ReturnReg);
break;
#ifdef ENABLE_WASM_SIMD
case ValType::V128:
cur_ = ABIResult(type, ReturnSimd128Reg);
break;
#endif
default:
MOZ_CRASH("Unexpected result type");
}
}
void ABIResultIter::settleNext() {
MOZ_ASSERT(direction_ == Next);
MOZ_ASSERT(!done());
uint32_t typeIndex = count_ - index_ - 1;
ValType type = type_[typeIndex];
if (index_ < MaxRegisterResults) {
settleRegister(type);
return;
}
cur_ = ABIResult(type, nextStackOffset_);
nextStackOffset_ += ResultStackSize(type);
}
void ABIResultIter::settlePrev() {
MOZ_ASSERT(direction_ == Prev);
MOZ_ASSERT(!done());
uint32_t typeIndex = index_;
ValType type = type_[typeIndex];
if (count_ - index_ - 1 < MaxRegisterResults) {
settleRegister(type);
return;
}
uint32_t size = ResultStackSize(type);
MOZ_ASSERT(nextStackOffset_ >= size);
nextStackOffset_ -= size;
cur_ = ABIResult(type, nextStackOffset_);
}
#ifdef WASM_CODEGEN_DEBUG
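// GenPrint emits code that preserves all live registers, sets up an ABI call
// to a C++ print helper (chosen by the passArgAndCall closure), and restores
// the registers afterwards. It emits nothing unless the given debug channel
// is enabled.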
template <class Closure>
static void GenPrint(DebugChannel channel, MacroAssembler& masm,
const Maybe<Register>& taken, Closure passArgAndCall) {
if (!IsCodegenDebugEnabled(channel)) {
return;
}
AllocatableRegisterSet regs(RegisterSet::All());
LiveRegisterSet save(regs.asLiveSet());
masm.PushRegsInMask(save);
if (taken) {
regs.take(taken.value());
}
Register temp = regs.takeAnyGeneral();
{
MOZ_ASSERT(MaybeGetJitContext(),
"codegen debug checks require a jit context");
masm.setupUnalignedABICall(temp);
passArgAndCall(IsCompilingWasm(), temp);
}
masm.PopRegsInMask(save);
}
static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
const char* fmt, ...) {
va_list ap;
va_start(ap, fmt);
UniqueChars str = JS_vsmprintf(fmt, ap);
va_end(ap);
GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
// If we've gone this far, it means we're actually using the debugging
// strings. In this case, we leak them! This is only for debugging, and
// doing the right thing is cumbersome (in Ion, it'd mean adding a vector of
// strings to the IonScript; in wasm, it'd mean adding it to the current
// Module and serialize it properly).
const char* text = str.release();
masm.movePtr(ImmPtr((void*)text, ImmPtr::NoCheckToken()), temp);
masm.passABIArg(temp);
if (inWasm) {
masm.callDebugWithABI(SymbolicAddress::PrintText);
} else {
masm.callWithABI((void*)PrintText, MoveOp::GENERAL,
CheckUnsafeCallWithABI::DontCheckOther);
}
});
}
static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
const Register& src) {
GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
masm.passABIArg(src);
if (inWasm) {
masm.callDebugWithABI(SymbolicAddress::PrintI32);
} else {
masm.callWithABI((void*)PrintI32, MoveOp::GENERAL,
CheckUnsafeCallWithABI::DontCheckOther);
}
});
}
static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
const Register& src) {
GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
masm.passABIArg(src);
if (inWasm) {
masm.callDebugWithABI(SymbolicAddress::PrintPtr);
} else {
masm.callWithABI((void*)PrintPtr, MoveOp::GENERAL,
CheckUnsafeCallWithABI::DontCheckOther);
}
});
}
static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
const Register64& src) {
# if JS_BITS_PER_WORD == 64
GenPrintf(channel, masm, "i64 ");
GenPrintIsize(channel, masm, src.reg);
# else
GenPrintf(channel, masm, "i64(");
GenPrintIsize(channel, masm, src.low);
GenPrintIsize(channel, masm, src.high);
GenPrintf(channel, masm, ") ");
# endif
}
static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
const FloatRegister& src) {
GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
masm.passABIArg(src, MoveOp::FLOAT32);
if (inWasm) {
masm.callDebugWithABI(SymbolicAddress::PrintF32);
} else {
masm.callWithABI((void*)PrintF32, MoveOp::GENERAL,
CheckUnsafeCallWithABI::DontCheckOther);
}
});
}
static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
const FloatRegister& src) {
GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
masm.passABIArg(src, MoveOp::DOUBLE);
if (inWasm) {
masm.callDebugWithABI(SymbolicAddress::PrintF64);
} else {
masm.callWithABI((void*)PrintF64, MoveOp::GENERAL,
CheckUnsafeCallWithABI::DontCheckOther);
}
});
}
# ifdef ENABLE_WASM_SIMD
static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
const FloatRegister& src) {
// TODO: We might try to do something meaningful here once SIMD data are
// aligned and hence C++-ABI compliant. For now, just make ourselves visible.
GenPrintf(channel, masm, "v128");
}
# endif
#else
static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
const char* fmt, ...) {}
static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
const Register& src) {}
static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
const Register& src) {}
static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
const Register64& src) {}
static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
const FloatRegister& src) {}
static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
const FloatRegister& src) {}
# ifdef ENABLE_WASM_SIMD
static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
const FloatRegister& src) {}
# endif
#endif
static bool FinishOffsets(MacroAssembler& masm, Offsets* offsets) {
// On old ARM hardware, constant pools could be inserted and they need to
// be flushed before considering the size of the masm.
masm.flushBuffer();
offsets->end = masm.size();
return !masm.oom();
}
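// Check stack alignment both at stub-generation time (via masm.framePushed())
// and, in debug builds, at run time in the generated code.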
static void AssertStackAlignment(MacroAssembler& masm, uint32_t alignment,
uint32_t addBeforeAssert = 0) {
MOZ_ASSERT(
(sizeof(Frame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
masm.assertStackAlignment(alignment, addBeforeAssert);
}
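// Compute the number of stack bytes needed to pass the given argument types
// by running the ABI argument assignment to completion.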
template <class VectorT>
static unsigned StackArgBytes(const VectorT& args) {
ABIArgIter<VectorT> iter(args);
while (!iter.done()) {
iter++;
}
return iter.stackBytesConsumedSoFar();
}
static unsigned StackArgBytes(const FuncType& funcType) {
ArgTypeVector args(funcType);
return StackArgBytes(args);
}
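// Copy a 64-bit value between two memory locations, using a pair of 32-bit
// moves on 32-bit targets.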
static void Move64(MacroAssembler& masm, const Address& src,
const Address& dest, Register scratch) {
#if JS_BITS_PER_WORD == 32
masm.load32(LowWord(src), scratch);
masm.store32(scratch, LowWord(dest));
masm.load32(HighWord(src), scratch);
masm.store32(scratch, HighWord(dest));
#else
Register64 scratch64(scratch);
masm.load64(src, scratch64);
masm.store64(scratch64, dest);
#endif
}
static void SetupABIArguments(MacroAssembler& masm, const FuncExport& fe,
Register argv, Register scratch) {
// Copy parameters out of argv and into the registers/stack-slots specified by
// the system ABI.
//
// SetupABIArguments is only used for C++ -> wasm calls through callExport(),
// and V128 and Ref types (other than anyref) are not currently allowed.
ArgTypeVector args(fe.funcType());
for (ABIArgIter iter(args); !iter.done(); iter++) {
unsigned argOffset = iter.index() * sizeof(ExportArg);
Address src(argv, argOffset);
MIRType type = iter.mirType();
switch (iter->kind()) {
case ABIArg::GPR:
if (type == MIRType::Int32) {
masm.load32(src, iter->gpr());
} else if (type == MIRType::Int64) {
masm.load64(src, iter->gpr64());
} else if (type == MIRType::RefOrNull) {
masm.loadPtr(src, iter->gpr());
} else if (type == MIRType::StackResults) {
MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
masm.loadPtr(src, iter->gpr());
} else {
MOZ_CRASH("unknown GPR type");
}
break;
#ifdef JS_CODEGEN_REGISTER_PAIR
case ABIArg::GPR_PAIR:
if (type == MIRType::Int64) {
masm.load64(src, iter->gpr64());
} else {
MOZ_CRASH("wasm uses hardfp for function calls.");
}
break;
#endif
case ABIArg::FPU: {
static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
"ExportArg must be big enough to store SIMD values");
switch (type) {
case MIRType::Double:
masm.loadDouble(src, iter->fpu());
break;
case MIRType::Float32:
masm.loadFloat32(src, iter->fpu());
break;
case MIRType::Simd128:
#ifdef ENABLE_WASM_SIMD
// We will reach this point when we generate interpreter entry stubs
// for exports that receive v128 values, but the code will never be
// executed because such exports cannot be called from JS.
masm.breakpoint();
break;
#else
MOZ_CRASH("V128 not supported in SetupABIArguments");
#endif
default:
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected FPU type");
break;
}
break;
}
case ABIArg::Stack:
switch (type) {
case MIRType::Int32:
masm.load32(src, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase()));
break;
case MIRType::Int64: {
RegisterOrSP sp = masm.getStackPointer();
Move64(masm, src, Address(sp, iter->offsetFromArgBase()), scratch);
break;
}
case MIRType::RefOrNull:
masm.loadPtr(src, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase()));
break;
case MIRType::Double: {
ScratchDoubleScope fpscratch(masm);
masm.loadDouble(src, fpscratch);
masm.storeDouble(fpscratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase()));
break;
}
case MIRType::Float32: {
ScratchFloat32Scope fpscratch(masm);
masm.loadFloat32(src, fpscratch);
masm.storeFloat32(fpscratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase()));
break;
}
case MIRType::Simd128: {
#ifdef ENABLE_WASM_SIMD
// We will reach this point when we generate interpreter entry stubs
// for exports that receive v128 values, but the code will never be
// executed because such exports cannot be called from JS.
masm.breakpoint();
break;
#else
MOZ_CRASH("V128 not supported in SetupABIArguments");
#endif
}
case MIRType::StackResults: {
MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
masm.loadPtr(src, scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(),
iter->offsetFromArgBase()));
break;
}
default:
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE(
"unexpected stack arg type");
}
break;
case ABIArg::Uninitialized:
MOZ_CRASH("Uninitialized ABIArg kind");
}
}
}
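// Store the register result of an exported function call, if any, into *loc.
// Only the single register result is handled here; the iterator is used to
// locate it among the (possibly stack-allocated) results.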
static void StoreRegisterResult(MacroAssembler& masm, const FuncExport& fe,
Register loc) {
ResultType results = ResultType::Vector(fe.funcType().results());
DebugOnly<bool> sawRegisterResult = false;
for (ABIResultIter iter(results); !iter.done(); iter.next()) {
const ABIResult& result = iter.cur();
if (result.inRegister()) {
MOZ_ASSERT(!sawRegisterResult);
sawRegisterResult = true;
switch (result.type().kind()) {
case ValType::I32:
masm.store32(result.gpr(), Address(loc, 0));
break;
case ValType::I64:
masm.store64(result.gpr64(), Address(loc, 0));
break;
case ValType::V128:
#ifdef ENABLE_WASM_SIMD
masm.storeUnalignedSimd128(result.fpr(), Address(loc, 0));
break;
#else
MOZ_CRASH("V128 not supported in StoreABIReturn");
#endif
case ValType::F32:
masm.canonicalizeFloat(result.fpr());
masm.storeFloat32(result.fpr(), Address(loc, 0));
break;
case ValType::F64:
masm.canonicalizeDouble(result.fpr());
masm.storeDouble(result.fpr(), Address(loc, 0));
break;
case ValType::Ref:
masm.storePtr(result.gpr(), Address(loc, 0));
break;
}
}
}
MOZ_ASSERT(sawRegisterResult == (results.length() > 0));
}
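// The set of registers the entry stub must save and restore on behalf of its
// caller, with per-architecture adjustments below.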
#if defined(JS_CODEGEN_ARM)
// The ARM system ABI also includes d15 & s31 in the non-volatile float
// registers. Also exclude lr (a.k.a. r14) as we preserve it manually.
static const LiveRegisterSet NonVolatileRegs =
LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask &
~(uint32_t(1) << Registers::lr)),
FloatRegisterSet(FloatRegisters::NonVolatileMask |
(1ULL << FloatRegisters::d15) |
(1ULL << FloatRegisters::s31)));
#elif defined(JS_CODEGEN_ARM64)
// Exclude the Link Register (x30) because it is preserved manually.
//
// Include x16 (scratch) to make a 16-byte aligned amount of integer registers.
// Include d31 (scratch) to make a 16-byte aligned amount of floating registers.
static const LiveRegisterSet NonVolatileRegs =
LiveRegisterSet(GeneralRegisterSet((Registers::NonVolatileMask &
~(uint32_t(1) << Registers::lr)) |
(uint32_t(1) << Registers::x16)),
FloatRegisterSet(FloatRegisters::NonVolatileMask |
FloatRegisters::NonAllocatableMask));
#else
static const LiveRegisterSet NonVolatileRegs =
LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
FloatRegisterSet(FloatRegisters::NonVolatileMask));
#endif
#if defined(JS_CODEGEN_NONE)
static const unsigned NonVolatileRegsPushSize = 0;
#else
static const unsigned NonVolatileRegsPushSize =
NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
NonVolatileRegs.fpus().getPushSizeInBytes();
#endif
#ifdef ENABLE_WASM_REFTYPES
static const unsigned NumExtraPushed = 2; // tls and argv
#else
static const unsigned NumExtraPushed = 1; // argv
#endif
#ifdef JS_CODEGEN_ARM64
static const unsigned WasmPushSize = 16;
#else
static const unsigned WasmPushSize = sizeof(void*);
#endif
static const unsigned FramePushedBeforeAlign =
NonVolatileRegsPushSize + NumExtraPushed * WasmPushSize;
static void AssertExpectedSP(const MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
MOZ_ASSERT(sp.Is(masm.GetStackPointer64()));
#endif
}
template <class Operand>
static void WasmPush(MacroAssembler& masm, const Operand& op) {
#ifdef JS_CODEGEN_ARM64
// Allocate a pad word so that SP can remain properly aligned. |op| will be
// written at the lower-addressed of the two words pushed here.
masm.reserveStack(WasmPushSize);
masm.storePtr(op, Address(masm.getStackPointer(), 0));
#else
masm.Push(op);
#endif
}
static void WasmPop(MacroAssembler& masm, Register r) {
#ifdef JS_CODEGEN_ARM64
// Also pop the pad word allocated by WasmPush.
masm.loadPtr(Address(masm.getStackPointer(), 0), r);
masm.freeStack(WasmPushSize);
#else
masm.Pop(r);
#endif
}
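// On ARM64, JIT code addresses the stack through the PseudoStackPointer, so
// copy the real SP into it before running code that follows the JIT ABI.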
static void MoveSPForJitABI(MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
masm.moveStackPtrTo(PseudoStackPointer);
#endif
}
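// Call a function export. Lazy stubs receive the callee's address as an
// immediate; eager stubs emit an internal call that is resolved by function
// index.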
static void CallFuncExport(MacroAssembler& masm, const FuncExport& fe,
const Maybe<ImmPtr>& funcPtr) {
MOZ_ASSERT(fe.hasEagerStubs() == !funcPtr);
if (funcPtr) {
masm.call(*funcPtr);
} else {
masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
}
}
STATIC_ASSERT_ANYREF_IS_JSOBJECT; // Strings are currently boxed
// Unboxing is branchy and contorted because of Spectre mitigations - we don't
// have enough scratch registers. Were it not for the spectre mitigations in
// branchTestObjClass, the branch nest below would be restructured significantly
// by inverting branches and using fewer registers.
// Unbox an anyref in src (clobbering src in the process) and then re-box it as
// a Value in *dst. See the definition of AnyRef for a discussion of pointer
// representation.
static void UnboxAnyrefIntoValue(MacroAssembler& masm, Register tls,
Register src, const Address& dst,
Register scratch) {
MOZ_ASSERT(src != scratch);
// Not actually the value we're passing, but we've no way of
// decoding anything better.
GenPrintPtr(DebugChannel::Import, masm, src);
Label notNull, mustUnbox, done;
masm.branchTestPtr(Assembler::NonZero, src, src, &notNull);
masm.storeValue(NullValue(), dst);
masm.jump(&done);
masm.bind(&notNull);
// The type test will clear src if the test fails, so store early.
masm.storeValue(JSVAL_TYPE_OBJECT, src, dst);
// Spectre mitigations: see comment above about efficiency.
masm.branchTestObjClass(Assembler::Equal, src,
Address(tls, offsetof(TlsData, valueBoxClass)),
scratch, src, &mustUnbox);
masm.jump(&done);
masm.bind(&mustUnbox);
Move64(masm, Address(src, WasmValueBox::offsetOfValue()), dst, scratch);
masm.bind(&done);
}
// Unbox an anyref in src and then re-box it as a Value in dst.
// See the definition of AnyRef for a discussion of pointer representation.
static void UnboxAnyrefIntoValueReg(MacroAssembler& masm, Register tls,
Register src, ValueOperand dst,
Register scratch) {
MOZ_ASSERT(src != scratch);
#if JS_BITS_PER_WORD == 32
MOZ_ASSERT(dst.typeReg() != scratch);
MOZ_ASSERT(dst.payloadReg() != scratch);
#else
MOZ_ASSERT(dst.valueReg() != scratch);
#endif
// Not actually the value we're passing, but we've no way of
// decoding anything better.
GenPrintPtr(DebugChannel::Import, masm, src);
Label notNull, mustUnbox, done;
masm.branchTestPtr(Assembler::NonZero, src, src, &notNull);
masm.moveValue(NullValue(), dst);
masm.jump(&done);
masm.bind(&notNull);
// The type test will clear src if the test fails, so store early.
masm.moveValue(TypedOrValueRegister(MIRType::Object, AnyRegister(src)), dst);
// Spectre mitigations: see comment above about efficiency.
masm.branchTestObjClass(Assembler::Equal, src,
Address(tls, offsetof(TlsData, valueBoxClass)),
scratch, src, &mustUnbox);
masm.jump(&done);
masm.bind(&mustUnbox);
masm.loadValue(Address(src, WasmValueBox::offsetOfValue()), dst);
masm.bind(&done);
}
// Box the Value in src as an anyref in dest. src and dest must not overlap.
// See the definition of AnyRef for a discussion of pointer representation.
static void BoxValueIntoAnyref(MacroAssembler& masm, ValueOperand src,
Register dest, Label* oolConvert) {
Label nullValue, objectValue, done;
{
ScratchTagScope tag(masm, src);
masm.splitTagForTest(src, tag);
masm.branchTestObject(Assembler::Equal, tag, &objectValue);
masm.branchTestNull(Assembler::Equal, tag, &nullValue);
masm.jump(oolConvert);
}
masm.bind(&nullValue);
masm.xorPtr(dest, dest);
masm.jump(&done);
masm.bind(&objectValue);
masm.unboxObject(src, dest);
masm.bind(&done);
}
// Generate a stub that enters wasm from a C++ caller via the native ABI. The
// signature of the entry point is Module::ExportFuncPtr. The exported wasm
// function has an ABI derived from its specific signature, so this function
// must map from the ABI of ExportFuncPtr to the export's signature's ABI.
static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe,
const Maybe<ImmPtr>& funcPtr,
Offsets* offsets) {
AssertExpectedSP(masm);
masm.haltingAlign(CodeAlignment);
offsets->begin = masm.currentOffset();
// Save the return address if it wasn't already saved by the call insn.
#ifdef JS_USE_LINK_REGISTER
# if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || \
defined(JS_CODEGEN_MIPS64)
masm.pushReturnAddress();
# elif defined(JS_CODEGEN_ARM64)
// WasmPush updates framePushed() unlike pushReturnAddress(), but that's
// cancelled by the setFramePushed() below.
WasmPush(masm, lr);
# else
MOZ_CRASH("Implement this");
# endif
#endif
// Save all caller non-volatile registers before we clobber them here and in
// the wasm callee (which does not preserve non-volatile registers).
masm.setFramePushed(0);
masm.PushRegsInMask(NonVolatileRegs);
MOZ_ASSERT(masm.framePushed() == NonVolatileRegsPushSize);
// Put the 'argv' argument into a non-argument/return/TLS register so that
// we can use 'argv' while we fill in the arguments for the wasm callee.
// Use a second non-argument/return register as temporary scratch.
Register argv = ABINonArgReturnReg0;
Register scratch = ABINonArgReturnReg1;
// Read the arguments of wasm::ExportFuncPtr according to the native ABI.
// The entry stub's frame is 1 word.
const unsigned argBase = sizeof(void*) + masm.framePushed();
ABIArgGenerator abi;
ABIArg arg;
// arg 1: ExportArg*
arg = abi.next(MIRType::Pointer);
if (arg.kind() == ABIArg::GPR) {
masm.movePtr(arg.gpr(), argv);
} else {
masm.loadPtr(
Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
argv);
}
// Arg 2: TlsData*
arg = abi.next(MIRType::Pointer);
if (arg.kind() == ABIArg::GPR) {
masm.movePtr(arg.gpr(), WasmTlsReg);
} else {
masm.loadPtr(
Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
WasmTlsReg);
}
#ifdef ENABLE_WASM_REFTYPES
WasmPush(masm, WasmTlsReg);
#endif
// Save 'argv' on the stack so that we can recover it after the call.
WasmPush(masm, argv);
// Since we're about to dynamically align the stack, reset the frame depth
// so we can still assert static stack depth balancing.
MOZ_ASSERT(masm.framePushed() == FramePushedBeforeAlign);
masm.setFramePushed(0);
// Dynamically align the stack since ABIStackAlignment is not necessarily
// WasmStackAlignment. Preserve SP so it can be restored after the call.
#ifdef JS_CODEGEN_ARM64
static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
#else
masm.moveStackPtrTo(scratch);
masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
masm.Push(scratch);
#endif
// Reserve stack space for the call.
unsigned argDecrement = StackDecrementForCall(
WasmStackAlignment, masm.framePushed(), StackArgBytes(fe.funcType()));
masm.reserveStack(argDecrement);
// Copy parameters out of argv and into the wasm ABI registers/stack-slots.
SetupABIArguments(masm, fe, argv, scratch);
// Setup wasm register state. The nullness of the frame pointer is used to
// determine whether the call ended in success or failure.
masm.movePtr(ImmWord(0), FramePointer);
masm.loadWasmPinnedRegsFromTls();
// Call into the real function. Note that, due to the throw stub, fp, tls
// and pinned registers may be clobbered.
masm.assertStackAlignment(WasmStackAlignment);
CallFuncExport(masm, fe, funcPtr);
masm.assertStackAlignment(WasmStackAlignment);
// Pop the arguments pushed after the dynamic alignment.
masm.freeStack(argDecrement);
// Pop the stack pointer to its value right before dynamic alignment.
#ifdef JS_CODEGEN_ARM64
static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
#else
masm.PopStackPtr();
#endif
MOZ_ASSERT(masm.framePushed() == 0);
masm.setFramePushed(FramePushedBeforeAlign);
// Recover the 'argv' pointer which was saved before aligning the stack.
WasmPop(masm, argv);
#ifdef ENABLE_WASM_REFTYPES
WasmPop(masm, WasmTlsReg);
#endif
// Store the register result, if any, in argv[0].
// No spectre.index_masking is required, as the value leaves ReturnReg.
StoreRegisterResult(masm, fe, argv);
// After the ReturnReg is stored into argv[0] but before fp is clobbered by
// the PopRegsInMask(NonVolatileRegs) below, set the return value based on
// whether fp is null (which is the case for successful returns) or the
// FailFP magic value (set by the throw stub).
Label success, join;
masm.branchTestPtr(Assembler::Zero, FramePointer, FramePointer, &success);
#ifdef DEBUG
Label ok;
masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &ok);
masm.breakpoint();
masm.bind(&ok);
#endif
masm.move32(Imm32(false), ReturnReg);
masm.jump(&join);
masm.bind(&success);
masm.move32(Imm32(true), ReturnReg);
masm.bind(&join);
// Restore clobbered non-volatile registers of the caller.
masm.PopRegsInMask(NonVolatileRegs);
MOZ_ASSERT(masm.framePushed() == 0);
#if defined(JS_CODEGEN_ARM64)
masm.setFramePushed(WasmPushSize);
WasmPop(masm, lr);
masm.abiret();
#else
masm.ret();
#endif
return FinishOffsets(masm, offsets);
}
#ifdef JS_PUNBOX64
static const ValueOperand ScratchValIonEntry = ValueOperand(ABINonArgReg0);
#else
static const ValueOperand ScratchValIonEntry =
ValueOperand(ABINonArgReg0, ABINonArgReg1);
#endif
static const Register ScratchIonEntry = ABINonArgReg2;
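// Call a runtime helper either through its absolute address or through a
// patchable symbolic address; callers use the absolute form only for
// lazily-generated (non-serialized) stubs.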
static void CallSymbolicAddress(MacroAssembler& masm, bool isAbsolute,
SymbolicAddress sym) {
if (isAbsolute) {
masm.call(ImmPtr(SymbolicAddressTarget(sym), ImmPtr::NoCheckToken()));
} else {
masm.call(sym);
}
}
// Load instance's TLS from the callee.
static void GenerateJitEntryLoadTls(MacroAssembler& masm, unsigned frameSize) {
AssertExpectedSP(masm);
// ScratchIonEntry := callee => JSFunction*
unsigned offset = frameSize + JitFrameLayout::offsetOfCalleeToken();
masm.loadFunctionFromCalleeToken(Address(masm.getStackPointer(), offset),
ScratchIonEntry);
// ScratchIonEntry := callee->getExtendedSlot(WASM_TLSDATA_SLOT)->toPrivate()
// => TlsData*
offset = FunctionExtended::offsetOfExtendedSlot(
FunctionExtended::WASM_TLSDATA_SLOT);
masm.loadPrivate(Address(ScratchIonEntry, offset), WasmTlsReg);
}
// Creates a JS fake exit frame for wasm, so the frame iterators just use
// JSJit frame iteration.
static void GenerateJitEntryThrow(MacroAssembler& masm, unsigned frameSize) {
AssertExpectedSP(masm);
MOZ_ASSERT(masm.framePushed() == frameSize);
GenerateJitEntryLoadTls(masm, frameSize);
masm.freeStack(frameSize);
MoveSPForJitABI(masm);
masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, cx)), ScratchIonEntry);
masm.enterFakeExitFrameForWasm(ScratchIonEntry, ScratchIonEntry,
ExitFrameType::WasmGenericJitEntry);
masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)),
ScratchIonEntry);
masm.loadPtr(
Address(ScratchIonEntry, Instance::offsetOfJSJitExceptionHandler()),
ScratchIonEntry);
masm.jump(ScratchIonEntry);
}
// Helper function for allocating a BigInt and initializing it from an I64
// in GenerateJitEntry and GenerateImportInterpExit. The return result is
// written to scratch.
static void GenerateBigIntInitialization(MacroAssembler& masm,
unsigned bytesPushedByPrologue,
Register64 input, Register scratch,
const FuncExport* fe, Label* fail) {
#if JS_BITS_PER_WORD == 32
MOZ_ASSERT(input.low != scratch);
MOZ_ASSERT(input.high != scratch);
#else
MOZ_ASSERT(input.reg != scratch);
#endif
// We need to avoid clobbering other argument registers and the input.
AllocatableRegisterSet regs(RegisterSet::Volatile());
LiveRegisterSet save(regs.asLiveSet());
masm.PushRegsInMask(save);
unsigned frameSize = StackDecrementForCall(
ABIStackAlignment, masm.framePushed() + bytesPushedByPrologue, 0);
masm.reserveStack(frameSize);
masm.assertStackAlignment(ABIStackAlignment);
// Needs to use a different call type depending on the stub it's used from.
if (fe) {
CallSymbolicAddress(masm, !fe->hasEagerStubs(),
SymbolicAddress::AllocateBigInt);
} else {
masm.call(SymbolicAddress::AllocateBigInt);
}
masm.storeCallPointerResult(scratch);
masm.branchTest32(Assembler::Zero, scratch, scratch, fail);
masm.assertStackAlignment(ABIStackAlignment);
masm.freeStack(frameSize);
LiveRegisterSet ignore;
ignore.add(scratch);
masm.PopRegsInMaskIgnore(save, ignore);
masm.initializeBigInt64(Scalar::BigInt64, scratch, input);
}
// Generate a stub that enters wasm from a jit code caller via the jit ABI.
//
// ARM64 note: This does not save the PseudoStackPointer so we must be sure to
// recompute it on every return path, be it normal return or exception return.
// The JIT code we return to assumes it is correct.
static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
const FuncExport& fe, const Maybe<ImmPtr>& funcPtr,
Offsets* offsets) {
AssertExpectedSP(masm);
RegisterOrSP sp = masm.getStackPointer();
GenerateJitEntryPrologue(masm, offsets);
// The jit caller has set up the following stack layout (sp grows to the
// left):
// <-- retAddr | descriptor | callee | argc | this | arg1..N
unsigned normalBytesNeeded = StackArgBytes(fe.funcType());
MIRTypeVector coerceArgTypes;
MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Int32));
MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
unsigned oolBytesNeeded = StackArgBytes(coerceArgTypes);
unsigned bytesNeeded = std::max(normalBytesNeeded, oolBytesNeeded);
// Note the jit caller ensures the stack is aligned *after* the call
// instruction.
unsigned frameSize = StackDecrementForCall(WasmStackAlignment,
masm.framePushed(), bytesNeeded);
// Reserve stack space for wasm ABI arguments, set up like this:
// <-- ABI args | padding
masm.reserveStack(frameSize);
GenerateJitEntryLoadTls(masm, frameSize);
#ifdef ENABLE_WASM_SIMD
if (fe.funcType().hasV128ArgOrRet()) {
CallSymbolicAddress(masm, !fe.hasEagerStubs(),
SymbolicAddress::ReportV128JSCall);
GenerateJitEntryThrow(masm, frameSize);
return FinishOffsets(masm, offsets);
}
#endif
FloatRegister scratchF = ABINonArgDoubleReg;
Register scratchG = ScratchIonEntry;
ValueOperand scratchV = ScratchValIonEntry;
GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
fe.funcIndex());
// We do two loops:
// - one loop up-front will make sure that all the Value tags fit the
// expected signature argument types. If at least one inline conversion
// fails, we just jump to the OOL path which will call into C++. Inline
// conversions are ordered in the way we expect them to happen the most.
// - the second loop will unbox the arguments into the right registers.
Label oolCall;
for (size_t i = 0; i < fe.funcType().args().length(); i++) {
unsigned jitArgOffset = frameSize + JitFrameLayout::offsetOfActualArg(i);
Address jitArgAddr(sp, jitArgOffset);
masm.loadValue(jitArgAddr, scratchV);
Label next;
switch (fe.funcType().args()[i].kind()) {
case ValType::I32: {
ScratchTagScope tag(masm, scratchV);
masm.splitTagForTest(scratchV, tag);
// For int32 inputs, just skip.
masm.branchTestInt32(Assembler::Equal, tag, &next);
// For double inputs, unbox, truncate and store back.
Label storeBack, notDouble;
masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
{
ScratchTagScopeRelease _(&tag);
masm.unboxDouble(scratchV, scratchF);
masm.branchTruncateDoubleMaybeModUint32(scratchF, scratchG, &oolCall);
masm.jump(&storeBack);
}
masm.bind(&notDouble);
// For null or undefined, store 0.
Label nullOrUndefined, notNullOrUndefined;
masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);
masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
masm.bind(&nullOrUndefined);
{
ScratchTagScopeRelease _(&tag);
masm.storeValue(Int32Value(0), jitArgAddr);
}
masm.jump(&next);
masm.bind(&notNullOrUndefined);
// For booleans, store the number value back. Other types (symbol,
// object, strings) go to the C++ call.
masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
masm.unboxBoolean(scratchV, scratchG);
// fallthrough:
masm.bind(&storeBack);
{
ScratchTagScopeRelease _(&tag);
masm.storeValue(JSVAL_TYPE_INT32, scratchG, jitArgAddr);
}
break;
}
case ValType::I64: {
ScratchTagScope tag(masm, scratchV);
masm.splitTagForTest(scratchV, tag);
// For BigInt inputs, just skip. Otherwise go to C++ for other
// types that require creating a new BigInt or erroring.
masm.branchTestBigInt(Assembler::NotEqual, tag, &oolCall);
masm.jump(&next);
break;
}
case ValType::F32:
case ValType::F64: {
// Note we can reuse the same code for f32/f64 here, since for the
// case of f32, the conversion of f64 to f32 will happen in the
// second loop.
ScratchTagScope tag(masm, scratchV);
masm.splitTagForTest(scratchV, tag);
// For double inputs, just skip.
masm.branchTestDouble(Assembler::Equal, tag, &next);
// For int32 inputs, convert and rebox.
Label storeBack, notInt32;
{
ScratchTagScopeRelease _(&tag);
masm.branchTestInt32(Assembler::NotEqual, scratchV, &notInt32);
masm.int32ValueToDouble(scratchV, scratchF);
masm.jump(&storeBack);
}
masm.bind(&notInt32);
// For undefined (missing argument), store NaN.
Label notUndefined;
masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
{
ScratchTagScopeRelease _(&tag);
masm.storeValue(DoubleValue(JS::GenericNaN()), jitArgAddr);
masm.jump(&next);
}
masm.bind(&notUndefined);
// +null is 0.
Label notNull;
masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
{
ScratchTagScopeRelease _(&tag);
masm.storeValue(DoubleValue(0.), jitArgAddr);
}
masm.jump(&next);
masm.bind(&notNull);
// For booleans, store the number value back. Other types (symbol,
// object, strings) go to the C++ call.
masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
masm.boolValueToDouble(scratchV, scratchF);
// fallthrough:
masm.bind(&storeBack);
{
ScratchTagScopeRelease _(&tag);
masm.boxDouble(scratchF, jitArgAddr);
}
break;
}
case ValType::Ref: {
switch (fe.funcType().args()[i].refTypeKind()) {
case RefType::Any: {
ScratchTagScope tag(masm, scratchV);
masm.splitTagForTest(scratchV, tag);
// For object inputs, we handle object and null inline, everything
// else requires an actual box and we go out of line to allocate
// that.
masm.branchTestObject(Assembler::Equal, tag, &next);
masm.branchTestNull(Assembler::Equal, tag, &next);
masm.jump(&oolCall);
break;
}
case RefType::Func:
case RefType::TypeIndex: {
// Guarded against by temporarilyUnsupportedReftypeForEntry()
MOZ_CRASH("unexpected argument type when calling from the jit");
}
}
break;
}
case ValType::V128: {
// Guarded against by hasV128ArgOrRet()
MOZ_CRASH("unexpected argument type when calling from the jit");
}
default: {
MOZ_CRASH("unexpected argument type when calling from the jit");
}
}
masm.nopAlign(CodeAlignment);
masm.bind(&next);
}
Label rejoinBeforeCall;
masm.bind(&rejoinBeforeCall);
// Convert all the expected values to unboxed values on the stack.
ArgTypeVector args(fe.funcType());
for (ABIArgIter iter(args); !iter.done(); iter++) {
unsigned jitArgOffset =
frameSize + JitFrameLayout::offsetOfActualArg(iter.index());
Address argv(sp, jitArgOffset);
bool isStackArg = iter->kind() == ABIArg::Stack;
switch (iter.mirType()) {
case MIRType::Int32: {
Register target = isStackArg ? ScratchIonEntry : iter->gpr();
masm.unboxInt32(argv, target);
GenPrintIsize(DebugChannel::Function, masm, target);
if (isStackArg) {
masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
}
break;
}
case MIRType::Int64: {
// The coercion has provided a BigInt value by this point, which
// we need to convert to an I64 here.
if (isStackArg) {
Address dst(sp, iter->offsetFromArgBase());
Register src = scratchV.payloadOrValueReg();
#if JS_BITS_PER_WORD == 64
Register64 scratch64(scratchG);
#else
Register64 scratch64(scratchG, ABINonArgReg3);
#endif
masm.unboxBigInt(argv, src);
masm.loadBigInt64(src, scratch64);
GenPrintI64(DebugChannel::Function, masm, scratch64);
masm.store64(scratch64, dst);
} else {
Register src = scratchG;
Register64 target = iter->gpr64();
masm.unboxBigInt(argv, src);
masm.loadBigInt64(src, target);
GenPrintI64(DebugChannel::Function, masm, target);
}
break;
}
case MIRType::Float32: {
FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
masm.unboxDouble(argv, ABINonArgDoubleReg);
masm.convertDoubleToFloat32(ABINonArgDoubleReg, target);
GenPrintF32(DebugChannel::Function, masm, target.asSingle());
if (isStackArg) {
masm.storeFloat32(target, Address(sp, iter->offsetFromArgBase()));
}
break;
}
case MIRType::Double: {
FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
masm.unboxDouble(argv, target);
GenPrintF64(DebugChannel::Function, masm, target);
if (isStackArg) {
masm.storeDouble(target, Address(sp, iter->offsetFromArgBase()));
}
break;
}
case MIRType::RefOrNull: {
Register target = isStackArg ? ScratchIonEntry : iter->gpr();
masm.unboxObjectOrNull(argv, target);
GenPrintPtr(DebugChannel::Function, masm, target);
if (isStackArg) {
masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
}
break;
}
default: {
MOZ_CRASH("unexpected input argument when calling from jit");
}
}
}
GenPrintf(DebugChannel::Function, masm, "\n");
// Setup wasm register state.
masm.loadWasmPinnedRegsFromTls();
// Call into the real function. Note that, due to the throw stub, fp, tls
// and pinned registers may be clobbered.
masm.assertStackAlignment(WasmStackAlignment);
CallFuncExport(masm, fe, funcPtr);
masm.assertStackAlignment(WasmStackAlignment);
// If fp is equal to the FailFP magic value (set by the throw stub), then
// report the exception to the JIT caller by jumping into the exception
// stub; otherwise the FP value is still set to the parent ion frame value.
Label exception;
masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &exception);
// Pop arguments.
masm.freeStack(frameSize);
GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
fe.funcIndex());
// Store the return value in the JSReturnOperand.
const ValTypeVector& results = fe.funcType().results();
if (results.length() == 0) {
GenPrintf(DebugChannel::Function, masm, "void");
masm.moveValue(UndefinedValue(), JSReturnOperand);
} else {
MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
switch (results[0].kind()) {
case ValType::I32:
GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
// No spectre.index_masking is required, as the value is boxed.
masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand);
break;
case ValType::F32: {
masm.canonicalizeFloat(ReturnFloat32Reg);
masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
ScratchDoubleScope fpscratch(masm);
masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
break;
}
case ValType::F64: {
masm.canonicalizeDouble(ReturnDoubleReg);
GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
ScratchDoubleScope fpscratch(masm);
masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
break;
}
case ValType::I64: {
GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
GenerateBigIntInitialization(masm, 0, ReturnReg64, scratchG, &fe,
&exception);
masm.boxNonDouble(JSVAL_TYPE_BIGINT, scratchG, JSReturnOperand);
break;
}
case ValType::V128: {
MOZ_CRASH("unexpected return type when calling from ion to wasm");
}
case ValType::Ref: {
switch (results[0].refTypeKind()) {
case RefType::Func:
// For FuncRef use the AnyRef path for now, since that will work.
case RefType::Any:
// Per comment above, the call may have clobbered the Tls register,
// so reload since unboxing will need it.
GenerateJitEntryLoadTls(masm, /* frameSize */ 0);
UnboxAnyrefIntoValueReg(masm, WasmTlsReg, ReturnReg,
JSReturnOperand, WasmJitEntryReturnScratch);
break;
case RefType::TypeIndex:
MOZ_CRASH("returning reference in jitentry NYI");
}
break;
}
}
}
GenPrintf(DebugChannel::Function, masm, "\n");
MOZ_ASSERT(masm.framePushed() == 0);
#ifdef JS_CODEGEN_ARM64
masm.loadPtr(Address(sp, 0), lr);
masm.addToStackPtr(Imm32(8));
masm.moveStackPtrTo(PseudoStackPointer);
masm.abiret();
#else
masm.ret();
#endif
// Generate an OOL call to the C++ conversion path.
if (fe.funcType().args().length()) {
masm.bind(&oolCall);
masm.setFramePushed(frameSize);
ABIArgMIRTypeIter argsIter(coerceArgTypes);
// argument 0: function export index.
if (argsIter->kind() == ABIArg::GPR) {
masm.movePtr(ImmWord(funcExportIndex), argsIter->gpr());
} else {
masm.storePtr(ImmWord(funcExportIndex),
Address(sp, argsIter->offsetFromArgBase()));
}
argsIter++;
// argument 1: tlsData
if (argsIter->kind() == ABIArg::GPR) {
masm.movePtr(WasmTlsReg, argsIter->gpr());
} else {
masm.storePtr(WasmTlsReg, Address(sp, argsIter->offsetFromArgBase()));
}
argsIter++;
// argument 2: effective address of start of argv
Address argv(sp, masm.framePushed() + JitFrameLayout::offsetOfActualArg(0));
if (argsIter->kind() == ABIArg::GPR) {
masm.computeEffectiveAddress(argv, argsIter->gpr());
} else {
masm.computeEffectiveAddress(argv, ScratchIonEntry);
masm.storePtr(ScratchIonEntry,
Address(sp, argsIter->offsetFromArgBase()));
}
argsIter++;
MOZ_ASSERT(argsIter.done());
masm.assertStackAlignment(ABIStackAlignment);
CallSymbolicAddress(masm, !fe.hasEagerStubs(),
SymbolicAddress::CoerceInPlace_JitEntry);
masm.assertStackAlignment(ABIStackAlignment);
// No spectre.index_masking is required, as the return value is used as a
// bool.
masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg,
&rejoinBeforeCall);
}
// Prepare to throw: reload WasmTlsReg from the frame.
masm.bind(&exception);
masm.setFramePushed(frameSize);
GenerateJitEntryThrow(masm, frameSize);
return FinishOffsets(masm, offsets);
}
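// Generate a call from jitted JS code directly to a specific wasm function
// (the Ion-to-wasm "fast path"): build a fake exit frame, move the arguments
// into wasm ABI locations, call the function's unchecked entry point, and
// leave the result where Ion expects it.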
void wasm::GenerateDirectCallFromJit(MacroAssembler& masm, const FuncExport& fe,
const Instance& inst,
const JitCallStackArgVector& stackArgs,
bool profilingEnabled, Register scratch,
uint32_t* callOffset) {
MOZ_ASSERT(!IsCompilingWasm());
size_t framePushedAtStart = masm.framePushed();
if (profilingEnabled) {
// FramePointer isn't volatile, manually preserve it because it will be
// clobbered below.
masm.Push(FramePointer);
} else {
#ifdef DEBUG
// Ensure that the FramePointer is actually Ion-volatile. This might
// assert when bug 1426134 lands.
AllocatableRegisterSet set(RegisterSet::All());
TakeJitRegisters(/* profiling */ false, &set);
MOZ_ASSERT(set.has(FramePointer),
"replace the whole if branch by the then body when this fails");
#endif
}
// Note, if code here pushes a reference value into the frame for its own
// purposes (and not just as an argument to the callee) then the frame must be
// traced in TraceJitExitFrame, see the case there for DirectWasmJitCall. The
// callee will trace values that are pushed as arguments, however.
// Push a special frame descriptor that indicates the frame size so we can
// directly iterate from the current JIT frame without an extra call.
*callOffset = masm.buildFakeExitFrame(scratch);
masm.loadJSContext(scratch);
masm.moveStackPtrTo(FramePointer);
masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::DirectWasmJitCall);
masm.orPtr(Imm32(ExitOrJitEntryFPTag), FramePointer);
// Move stack arguments to their final locations.
unsigned bytesNeeded = StackArgBytes(fe.funcType());
bytesNeeded = StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
bytesNeeded);
if (bytesNeeded) {
masm.reserveStack(bytesNeeded);
}
GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
fe.funcIndex());
ArgTypeVector args(fe.funcType());
for (ABIArgIter iter(args); !iter.done(); iter++) {
MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != scratch);
MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != FramePointer);
if (iter->kind() != ABIArg::Stack) {
switch (iter.mirType()) {
case MIRType::Int32:
GenPrintIsize(DebugChannel::Function, masm, iter->gpr());
break;
case MIRType::Int64:
GenPrintI64(DebugChannel::Function, masm, iter->gpr64());
break;
case MIRType::Float32:
GenPrintF32(DebugChannel::Function, masm, iter->fpu());
break;
case MIRType::Double:
GenPrintF64(DebugChannel::Function, masm, iter->fpu());
break;
case MIRType::RefOrNull:
GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
break;
case MIRType::StackResults:
MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
break;
default:
MOZ_CRASH("ion to wasm fast path can only handle i32/f32/f64");
}
continue;
}
Address dst(masm.getStackPointer(), iter->offsetFromArgBase());
const JitCallStackArg& stackArg = stackArgs[iter.index()];
switch (stackArg.tag()) {
case JitCallStackArg::Tag::Imm32:
GenPrintf(DebugChannel::Function, masm, "%d ", stackArg.imm32());
masm.storePtr(ImmWord(stackArg.imm32()), dst);
break;
case JitCallStackArg::Tag::GPR:
MOZ_ASSERT(stackArg.gpr() != scratch);
MOZ_ASSERT(stackArg.gpr() != FramePointer);
GenPrintIsize(DebugChannel::Function, masm, stackArg.gpr());
masm.storePtr(stackArg.gpr(), dst);
break;
case JitCallStackArg::Tag::FPU:
switch (iter.mirType()) {
case MIRType::Double:
GenPrintF64(DebugChannel::Function, masm, stackArg.fpu());
masm.storeDouble(stackArg.fpu(), dst);
break;
case MIRType::Float32:
GenPrintF32(DebugChannel::Function, masm, stackArg.fpu());
masm.storeFloat32(stackArg.fpu(), dst);
break;
default:
MOZ_CRASH(
"unexpected MIR type for a float register in wasm fast call");
}
break;
case JitCallStackArg::Tag::Address: {
// The address offsets were valid *before* we pushed our frame.
Address src = stackArg.addr();
src.offset += masm.framePushed() - framePushedAtStart;
switch (iter.mirType()) {
case MIRType::Double: {
ScratchDoubleScope fpscratch(masm);
masm.loadDouble(src, fpscratch);
GenPrintF64(DebugChannel::Function, masm, fpscratch);
masm.storeDouble(fpscratch, dst);
break;
}
case MIRType::Float32: {
ScratchFloat32Scope fpscratch(masm);
masm.loadFloat32(src, fpscratch);
GenPrintF32(DebugChannel::Function, masm, fpscratch);
masm.storeFloat32(fpscratch, dst);
break;
}
case MIRType::Int32: {
masm.loadPtr(src, scratch);
GenPrintIsize(DebugChannel::Function, masm, scratch);
masm.storePtr(scratch, dst);
break;
}
case MIRType::RefOrNull: {
masm.loadPtr(src, scratch);
GenPrintPtr(DebugChannel::Function, masm, scratch);
masm.storePtr(scratch, dst);
break;
}
case MIRType::StackResults: {
MOZ_CRASH("multi-value in ion to wasm fast path unimplemented");
}
default: {
MOZ_CRASH("unexpected MIR type for a stack slot in wasm fast call");
}
}
break;
}
case JitCallStackArg::Tag::Undefined: {
MOZ_CRASH("can't happen because of arg.kind() check");
}
}
}
GenPrintf(DebugChannel::Function, masm, "\n");
// Load tls; from now on, WasmTlsReg is live.
masm.movePtr(ImmPtr(inst.tlsData()), WasmTlsReg);
masm.loadWasmPinnedRegsFromTls();
// Actual call.
const CodeTier& codeTier = inst.code().codeTier(inst.code().bestTier());
const MetadataTier& metadata = codeTier.metadata();
const CodeRange& codeRange = metadata.codeRange(fe);
void* callee = codeTier.segment().base() + codeRange.funcUncheckedCallEntry();
masm.assertStackAlignment(WasmStackAlignment);
masm.callJit(ImmPtr(callee));
#ifdef JS_CODEGEN_ARM64
// WASM does not use the emulated stack pointer, so reinitialize it as it
// might be clobbered either by WASM or by any C++ calls within.
masm.initPseudoStackPtr();
#endif
masm.assertStackAlignment(WasmStackAlignment);
masm.branchPtr(Assembler::Equal, FramePointer, Imm32(wasm::FailFP),
masm.exceptionLabel());
// Store the return value in the appropriate place.
GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
fe.funcIndex());
const ValTypeVector& results = fe.funcType().results();
if (results.length() == 0) {
masm.moveValue(UndefinedValue(), JSReturnOperand);
GenPrintf(DebugChannel::Function, masm, "void");
} else {
MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
switch (results[0].kind()) {
case wasm::ValType::I32:
// The return value is in ReturnReg, which is what Ion expects.
GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
#if defined(JS_CODEGEN_X64)
if (JitOptions.spectreIndexMasking) {
masm.movl(ReturnReg, ReturnReg);
}
#endif
break;
case wasm::ValType::I64:
// The return value is in ReturnReg64, which is what Ion expects.
GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
break;
case wasm::ValType::F32:
masm.canonicalizeFloat(ReturnFloat32Reg);
GenPrintF32(DebugChannel::Function, masm, ReturnFloat32Reg);
break;
case wasm::ValType::F64:
masm.canonicalizeDouble(ReturnDoubleReg);
GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
break;
case wasm::ValType::Ref:
switch (results[0].refTypeKind()) {
case wasm::RefType::Func:
// For FuncRef, use the AnyRef path for now, since that will work.
case wasm::RefType::Any:
// The call to wasm above preserves WasmTlsReg, so we don't need to
// reload it here.
UnboxAnyrefIntoValueReg(masm, WasmTlsReg, ReturnReg,
JSReturnOperand, WasmJitEntryReturnScratch);
break;
case wasm::RefType::TypeIndex:
MOZ_CRASH("unexpected return type when calling from ion to wasm");
}
break;
case wasm::ValType::V128:
MOZ_CRASH("unexpected return type when calling from ion to wasm");
}
}
GenPrintf(DebugChannel::Function, masm, "\n");
// Free args + frame descriptor.
masm.leaveExitFrame(bytesNeeded + ExitFrameLayout::Size());
// If we pushed it, free FramePointer.
if (profilingEnabled) {
masm.Pop(FramePointer);
}
MOZ_ASSERT(framePushedAtStart == masm.framePushed());
}
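// Copy a value of the given MIRType between two stack addresses, clobbering
// scratch, and log it on the Import debug channel.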
static void StackCopy(MacroAssembler& masm, MIRType type, Register scratch,
Address src, Address dst) {
if (type == MIRType::Int32) {
masm.load32(src, scratch);
GenPrintIsize(DebugChannel::Import, masm, scratch);
masm.store32(scratch, dst);
} else if (type == MIRType::Int64) {
#if JS_BITS_PER_WORD == 32
GenPrintf(DebugChannel::Import, masm, "i64(");
masm.load32(LowWord(src), scratch);
GenPrintIsize(DebugChannel::Import, masm, scratch);
masm.store32(scratch, LowWord(dst));
masm.load32(HighWord(src), scratch);
GenPrintIsize(DebugChannel::Import, masm, scratch);
masm.store32(scratch, HighWord(dst));
GenPrintf(DebugChannel::Import, masm, ") ");
#else
Register64 scratch64(scratch);
masm.load64(src, scratch64);
GenPrintIsize(DebugChannel::Import, masm, scratch);
masm.store64(scratch64, dst);
#endif
} else if (type == MIRType::RefOrNull || type == MIRType::Pointer ||
type == MIRType::StackResults) {
masm.loadPtr(src, scratch);
GenPrintPtr(DebugChannel::Import, masm, scratch);
masm.storePtr(scratch, dst);
} else if (type == MIRType::Float32) {
ScratchFloat32Scope fpscratch(masm);
masm.loadFloat32(src, fpscratch);
GenPrintF32(DebugChannel::Import, masm, fpscratch);
masm.storeFloat32(fpscratch, dst);
} else if (type == MIRType::Double) {
ScratchDoubleScope fpscratch(masm);
masm.loadDouble(src, fpscratch);
GenPrintF64(DebugChannel::Import, masm, fpscratch);
masm.storeDouble(fpscratch, dst);
#ifdef ENABLE_WASM_SIMD
} else if (type == MIRType::Simd128) {
ScratchSimd128Scope fpscratch(masm);
masm.loadUnalignedSimd128(src, fpscratch);
GenPrintV128(DebugChannel::Import, masm, fpscratch);
masm.storeUnalignedSimd128(fpscratch, dst);
#endif
} else {
MOZ_CRASH("StackCopy: unexpected type");
}
}
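// FillArgumentArrayForExit moves the arguments of an import call out of their
// wasm ABI locations into a contiguous array, either as raw machine values or
// boxed as JS Values (when toValue), for consumption by the import's
// interpreter or JIT exit.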
using ToValue = bool;
// Note, when toValue is true then this may destroy the values in incoming
// argument registers as a result of Spectre mitigation.
static void FillArgumentArrayForExit(
MacroAssembler& masm, Register tls, unsigned funcImportIndex,
const FuncType& funcType, unsigned argOffset,
unsigned offsetFromFPToCallerStackArgs, Register scratch, Register scratch2,
Register scratch3, ToValue toValue, Label* throwLabel) {
MOZ_ASSERT(scratch != scratch2);
MOZ_ASSERT(scratch != scratch3);
MOZ_ASSERT(scratch2 != scratch3);
// This loop does not root the values that are being constructed in
// for the arguments. Allocations that are generated by code either
// in the loop or called from it should be NoGC allocations.
GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; arguments ",
funcImportIndex);
ArgTypeVector args(funcType);
for (ABIArgIter i(args); !i.done(); i++) {
Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));
MIRType type = i.mirType();
MOZ_ASSERT(args.isSyntheticStackResultPointerArg(i.index()) ==
(type == MIRType::StackResults));
switch (i->kind()) {
case ABIArg::GPR:
if (type == MIRType::Int32) {
GenPrintIsize(DebugChannel::Import, masm, i->gpr());
if (toValue) {
masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dst);
} else {
masm.store32(i->gpr(), dst);
}
} else if (type == MIRType::Int64) {
GenPrintI64(DebugChannel::Import, masm, i->gpr64());
if (toValue) {
GenerateBigIntInitialization(masm, offsetFromFPToCallerStackArgs,
i->gpr64(), scratch, nullptr,
throwLabel);
masm.storeValue(JSVAL_TYPE_BIGINT, scratch, dst);
} else {
masm.store64(i->gpr64(), dst);
}
} else if (type == MIRType::RefOrNull) {
if (toValue) {
// This works also for FuncRef because it is distinguishable from
// a boxed AnyRef.
masm.movePtr(i->gpr(), scratch2);
UnboxAnyrefIntoValue(masm, tls, scratch2, dst, scratch);
} else {
GenPrintPtr(DebugChannel::Import, masm, i->gpr());
masm.storePtr(i->gpr(), dst);
}
} else if (type == MIRType::StackResults) {
MOZ_ASSERT(!toValue, "Multi-result exit to JIT unimplemented");
GenPrintPtr(DebugChannel::Import, masm, i->gpr());
masm.storePtr(i->gpr(), dst);
} else {
MOZ_CRASH("FillArgumentArrayForExit, ABIArg::GPR: unexpected type");
}
break;
#ifdef JS_CODEGEN_REGISTER_PAIR
case ABIArg::GPR_PAIR:
if (type == MIRType::Int64) {
GenPrintI64(DebugChannel::Import, masm, i->gpr64());
if (toValue) {
GenerateBigIntInitialization(masm, offsetFromFPToCallerStackArgs,