/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
*
* Copyright 2016 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wasm/WasmInstance-inl.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/DebugOnly.h"
#include <algorithm>
#include <utility>
#include "jsmath.h"
#include "jit/AtomicOperations.h"
#include "jit/Disassemble.h"
#include "jit/JitCommon.h"
#include "jit/JitRuntime.h"
#include "jit/Registers.h"
#include "js/ForOfIterator.h"
#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
#include "util/StringBuffer.h"
#include "util/Text.h"
#include "vm/BigIntType.h"
#include "vm/ErrorObject.h"
#include "vm/PlainObject.h" // js::PlainObject
#include "wasm/TypedObject.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmDebug.h"
#include "wasm/WasmDebugFrame.h"
#include "wasm/WasmJS.h"
#include "wasm/WasmModule.h"
#include "wasm/WasmStubs.h"
#include "wasm/WasmTypeDef.h"
#include "wasm/WasmValType.h"
#include "wasm/WasmValue.h"
#include "gc/StoreBuffer-inl.h"
#include "vm/ArrayBufferObject-inl.h"
#include "vm/JSObject-inl.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::BitwiseCast;
using mozilla::CheckedInt;
using mozilla::DebugOnly;
using CheckedU32 = CheckedInt<uint32_t>;
// Instance must be aligned at least as much as any of the integer, float,
// or SIMD values that we'd like to store in it.
static_assert(alignof(Instance) >=
std::max(sizeof(Registers::RegisterContent),
sizeof(FloatRegisters::RegisterContent)));
// The globalArea must be aligned at least as much as an instance. This is
// guaranteed to be sufficient for all data types we care about, including
// SIMD values. See the above assertion.
static_assert(Instance::offsetOfGlobalArea() % alignof(Instance) == 0);
// We want the memory base to be the first field, and accessible with no
// offset. This incidentally is also an assertion that there is no superclass
// with fields.
static_assert(Instance::offsetOfMemoryBase() == 0);
// We want instance fields that are commonly accessed by the JIT to have
// compact encodings. A limit of less than 128 bytes is chosen to fit within
// the signed 8-bit mod r/m x86 encoding.
static_assert(Instance::offsetOfLastCommonJitField() < 128);
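// For example, on x86-64 a load such as `mov rax, [instanceReg + 0x70]` can
// use the one-byte (disp8) displacement form because the offset fits in a
// signed 8 bits; an offset of 128 or more would force the four-byte disp32
// encoding.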
//////////////////////////////////////////////////////////////////////////////
//
// Functions and invocation.
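// FuncTypeIdSet interns function types process-wide: each structurally
// distinct FuncType is cloned once, and the clone's heap address serves as
// the canonical type id, so a signature check (e.g. for call_indirect)
// reduces to a pointer comparison. Entries are reference-counted and freed
// when the last user deallocates.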
class FuncTypeIdSet {
using Map =
HashMap<const FuncType*, uint32_t, FuncTypeHashPolicy, SystemAllocPolicy>;
Map map_;
public:
~FuncTypeIdSet() {
MOZ_ASSERT_IF(!JSRuntime::hasLiveRuntimes(), map_.empty());
}
bool allocateFuncTypeId(JSContext* cx, const FuncType& funcType,
const void** funcTypeId) {
Map::AddPtr p = map_.lookupForAdd(funcType);
if (p) {
MOZ_ASSERT(p->value() > 0);
p->value()++;
*funcTypeId = p->key();
return true;
}
UniquePtr<FuncType> clone = MakeUnique<FuncType>();
if (!clone || !clone->clone(funcType) || !map_.add(p, clone.get(), 1)) {
ReportOutOfMemory(cx);
return false;
}
*funcTypeId = clone.release();
MOZ_ASSERT(!(uintptr_t(*funcTypeId) & TypeIdDesc::ImmediateBit));
return true;
}
void deallocateFuncTypeId(const FuncType& funcType, const void* funcTypeId) {
Map::Ptr p = map_.lookup(funcType);
MOZ_RELEASE_ASSERT(p && p->key() == funcTypeId && p->value() > 0);
p->value()--;
if (!p->value()) {
js_delete(p->key());
map_.remove(p);
}
}
};
ExclusiveData<FuncTypeIdSet> funcTypeIdSet(mutexid::WasmFuncTypeIdSet);
const void** Instance::addressOfTypeId(const TypeIdDesc& typeId) const {
return (const void**)(globalData() + typeId.globalDataOffset());
}
FuncImportInstanceData& Instance::funcImportInstanceData(const FuncImport& fi) {
return *(FuncImportInstanceData*)(globalData() + fi.instanceOffset());
}
TableInstanceData& Instance::tableInstanceData(const TableDesc& td) const {
return *(TableInstanceData*)(globalData() + td.globalDataOffset);
}
GCPtrWasmTagObject& Instance::tagInstanceData(const TagDesc& td) const {
return *(GCPtrWasmTagObject*)(globalData() + td.globalDataOffset);
}
// TODO(1626251): Consolidate definitions into Iterable.h
static bool IterableToArray(JSContext* cx, HandleValue iterable,
MutableHandle<ArrayObject*> array) {
JS::ForOfIterator iterator(cx);
if (!iterator.init(iterable, JS::ForOfIterator::ThrowOnNonIterable)) {
return false;
}
array.set(NewDenseEmptyArray(cx));
if (!array) {
return false;
}
RootedValue nextValue(cx);
while (true) {
bool done;
if (!iterator.next(&nextValue, &done)) {
return false;
}
if (done) {
break;
}
if (!NewbornArrayPush(cx, array, nextValue)) {
return false;
}
}
return true;
}
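// UnpackResults converts the JS return value of an imported function back
// into wasm results. With no stack results area there is at most one result,
// written to argv[0]; otherwise the JS value must be an iterable whose length
// matches the result arity, with stack results written into
// `stackResultsArea` and the lone register result (if any) into argv[0].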
static bool UnpackResults(JSContext* cx, const ValTypeVector& resultTypes,
const Maybe<char*> stackResultsArea, uint64_t* argv,
MutableHandleValue rval) {
if (!stackResultsArea) {
MOZ_ASSERT(resultTypes.length() <= 1);
// Result is either one scalar value to unpack to a wasm value, or
// an ignored value for a zero-valued function.
if (resultTypes.length() == 1) {
return ToWebAssemblyValue(cx, rval, resultTypes[0], argv, true);
}
return true;
}
MOZ_ASSERT(stackResultsArea.isSome());
RootedArrayObject array(cx);
if (!IterableToArray(cx, rval, &array)) {
return false;
}
if (resultTypes.length() != array->length()) {
UniqueChars expected(JS_smprintf("%zu", resultTypes.length()));
UniqueChars got(JS_smprintf("%u", array->length()));
JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
JSMSG_WASM_WRONG_NUMBER_OF_VALUES, expected.get(),
got.get());
return false;
}
DebugOnly<uint64_t> previousOffset = ~(uint64_t)0;
ABIResultIter iter(ResultType::Vector(resultTypes));
// The values are converted in the order they are pushed on the abstract
// WebAssembly stack, so advance the iterator to the end and then walk it
// backwards, visiting results in push order.
while (!iter.done()) {
iter.next();
}
DebugOnly<bool> seenRegisterResult = false;
for (iter.switchToPrev(); !iter.done(); iter.prev()) {
const ABIResult& result = iter.cur();
MOZ_ASSERT(!seenRegisterResult);
// Use rval as a scratch area to hold the extracted result.
rval.set(array->getDenseElement(iter.index()));
if (result.inRegister()) {
// Currently, if a function type has results, there can be only
// one register result. If there is only one result, it is
// returned as a scalar and not an iterable, so we don't get here.
// If there are multiple results, we extract the register result
// and set `argv[0]` to the extracted result, to be returned by
// register in the stub. The register result follows any stack
// results, so this preserves conversion order.
if (!ToWebAssemblyValue(cx, rval, result.type(), argv, true)) {
return false;
}
seenRegisterResult = true;
continue;
}
uint32_t result_size = result.size();
MOZ_ASSERT(result_size == 4 || result_size == 8);
#ifdef DEBUG
if (previousOffset == ~(uint64_t)0) {
previousOffset = (uint64_t)result.stackOffset();
} else {
MOZ_ASSERT(previousOffset - (uint64_t)result_size ==
(uint64_t)result.stackOffset());
previousOffset -= (uint64_t)result_size;
}
#endif
char* loc = stackResultsArea.value() + result.stackOffset();
if (!ToWebAssemblyValue(cx, rval, result.type(), loc, result_size == 8)) {
return false;
}
}
return true;
}
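// callImport is the slow path for calling an imported JS function: it boxes
// the wasm arguments as JS values, invokes the import via Call(), and unpacks
// the results. If the callee turns out to have JIT code and a compatible
// signature, the import's code pointer is patched to the faster JIT exit stub
// so subsequent calls skip this path.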
bool Instance::callImport(JSContext* cx, uint32_t funcImportIndex,
unsigned argc, uint64_t* argv) {
AssertRealmUnchanged aru(cx);
Tier tier = code().bestTier();
const FuncImport& fi = metadata(tier).funcImports[funcImportIndex];
ArgTypeVector argTypes(fi.funcType());
InvokeArgs args(cx);
if (!args.init(cx, argTypes.lengthWithoutStackResults())) {
return false;
}
if (fi.funcType().hasUnexposableArgOrRet()) {
JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
JSMSG_WASM_BAD_VAL_TYPE);
return false;
}
MOZ_ASSERT(argTypes.lengthWithStackResults() == argc);
Maybe<char*> stackResultPointer;
for (size_t i = 0; i < argc; i++) {
const void* rawArgLoc = &argv[i];
if (argTypes.isSyntheticStackResultPointerArg(i)) {
stackResultPointer = Some(*(char**)rawArgLoc);
continue;
}
size_t naturalIndex = argTypes.naturalIndex(i);
ValType type = fi.funcType().args()[naturalIndex];
MutableHandleValue argValue = args[naturalIndex];
if (!ToJSValue(cx, rawArgLoc, type, argValue)) {
return false;
}
}
FuncImportInstanceData& import = funcImportInstanceData(fi);
RootedFunction importFun(cx, import.fun);
MOZ_ASSERT(cx->realm() == importFun->realm());
RootedValue fval(cx, ObjectValue(*importFun));
RootedValue thisv(cx, UndefinedValue());
RootedValue rval(cx);
if (!Call(cx, fval, thisv, args, &rval)) {
return false;
}
if (!UnpackResults(cx, fi.funcType().results(), stackResultPointer, argv,
&rval)) {
return false;
}
if (!JitOptions.enableWasmJitExit) {
return true;
}
// The import may already have become optimized.
for (auto t : code().tiers()) {
void* jitExitCode = codeBase(t) + fi.jitExitCodeOffset();
if (import.code == jitExitCode) {
return true;
}
}
void* jitExitCode = codeBase(tier) + fi.jitExitCodeOffset();
// Test if the function is JIT compiled.
if (!importFun->hasBytecode()) {
return true;
}
JSScript* script = importFun->nonLazyScript();
if (!script->hasJitScript()) {
return true;
}
// Skip if the function does not have a signature that allows for a JIT exit.
if (!fi.canHaveJitExit()) {
return true;
}
// Let's optimize it!
import.code = jitExitCode;
return true;
}
/* static */ int32_t /* 0 to signal trap; 1 to signal OK */
Instance::callImport_general(Instance* instance, int32_t funcImportIndex,
int32_t argc, uint64_t* argv) {
JSContext* cx = instance->cx();
return instance->callImport(cx, funcImportIndex, argc, argv);
}
//////////////////////////////////////////////////////////////////////////////
//
// Atomic operations and shared memory.
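// PerformWait implements memory.atomic.wait{32,64}. The return codes follow
// the threads proposal: 0 = "ok" (woken), 1 = "not-equal" (value mismatch),
// 2 = "timed-out"; -1 signals a trap that has already been reported.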
template <typename ValT, typename PtrT>
static int32_t PerformWait(Instance* instance, PtrT byteOffset, ValT value,
int64_t timeout_ns) {
JSContext* cx = instance->cx();
if (!instance->memory()->isShared()) {
ReportTrapError(cx, JSMSG_WASM_NONSHARED_WAIT);
return -1;
}
if (byteOffset & (sizeof(ValT) - 1)) {
ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
return -1;
}
if (byteOffset + sizeof(ValT) > instance->memory()->volatileMemoryLength()) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
mozilla::Maybe<mozilla::TimeDuration> timeout;
if (timeout_ns >= 0) {
timeout = mozilla::Some(
mozilla::TimeDuration::FromMicroseconds(timeout_ns / 1000));
}
MOZ_ASSERT(byteOffset <= SIZE_MAX, "Bounds check is broken");
switch (atomics_wait_impl(cx, instance->sharedMemoryBuffer(),
size_t(byteOffset), value, timeout)) {
case FutexThread::WaitResult::OK:
return 0;
case FutexThread::WaitResult::NotEqual:
return 1;
case FutexThread::WaitResult::TimedOut:
return 2;
case FutexThread::WaitResult::Error:
return -1;
default:
MOZ_CRASH();
}
}
/* static */ int32_t Instance::wait_i32_m32(Instance* instance,
uint32_t byteOffset, int32_t value,
int64_t timeout_ns) {
MOZ_ASSERT(SASigWaitI32M32.failureMode == FailureMode::FailOnNegI32);
return PerformWait(instance, byteOffset, value, timeout_ns);
}
/* static */ int32_t Instance::wait_i32_m64(Instance* instance,
uint64_t byteOffset, int32_t value,
int64_t timeout_ns) {
MOZ_ASSERT(SASigWaitI32M64.failureMode == FailureMode::FailOnNegI32);
return PerformWait(instance, byteOffset, value, timeout_ns);
}
/* static */ int32_t Instance::wait_i64_m32(Instance* instance,
uint32_t byteOffset, int64_t value,
int64_t timeout_ns) {
MOZ_ASSERT(SASigWaitI64M32.failureMode == FailureMode::FailOnNegI32);
return PerformWait(instance, byteOffset, value, timeout_ns);
}
/* static */ int32_t Instance::wait_i64_m64(Instance* instance,
uint64_t byteOffset, int64_t value,
int64_t timeout_ns) {
MOZ_ASSERT(SASigWaitI64M64.failureMode == FailureMode::FailOnNegI32);
return PerformWait(instance, byteOffset, value, timeout_ns);
}
template <typename PtrT>
static int32_t PerformWake(Instance* instance, PtrT byteOffset, int32_t count) {
JSContext* cx = instance->cx();
// The alignment guard is not in the wasm spec as of 2017-11-02, but is
// considered likely to appear, as 4-byte alignment is required for WAKE by
// the spec's validation algorithm.
if (byteOffset & 3) {
ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
return -1;
}
if (byteOffset >= instance->memory()->volatileMemoryLength()) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
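// Notify on unshared memory does not trap (the alignment and bounds checks
// above still apply); there can be no waiters, so zero agents are woken.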
if (!instance->memory()->isShared()) {
return 0;
}
MOZ_ASSERT(byteOffset <= SIZE_MAX, "Bounds check is broken");
int64_t woken = atomics_notify_impl(instance->sharedMemoryBuffer(),
size_t(byteOffset), int64_t(count));
if (woken > INT32_MAX) {
ReportTrapError(cx, JSMSG_WASM_WAKE_OVERFLOW);
return -1;
}
return int32_t(woken);
}
/* static */ int32_t Instance::wake_m32(Instance* instance, uint32_t byteOffset,
int32_t count) {
MOZ_ASSERT(SASigWakeM32.failureMode == FailureMode::FailOnNegI32);
return PerformWake(instance, byteOffset, count);
}
/* static */ int32_t Instance::wake_m64(Instance* instance, uint64_t byteOffset,
int32_t count) {
MOZ_ASSERT(SASigWakeM64.failureMode == FailureMode::FailOnNegI32);
return PerformWake(instance, byteOffset, count);
}
//////////////////////////////////////////////////////////////////////////////
//
// Bulk memory operations.
/* static */ uint32_t Instance::memoryGrow_m32(Instance* instance,
uint32_t delta) {
MOZ_ASSERT(SASigMemoryGrowM32.failureMode == FailureMode::Infallible);
MOZ_ASSERT(!instance->isAsmJS());
JSContext* cx = instance->cx();
RootedWasmMemoryObject memory(cx, instance->memory_);
// It is safe to cast to uint32_t, as all limits have been checked inside
// grow() and will not have been exceeded for a 32-bit memory.
uint32_t ret = uint32_t(WasmMemoryObject::grow(memory, uint64_t(delta), cx));
// If there has been a moving grow, this Instance should have been notified.
MOZ_RELEASE_ASSERT(instance->memoryBase_ ==
instance->memory_->buffer().dataPointerEither());
return ret;
}
/* static */ uint64_t Instance::memoryGrow_m64(Instance* instance,
uint64_t delta) {
MOZ_ASSERT(SASigMemoryGrowM64.failureMode == FailureMode::Infallible);
MOZ_ASSERT(!instance->isAsmJS());
JSContext* cx = instance->cx();
RootedWasmMemoryObject memory(cx, instance->memory_);
uint64_t ret = WasmMemoryObject::grow(memory, delta, cx);
// If there has been a moving grow, this Instance should have been notified.
MOZ_RELEASE_ASSERT(instance->memoryBase_ ==
instance->memory_->buffer().dataPointerEither());
return ret;
}
/* static */ uint32_t Instance::memorySize_m32(Instance* instance) {
MOZ_ASSERT(SASigMemorySizeM32.failureMode == FailureMode::Infallible);
// This invariant must hold when running Wasm code. Assert it here so we can
// write tests for cross-realm calls.
DebugOnly<JSContext*> cx = instance->cx();
MOZ_ASSERT(cx->realm() == instance->realm());
Pages pages = instance->memory()->volatilePages();
#ifdef JS_64BIT
// Ensure that the memory size is no more than 4GiB.
MOZ_ASSERT(pages <= Pages(MaxMemory32LimitField));
#endif
return uint32_t(pages.value());
}
/* static */ uint64_t Instance::memorySize_m64(Instance* instance) {
MOZ_ASSERT(SASigMemorySizeM64.failureMode == FailureMode::Infallible);
// This invariant must hold when running Wasm code. Assert it here so we can
// write tests for cross-realm calls.
DebugOnly<JSContext*> cx = instance->cx();
MOZ_ASSERT(cx->realm() == instance->realm());
Pages pages = instance->memory()->volatilePages();
#ifdef JS_64BIT
MOZ_ASSERT(pages <= Pages(MaxMemory64LimitField));
#endif
return pages.value();
}
static inline bool BoundsCheckCopy(uint32_t dstByteOffset,
uint32_t srcByteOffset, uint32_t len,
size_t memLen) {
uint64_t dstOffsetLimit = uint64_t(dstByteOffset) + uint64_t(len);
uint64_t srcOffsetLimit = uint64_t(srcByteOffset) + uint64_t(len);
return dstOffsetLimit > memLen || srcOffsetLimit > memLen;
}
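// For 64-bit offsets the sums can wrap around uint64_t, so check for
// wraparound explicitly; the 32-bit overload above avoids this by widening
// to uint64_t before adding.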
static inline bool BoundsCheckCopy(uint64_t dstByteOffset,
uint64_t srcByteOffset, uint64_t len,
size_t memLen) {
uint64_t dstOffsetLimit = dstByteOffset + len;
uint64_t srcOffsetLimit = srcByteOffset + len;
return dstOffsetLimit < dstByteOffset || dstOffsetLimit > memLen ||
srcOffsetLimit < srcByteOffset || srcOffsetLimit > memLen;
}
template <typename T, typename F, typename I>
inline int32_t WasmMemoryCopy(JSContext* cx, T memBase, size_t memLen,
I dstByteOffset, I srcByteOffset, I len,
F memMove) {
if (BoundsCheckCopy(dstByteOffset, srcByteOffset, len, memLen)) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
memMove(memBase + uintptr_t(dstByteOffset),
memBase + uintptr_t(srcByteOffset), size_t(len));
return 0;
}
template <typename I>
inline int32_t MemoryCopy(JSContext* cx, I dstByteOffset, I srcByteOffset,
I len, uint8_t* memBase) {
const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->byteLength();
return WasmMemoryCopy(cx, memBase, memLen, dstByteOffset, srcByteOffset, len,
memmove);
}
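// For shared memory, other threads may be reading or writing the same region
// concurrently, so the copy must go through AtomicOperations' race-safe
// memmove rather than a plain memmove, and the length is read via
// volatileByteLength() since a concurrent grow may change it.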
template <typename I>
inline int32_t MemoryCopyShared(JSContext* cx, I dstByteOffset, I srcByteOffset,
I len, uint8_t* memBase) {
using RacyMemMove =
void (*)(SharedMem<uint8_t*>, SharedMem<uint8_t*>, size_t);
const SharedArrayRawBuffer* rawBuf =
SharedArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->volatileByteLength();
return WasmMemoryCopy<SharedMem<uint8_t*>, RacyMemMove>(
cx, SharedMem<uint8_t*>::shared(memBase), memLen, dstByteOffset,
srcByteOffset, len, AtomicOperations::memmoveSafeWhenRacy);
}
/* static */ int32_t Instance::memCopy_m32(Instance* instance,
uint32_t dstByteOffset,
uint32_t srcByteOffset, uint32_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemCopyM32.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryCopy(cx, dstByteOffset, srcByteOffset, len, memBase);
}
/* static */ int32_t Instance::memCopyShared_m32(Instance* instance,
uint32_t dstByteOffset,
uint32_t srcByteOffset,
uint32_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemCopySharedM32.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryCopyShared(cx, dstByteOffset, srcByteOffset, len, memBase);
}
/* static */ int32_t Instance::memCopy_m64(Instance* instance,
uint64_t dstByteOffset,
uint64_t srcByteOffset, uint64_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemCopyM64.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryCopy(cx, dstByteOffset, srcByteOffset, len, memBase);
}
/* static */ int32_t Instance::memCopyShared_m64(Instance* instance,
uint64_t dstByteOffset,
uint64_t srcByteOffset,
uint64_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemCopySharedM64.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryCopyShared(cx, dstByteOffset, srcByteOffset, len, memBase);
}
static inline bool BoundsCheckFill(uint32_t byteOffset, uint32_t len,
size_t memLen) {
uint64_t offsetLimit = uint64_t(byteOffset) + uint64_t(len);
return offsetLimit > memLen;
}
static inline bool BoundsCheckFill(uint64_t byteOffset, uint64_t len,
size_t memLen) {
uint64_t offsetLimit = byteOffset + len;
return offsetLimit < byteOffset || offsetLimit > memLen;
}
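// WasmMemoryFill stores the low byte of `value` into each of `len` bytes
// starting at `byteOffset`, following memset semantics (the int argument is
// truncated to unsigned char).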
template <typename T, typename F, typename I>
inline int32_t WasmMemoryFill(JSContext* cx, T memBase, size_t memLen,
I byteOffset, uint32_t value, I len, F memSet) {
if (BoundsCheckFill(byteOffset, len, memLen)) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
// The required write direction is upward, but that is not currently
// observable as there are no fences nor any read/write protect operation.
memSet(memBase + uintptr_t(byteOffset), int(value), size_t(len));
return 0;
}
template <typename I>
inline int32_t MemoryFill(JSContext* cx, I byteOffset, uint32_t value, I len,
uint8_t* memBase) {
const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->byteLength();
return WasmMemoryFill(cx, memBase, memLen, byteOffset, value, len, memset);
}
template <typename I>
inline int32_t MemoryFillShared(JSContext* cx, I byteOffset, uint32_t value,
I len, uint8_t* memBase) {
const SharedArrayRawBuffer* rawBuf =
SharedArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->volatileByteLength();
return WasmMemoryFill(cx, SharedMem<uint8_t*>::shared(memBase), memLen,
byteOffset, value, len,
AtomicOperations::memsetSafeWhenRacy);
}
/* static */ int32_t Instance::memFill_m32(Instance* instance,
uint32_t byteOffset, uint32_t value,
uint32_t len, uint8_t* memBase) {
MOZ_ASSERT(SASigMemFillM32.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryFill(cx, byteOffset, value, len, memBase);
}
/* static */ int32_t Instance::memFillShared_m32(Instance* instance,
uint32_t byteOffset,
uint32_t value, uint32_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemFillSharedM32.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryFillShared(cx, byteOffset, value, len, memBase);
}
/* static */ int32_t Instance::memFill_m64(Instance* instance,
uint64_t byteOffset, uint32_t value,
uint64_t len, uint8_t* memBase) {
MOZ_ASSERT(SASigMemFillM64.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryFill(cx, byteOffset, value, len, memBase);
}
/* static */ int32_t Instance::memFillShared_m64(Instance* instance,
uint64_t byteOffset,
uint32_t value, uint64_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemFillSharedM64.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryFillShared(cx, byteOffset, value, len, memBase);
}
static bool BoundsCheckInit(uint32_t dstOffset, uint32_t srcOffset,
uint32_t len, size_t memLen, uint32_t segLen) {
uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
return dstOffsetLimit > memLen || srcOffsetLimit > segLen;
}
static bool BoundsCheckInit(uint64_t dstOffset, uint32_t srcOffset,
uint32_t len, size_t memLen, uint32_t segLen) {
uint64_t dstOffsetLimit = dstOffset + uint64_t(len);
uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
return dstOffsetLimit < dstOffset || dstOffsetLimit > memLen ||
srcOffsetLimit > segLen;
}
template <typename I>
static int32_t MemoryInit(JSContext* cx, Instance* instance, I dstOffset,
uint32_t srcOffset, uint32_t len,
const DataSegment* maybeSeg) {
if (!maybeSeg) {
if (len == 0 && srcOffset == 0) {
return 0;
}
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
const DataSegment& seg = *maybeSeg;
MOZ_RELEASE_ASSERT(!seg.active());
const uint32_t segLen = seg.bytes.length();
WasmMemoryObject* mem = instance->memory();
const size_t memLen = mem->volatileMemoryLength();
// We are proposing to copy
//
// seg.bytes.begin()[ srcOffset .. srcOffset + len - 1 ]
// to
// memoryBase[ dstOffset .. dstOffset + len - 1 ]
if (BoundsCheckInit(dstOffset, srcOffset, len, memLen, segLen)) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
// The required read/write direction is upward, but that is not currently
// observable as there are no fences nor any read/write protect operation.
SharedMem<uint8_t*> dataPtr = mem->buffer().dataPointerEither();
if (mem->isShared()) {
AtomicOperations::memcpySafeWhenRacy(
dataPtr + uintptr_t(dstOffset), (uint8_t*)seg.bytes.begin() + srcOffset,
len);
} else {
uint8_t* rawBuf = dataPtr.unwrap(/*Unshared*/);
memcpy(rawBuf + uintptr_t(dstOffset),
(const char*)seg.bytes.begin() + srcOffset, len);
}
return 0;
}
/* static */ int32_t Instance::memInit_m32(Instance* instance,
uint32_t dstOffset,
uint32_t srcOffset, uint32_t len,
uint32_t segIndex) {
MOZ_ASSERT(SASigMemInitM32.failureMode == FailureMode::FailOnNegI32);
MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
"ensured by validation");
JSContext* cx = instance->cx();
return MemoryInit(cx, instance, dstOffset, srcOffset, len,
instance->passiveDataSegments_[segIndex]);
}
/* static */ int32_t Instance::memInit_m64(Instance* instance,
uint64_t dstOffset,
uint32_t srcOffset, uint32_t len,
uint32_t segIndex) {
MOZ_ASSERT(SASigMemInitM64.failureMode == FailureMode::FailOnNegI32);
MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
"ensured by validation");
JSContext* cx = instance->cx();
return MemoryInit(cx, instance, dstOffset, srcOffset, len,
instance->passiveDataSegments_[segIndex]);
}
//////////////////////////////////////////////////////////////////////////////
//
// Bulk table operations.
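// tableCopy must handle overlapping ranges within a single table: when
// dstOffset > srcOffset the region is copied backwards (high to low) so that
// elements are not clobbered before they are read; otherwise a forward copy
// is safe.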
/* static */ int32_t Instance::tableCopy(Instance* instance, uint32_t dstOffset,
uint32_t srcOffset, uint32_t len,
uint32_t dstTableIndex,
uint32_t srcTableIndex) {
MOZ_ASSERT(SASigTableCopy.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
const SharedTable& srcTable = instance->tables()[srcTableIndex];
uint32_t srcTableLen = srcTable->length();
const SharedTable& dstTable = instance->tables()[dstTableIndex];
uint32_t dstTableLen = dstTable->length();
// Bounds check and deal with arithmetic overflow.
uint64_t dstOffsetLimit = uint64_t(dstOffset) + len;
uint64_t srcOffsetLimit = uint64_t(srcOffset) + len;
if (dstOffsetLimit > dstTableLen || srcOffsetLimit > srcTableLen) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
bool isOOM = false;
if (&srcTable == &dstTable && dstOffset > srcOffset) {
for (uint32_t i = len; i > 0; i--) {
if (!dstTable->copy(cx, *srcTable, dstOffset + (i - 1),
srcOffset + (i - 1))) {
isOOM = true;
break;
}
}
} else if (&srcTable == &dstTable && dstOffset == srcOffset) {
// No-op
} else {
for (uint32_t i = 0; i < len; i++) {
if (!dstTable->copy(cx, *srcTable, dstOffset + i, srcOffset + i)) {
isOOM = true;
break;
}
}
}
if (isOOM) {
return -1;
}
return 0;
}
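// initElems writes a slice of an element segment into a table. Null entries
// clear the slot; for non-function tables the function reference is stored as
// an anyref; for function tables, imported wasm functions are re-pointed at
// their original instance's code so that Table.get() preserves function
// identity.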
bool Instance::initElems(uint32_t tableIndex, const ElemSegment& seg,
uint32_t dstOffset, uint32_t srcOffset, uint32_t len) {
Table& table = *tables_[tableIndex];
MOZ_ASSERT(dstOffset <= table.length());
MOZ_ASSERT(len <= table.length() - dstOffset);
Tier tier = code().bestTier();
const MetadataTier& metadataTier = metadata(tier);
const FuncImportVector& funcImports = metadataTier.funcImports;
const CodeRangeVector& codeRanges = metadataTier.codeRanges;
const Uint32Vector& funcToCodeRange = metadataTier.funcToCodeRange;
const Uint32Vector& elemFuncIndices = seg.elemFuncIndices;
MOZ_ASSERT(srcOffset <= elemFuncIndices.length());
MOZ_ASSERT(len <= elemFuncIndices.length() - srcOffset);
uint8_t* codeBaseTier = codeBase(tier);
for (uint32_t i = 0; i < len; i++) {
uint32_t funcIndex = elemFuncIndices[srcOffset + i];
if (funcIndex == NullFuncIndex) {
table.setNull(dstOffset + i);
} else if (!table.isFunction()) {
// Note, fnref must be rooted if we do anything more than just store it.
void* fnref = Instance::refFunc(this, funcIndex);
if (fnref == AnyRef::invalid().forCompiledCode()) {
return false; // OOM, which has already been reported.
}
table.fillAnyRef(dstOffset + i, 1, AnyRef::fromCompiledCode(fnref));
} else {
if (funcIndex < metadataTier.funcImports.length()) {
FuncImportInstanceData& import =
funcImportInstanceData(funcImports[funcIndex]);
JSFunction* fun = import.fun;
if (IsWasmExportedFunction(fun)) {
// This element is a wasm function imported from another
// instance. To preserve the === function identity required by
// the JS embedding spec, we must set the element to the
// imported function's underlying CodeRange.funcCheckedCallEntry and
// Instance so that future Table.get()s produce the same
// function object as was imported.
WasmInstanceObject* calleeInstanceObj =
ExportedFunctionToInstanceObject(fun);
Instance& calleeInstance = calleeInstanceObj->instance();
Tier calleeTier = calleeInstance.code().bestTier();
const CodeRange& calleeCodeRange =
calleeInstanceObj->getExportedFunctionCodeRange(fun, calleeTier);
void* code = calleeInstance.codeBase(calleeTier) +
calleeCodeRange.funcCheckedCallEntry();
table.setFuncRef(dstOffset + i, code, &calleeInstance);
continue;
}
}
void* code =
codeBaseTier +
codeRanges[funcToCodeRange[funcIndex]].funcCheckedCallEntry();
table.setFuncRef(dstOffset + i, code, this);
}
}
return true;
}
/* static */ int32_t Instance::tableInit(Instance* instance, uint32_t dstOffset,
uint32_t srcOffset, uint32_t len,
uint32_t segIndex,
uint32_t tableIndex) {
MOZ_ASSERT(SASigTableInit.failureMode == FailureMode::FailOnNegI32);
MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
"ensured by validation");
JSContext* cx = instance->cx();
if (!instance->passiveElemSegments_[segIndex]) {
if (len == 0 && srcOffset == 0) {
return 0;
}
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
const ElemSegment& seg = *instance->passiveElemSegments_[segIndex];
MOZ_RELEASE_ASSERT(!seg.active());
const uint32_t segLen = seg.length();
const Table& table = *instance->tables()[tableIndex];
const uint32_t tableLen = table.length();
// We are proposing to copy
//
// seg[ srcOffset .. srcOffset + len - 1 ]
// to
// tableBase[ dstOffset .. dstOffset + len - 1 ]
// Bounds check and deal with arithmetic overflow.
uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
if (dstOffsetLimit > tableLen || srcOffsetLimit > segLen) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
if (!instance->initElems(tableIndex, seg, dstOffset, srcOffset, len)) {
return -1; // OOM, which has already been reported.
}
return 0;
}
/* static */ int32_t Instance::tableFill(Instance* instance, uint32_t start,
void* value, uint32_t len,
uint32_t tableIndex) {
MOZ_ASSERT(SASigTableFill.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
Table& table = *instance->tables()[tableIndex];
// Bounds check and deal with arithmetic overflow.
uint64_t offsetLimit = uint64_t(start) + uint64_t(len);
if (offsetLimit > table.length()) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
switch (table.repr()) {
case TableRepr::Ref:
table.fillAnyRef(start, len, AnyRef::fromCompiledCode(value));
break;
case TableRepr::Func:
MOZ_RELEASE_ASSERT(!table.isAsmJS());
table.fillFuncRef(start, len, FuncRef::fromCompiledCode(value), cx);
break;
}
return 0;
}
/* static */ void* Instance::tableGet(Instance* instance, uint32_t index,
uint32_t tableIndex) {
MOZ_ASSERT(SASigTableGet.failureMode == FailureMode::FailOnInvalidRef);
JSContext* cx = instance->cx();
const Table& table = *instance->tables()[tableIndex];
if (index >= table.length()) {
ReportTrapError(cx, JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
return AnyRef::invalid().forCompiledCode();
}
switch (table.repr()) {
case TableRepr::Ref:
return table.getAnyRef(index).forCompiledCode();
case TableRepr::Func: {
MOZ_RELEASE_ASSERT(!table.isAsmJS());
RootedFunction fun(cx);
if (!table.getFuncRef(cx, index, &fun)) {
return AnyRef::invalid().forCompiledCode();
}
return FuncRef::fromJSFunction(fun).forCompiledCode();
}
}
MOZ_CRASH("Should not happen");
}
/* static */ uint32_t Instance::tableGrow(Instance* instance, void* initValue,
uint32_t delta, uint32_t tableIndex) {
MOZ_ASSERT(SASigTableGrow.failureMode == FailureMode::Infallible);
JSContext* cx = instance->cx();
RootedAnyRef ref(cx, AnyRef::fromCompiledCode(initValue));
Table& table = *instance->tables()[tableIndex];
uint32_t oldSize = table.grow(delta);
if (oldSize != uint32_t(-1) && initValue != nullptr) {
switch (table.repr()) {
case TableRepr::Ref:
table.fillAnyRef(oldSize, delta, ref);
break;
case TableRepr::Func:
MOZ_RELEASE_ASSERT(!table.isAsmJS());
table.fillFuncRef(oldSize, delta, FuncRef::fromAnyRefUnchecked(ref),
cx);
break;
}
}
return oldSize;
}
/* static */ int32_t Instance::tableSet(Instance* instance, uint32_t index,
void* value, uint32_t tableIndex) {
MOZ_ASSERT(SASigTableSet.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
Table& table = *instance->tables()[tableIndex];
if (index >= table.length()) {
ReportTrapError(cx, JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
return -1;
}
switch (table.repr()) {
case TableRepr::Ref:
table.fillAnyRef(index, 1, AnyRef::fromCompiledCode(value));
break;
case TableRepr::Func:
MOZ_RELEASE_ASSERT(!table.isAsmJS());
table.fillFuncRef(index, 1, FuncRef::fromCompiledCode(value), cx);
break;
}
return 0;
}
/* static */ uint32_t Instance::tableSize(Instance* instance,
uint32_t tableIndex) {
MOZ_ASSERT(SASigTableSize.failureMode == FailureMode::Infallible);
Table& table = *instance->tables()[tableIndex];
return table.length();
}
/* static */ void* Instance::refFunc(Instance* instance, uint32_t funcIndex) {
MOZ_ASSERT(SASigRefFunc.failureMode == FailureMode::FailOnInvalidRef);
JSContext* cx = instance->cx();
Tier tier = instance->code().bestTier();
const MetadataTier& metadataTier = instance->metadata(tier);
const FuncImportVector& funcImports = metadataTier.funcImports;
// If this is an import, we need to recover the original function to maintain
// reference equality between a re-exported function and 'ref.func'. The
// identity of the imported function object is stable across tiers, which is
// what we want.
//
// Use the imported function only if it is an exported function, otherwise
// fall through to get a (possibly new) exported function.
if (funcIndex < funcImports.length()) {
FuncImportInstanceData& import =
instance->funcImportInstanceData(funcImports[funcIndex]);
if (IsWasmExportedFunction(import.fun)) {
return FuncRef::fromJSFunction(import.fun).forCompiledCode();
}
}
RootedFunction fun(cx);
RootedWasmInstanceObject instanceObj(cx, instance->object());
if (!WasmInstanceObject::getExportedFunction(cx, instanceObj, funcIndex,
&fun)) {
// Validation ensures that we always have a valid funcIndex, so we must
// have OOM'ed.
ReportOutOfMemory(cx);
return AnyRef::invalid().forCompiledCode();
}
return FuncRef::fromJSFunction(fun).forCompiledCode();
}
//////////////////////////////////////////////////////////////////////////////
//
// Segment management.
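// elem.drop/data.drop release this instance's reference to a passive segment.
// Dropping an already-dropped segment is a no-op, per the bulk-memory
// proposal; the underlying segment memory is freed once every instance
// referencing it has dropped it.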
/* static */ int32_t Instance::elemDrop(Instance* instance, uint32_t segIndex) {
MOZ_ASSERT(SASigElemDrop.failureMode == FailureMode::FailOnNegI32);
MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
"ensured by validation");
if (!instance->passiveElemSegments_[segIndex]) {
return 0;
}
SharedElemSegment& segRefPtr = instance->passiveElemSegments_[segIndex];
MOZ_RELEASE_ASSERT(!segRefPtr->active());
// Drop this instance's reference to the ElemSegment so it can be released.
segRefPtr = nullptr;
return 0;
}
/* static */ int32_t Instance::dataDrop(Instance* instance, uint32_t segIndex) {
MOZ_ASSERT(SASigDataDrop.failureMode == FailureMode::FailOnNegI32);
MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
"ensured by validation");
if (!instance->passiveDataSegments_[segIndex]) {
return 0;
}
SharedDataSegment& segRefPtr = instance->passiveDataSegments_[segIndex];
MOZ_RELEASE_ASSERT(!segRefPtr->active());
// Drop this instance's reference to the DataSegment so it can be released.
segRefPtr = nullptr;
return 0;
}
//////////////////////////////////////////////////////////////////////////////
//
// Object support.
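// These barrier helpers are called from jitted wasm code when it writes GC
// pointers. The pre-barrier supports incremental marking (snapshot-at-the-
// beginning: the old value must be marked before being overwritten); the
// post-barriers support generational GC by recording edges in the store
// buffer, either unconditionally or behind an inline nursery filter.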
/* static */ void Instance::preBarrierFiltering(Instance* instance,
gc::Cell** location) {
MOZ_ASSERT(SASigPreBarrierFiltering.failureMode == FailureMode::Infallible);
MOZ_ASSERT(location);
gc::PreWriteBarrier(*reinterpret_cast<JSObject**>(location));
}
/* static */ void Instance::postBarrier(Instance* instance,
gc::Cell** location) {
MOZ_ASSERT(SASigPostBarrier.failureMode == FailureMode::Infallible);
MOZ_ASSERT(location);
JSContext* cx = instance->cx();
cx->runtime()->gc.storeBuffer().putCell(
reinterpret_cast<JSObject**>(location));
}
/* static */ void Instance::postBarrierPrecise(Instance* instance,
JSObject** location,
JSObject* prev) {
MOZ_ASSERT(SASigPostBarrierPrecise.failureMode == FailureMode::Infallible);
MOZ_ASSERT(location);
JSObject* next = *location;
JSObject::postWriteBarrier(location, prev, next);
}
/* static */ void Instance::postBarrierFiltering(Instance* instance,
gc::Cell** location) {
MOZ_ASSERT(SASigPostBarrierFiltering.failureMode == FailureMode::Infallible);
MOZ_ASSERT(location);
if (*location == nullptr || !gc::IsInsideNursery(*location)) {
return;
}
JSContext* cx = instance->cx();
cx->runtime()->gc.storeBuffer().putCell(
reinterpret_cast<JSObject**>(location));
}
//////////////////////////////////////////////////////////////////////////////
//
// GC and exception handling support.
// structDescr/arrayDescr point to the RttValue objects that describe the
// type being allocated (see the casts below).
//
// When we fail to allocate we return a nullptr; the wasm side must check this
// and propagate it as an error.
/* static */ void* Instance::structNew(Instance* instance, void* structDescr) {
MOZ_ASSERT(SASigStructNew.failureMode == FailureMode::FailOnNullPtr);
JSContext* cx = instance->cx();
Rooted<RttValue*> rttValue(cx, (RttValue*)structDescr);
MOZ_ASSERT(rttValue);
return TypedObject::createStruct(cx, rttValue);
}
/* static */ void* Instance::arrayNew(Instance* instance, uint32_t length,
void* arrayDescr) {
MOZ_ASSERT(SASigArrayNew.failureMode == FailureMode::FailOnNullPtr);
JSContext* cx = instance->cx();
Rooted<RttValue*> rttValue(cx, (RttValue*)arrayDescr);
MOZ_ASSERT(rttValue);
return TypedObject::createArray(cx, rttValue, length);
}
/* static */ void* Instance::exceptionNew(Instance* instance, JSObject* tag) {
MOZ_ASSERT(SASigExceptionNew.failureMode == FailureMode::FailOnNullPtr);
JSContext* cx = instance->cx();
RootedWasmTagObject tagObj(cx, &tag->as<WasmTagObject>());
RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmException));
RootedObject stack(cx, nullptr);
return AnyRef::fromJSObject(
WasmExceptionObject::create(cx, tagObj, stack, proto))
.forCompiledCode();
}
/* static */ int32_t Instance::throwException(Instance* instance,
JSObject* exn) {
MOZ_ASSERT(SASigThrowException.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
RootedValue exnVal(cx, UnboxAnyRef(AnyRef::fromJSObject(exn)));
cx->setPendingException(exnVal, nullptr);
// By always returning -1, we trigger a wasmTrap(Trap::ThrowReported),
// and use that to trigger the stack walking for this exception.
return -1;
}
/* static */ int32_t Instance::refTest(Instance* instance, void* refPtr,
void* rttPtr) {
MOZ_ASSERT(SASigRefTest.failureMode == FailureMode::Infallible);
if (!refPtr) {
return 0;
}
JSContext* cx = instance->cx();
ASSERT_ANYREF_IS_JSOBJECT;
RootedTypedObject ref(
cx, (TypedObject*)AnyRef::fromCompiledCode(refPtr).asJSObject());
RootedRttValue rtt(
cx, &AnyRef::fromCompiledCode(rttPtr).asJSObject()->as<RttValue>());
return int32_t(ref->isRuntimeSubtype(rtt));
}
/* static */ void* Instance::rttSub(Instance* instance, void* rttParentPtr,
void* rttSubCanonPtr) {
MOZ_ASSERT(SASigRttSub.failureMode == FailureMode::FailOnNullPtr);
JSContext* cx = instance->cx();
ASSERT_ANYREF_IS_JSOBJECT;
RootedRttValue parentRtt(
cx, &AnyRef::fromCompiledCode(rttParentPtr).asJSObject()->as<RttValue>());
RootedRttValue subCanonRtt(
cx,
&AnyRef::fromCompiledCode(rttSubCanonPtr).asJSObject()->as<RttValue>());
RootedRttValue subRtt(cx, RttValue::rttSub(cx, parentRtt, subCanonRtt));
return AnyRef::fromJSObject(subRtt.get()).forCompiledCode();
}
/* static */ int32_t Instance::intrI8VecMul(Instance* instance, uint32_t dest,
uint32_t src1, uint32_t src2,
uint32_t len, uint8_t* memBase) {
MOZ_ASSERT(SASigIntrI8VecMul.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->byteLength();
// Bounds check and deal with arithmetic overflow.
uint64_t destLimit = uint64_t(dest) + uint64_t(len);
uint64_t src1Limit = uint64_t(src1) + uint64_t(len);
uint64_t src2Limit = uint64_t(src2) + uint64_t(len);
if (destLimit > memLen || src1Limit > memLen || src2Limit > memLen) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
// Element-wise product: dest[i] = src1[i] * src2[i], wrapping mod 256.
uint8_t* destPtr = &memBase[dest];
uint8_t* src1Ptr = &memBase[src1];
uint8_t* src2Ptr = &memBase[src2];
while (len > 0) {
*destPtr = (*src1Ptr) * (*src2Ptr);
destPtr++;
src1Ptr++;
src2Ptr++;
len--;
}
return 0;
}
//////////////////////////////////////////////////////////////////////////////
//
// Instance creation and related.
Instance::Instance(JSContext* cx, Handle<WasmInstanceObject*> object,
SharedCode code, HandleWasmMemoryObject memory,
SharedTableVector&& tables, UniqueDebugState maybeDebug)
: realm_(cx->realm()),
jsJitArgsRectifier_(
cx->runtime()->jitRuntime()->getArgumentsRectifier().value),
jsJitExceptionHandler_(
cx->runtime()->jitRuntime()->getExceptionTail().value),
preBarrierCode_(
cx->runtime()->jitRuntime()->preBarrier(MIRType::Object).value),
object_(object),
code_(std::move(code)),
memory_(memory),
tables_(std::move(tables)),
maybeDebug_(std::move(maybeDebug)),
debugFilter_(nullptr)
#ifdef ENABLE_WASM_GC
,
hasGcTypes_(false)
#endif
{
}
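// Instance::create over-allocates by alignof(Instance) bytes and rounds the
// base pointer up, since js_calloc promises no alignment beyond malloc's. A
// sketch of the arithmetic, assuming alignof(Instance) == 16: a raw pointer
// of 0x1008 is rounded up to 0x1010, consuming 8 of the 16 slack bytes. The
// original pointer is kept in allocatedBase_ so destroy() can free it.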
Instance* Instance::create(JSContext* cx, HandleWasmInstanceObject object,
SharedCode code, uint32_t globalDataLength,
HandleWasmMemoryObject memory,
SharedTableVector&& tables,
UniqueDebugState maybeDebug) {
void* base = js_calloc(alignof(Instance) + offsetof(Instance, globalArea_) +
globalDataLength);
if (!base) {
ReportOutOfMemory(cx);
return nullptr;
}
void* aligned = (void*)AlignBytes(uintptr_t(base), alignof(Instance));
auto* instance = new (aligned) Instance(
cx, object, code, memory, std::move(tables), std::move(maybeDebug));
instance->allocatedBase_ = base;
return instance;
}
void Instance::destroy(Instance* instance) {
instance->~Instance();
js_free(instance->allocatedBase_);
}
bool Instance::init(JSContext* cx, const JSFunctionVector& funcImports,
const ValVector& globalImportValues,
const WasmGlobalObjectVector& globalObjs,
const WasmTagObjectVector& tagObjs,
const DataSegmentVector& dataSegments,
const ElemSegmentVector& elemSegments) {
MOZ_ASSERT(!!maybeDebug_ == metadata().debugEnabled);
#ifdef DEBUG
for (auto t : code_->tiers()) {
MOZ_ASSERT(funcImports.length() == metadata(t).funcImports.length());
}
#endif
MOZ_ASSERT(tables_.length() == metadata().tables.length());
memoryBase_ =
memory_ ? memory_->buffer().dataPointerEither().unwrap() : nullptr;
size_t limit = memory_ ? memory_->boundsCheckLimit() : 0;
#if !defined(JS_64BIT) || defined(ENABLE_WASM_CRANELIFT)
// We assume that the limit is a 32-bit quantity
MOZ_ASSERT(limit <= UINT32_MAX);
#endif
boundsCheckLimit_ = limit;
cx_ = cx;
valueBoxClass_ = &WasmValueBox::class_;
resetInterrupt(cx);
jumpTable_ = code_->tieringJumpTable();
debugFilter_ = nullptr;
addressOfNeedsIncrementalBarrier_ =
cx->compartment()->zone()->addressOfNeedsIncrementalBarrier();
// Initialize function imports in the instance data
Tier callerTier = code_->bestTier();
for (size_t i = 0; i < metadata(callerTier).funcImports.length(); i++) {
JSFunction* f = funcImports[i];
const FuncImport& fi = metadata(callerTier).funcImports[i];
FuncImportInstanceData& import = funcImportInstanceData(fi);
import.fun = f;
if (!isAsmJS() && IsWasmExportedFunction(f)) {
WasmInstanceObject* calleeInstanceObj =
ExportedFunctionToInstanceObject(f);
Instance& calleeInstance = calleeInstanceObj->instance();
Tier calleeTier = calleeInstance.code().bestTier();
const CodeRange& codeRange =
calleeInstanceObj->getExportedFunctionCodeRange(f, calleeTier);
import.instance = &calleeInstance;
import.realm = f->realm();
import.code = calleeInstance.codeBase(calleeTier) +
codeRange.funcUncheckedCallEntry();
} else if (void* thunk = MaybeGetBuiltinThunk(f, fi.funcType())) {
import.instance = this;
import.realm = f->realm();
import.code = thunk;
} else {
import.instance = this;
import.realm = f->realm();
import.code = codeBase(callerTier) + fi.interpExitCodeOffset();
}
}
// Initialize tables in the instance data
for (size_t i = 0; i < tables_.length(); i++) {
const TableDesc& td = metadata().tables[i];
TableInstanceData& table = tableInstanceData(td);
table.length = tables_[i]->length();
table.elements = tables_[i]->instanceElements();
}
// Initialize tags in the instance data
for (size_t i = 0; i < metadata().tags.length(); i++) {
const TagDesc& td = metadata().tags[i];
MOZ_ASSERT(td.globalDataOffset != UINT32_MAX);
MOZ_ASSERT(tagObjs[i] != nullptr);
tagInstanceData(td) = tagObjs[i];
}