/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
*
* Copyright 2016 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wasm/WasmInstance-inl.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/DebugOnly.h"
#include <algorithm>
#include <utility>
#include "jsmath.h"
#include "builtin/String.h"
#include "gc/Barrier.h"
#include "gc/Marking.h"
#include "jit/AtomicOperations.h"
#include "jit/Disassemble.h"
#include "jit/JitCommon.h"
#include "jit/JitRuntime.h"
#include "jit/Registers.h"
#include "js/ForOfIterator.h"
#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
#include "js/Stack.h" // JS::NativeStackLimitMin
#include "util/StringBuffer.h"
#include "util/Text.h"
#include "util/Unicode.h"
#include "vm/ArrayBufferObject.h"
#include "vm/BigIntType.h"
#include "vm/Compartment.h"
#include "vm/ErrorObject.h"
#include "vm/Interpreter.h"
#include "vm/Iteration.h"
#include "vm/JitActivation.h"
#include "vm/JSFunction.h"
#include "vm/PlainObject.h" // js::PlainObject
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmDebug.h"
#include "wasm/WasmDebugFrame.h"
#include "wasm/WasmFeatures.h"
#include "wasm/WasmInitExpr.h"
#include "wasm/WasmJS.h"
#include "wasm/WasmMemory.h"
#include "wasm/WasmModule.h"
#include "wasm/WasmModuleTypes.h"
#include "wasm/WasmPI.h"
#include "wasm/WasmStubs.h"
#include "wasm/WasmTypeDef.h"
#include "wasm/WasmValType.h"
#include "wasm/WasmValue.h"
#include "gc/Marking-inl.h"
#include "gc/StoreBuffer-inl.h"
#include "vm/ArrayBufferObject-inl.h"
#include "vm/JSObject-inl.h"
#include "wasm/WasmGcObject-inl.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::BitwiseCast;
using mozilla::CheckedUint32;
using mozilla::DebugOnly;
// Instance must be aligned at least as much as any of the integer, float,
// or SIMD values that we'd like to store in it.
static_assert(alignof(Instance) >=
std::max(sizeof(Registers::RegisterContent),
sizeof(FloatRegisters::RegisterContent)));
// The globalArea must be aligned at least as much as an instance. This is
// guaranteed to be sufficient for all data types we care about, including
// SIMD values. See the above assertion.
static_assert(Instance::offsetOfData() % alignof(Instance) == 0);
// We want the memory base to be the first field, and accessible with no
// offset. This incidentally is also an assertion that there is no superclass
// with fields.
static_assert(Instance::offsetOfMemory0Base() == 0);
// We want instance fields that are commonly accessed by the JIT to have
// compact encodings. A limit of less than 128 bytes is chosen to fit within
// the signed 8-bit mod r/m x86 encoding.
static_assert(Instance::offsetOfLastCommonJitField() < 128);
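// Illustrative sketch (not compiled; the exact encodings are an assumption):
// on x86-64, a load whose displacement fits in a signed byte can use the
// short disp8 mod r/m form,
//
//   mov rcx, [rax + 0x10]   ; disp8  -- one displacement byte
//   mov rcx, [rax + 0x100]  ; disp32 -- four displacement bytes
//
// so keeping the commonly accessed Instance fields below offset 128 keeps
// JIT-emitted instance accesses compact.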
//////////////////////////////////////////////////////////////////////////////
//
// Functions and invocation.
TypeDefInstanceData* Instance::typeDefInstanceData(uint32_t typeIndex) const {
TypeDefInstanceData* instanceData =
(TypeDefInstanceData*)(data() + metadata().typeDefsOffsetStart);
return &instanceData[typeIndex];
}
const void* Instance::addressOfGlobalCell(const GlobalDesc& global) const {
const void* cell = data() + global.offset();
// Indirect globals store a pointer to their cell in the instance global
// data. Dereference it to find the real cell.
if (global.isIndirect()) {
cell = *(const void**)cell;
}
return cell;
}
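// Informal layout sketch of the two cases handled above (not normative):
//
//   direct global:    data() + offset -> [ value ]
//   indirect global:  data() + offset -> [ Cell* ] -> [ value ]
//
// Indirection is used when a global's cell must be shared across instances,
// e.g. for imported or exported mutable globals.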
FuncImportInstanceData& Instance::funcImportInstanceData(const FuncImport& fi) {
return *(FuncImportInstanceData*)(data() + fi.instanceOffset());
}
MemoryInstanceData& Instance::memoryInstanceData(uint32_t memoryIndex) const {
MemoryInstanceData* instanceData =
(MemoryInstanceData*)(data() + metadata().memoriesOffsetStart);
return instanceData[memoryIndex];
}
TableInstanceData& Instance::tableInstanceData(uint32_t tableIndex) const {
TableInstanceData* instanceData =
(TableInstanceData*)(data() + metadata().tablesOffsetStart);
return instanceData[tableIndex];
}
TagInstanceData& Instance::tagInstanceData(uint32_t tagIndex) const {
TagInstanceData* instanceData =
(TagInstanceData*)(data() + metadata().tagsOffsetStart);
return instanceData[tagIndex];
}
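// Taken together, the accessors above imply the following informal picture
// of the per-instance data area (all offsets are supplied by metadata(), not
// fixed constants):
//
//   data() + typeDefsOffsetStart : TypeDefInstanceData[...]
//   data() + memoriesOffsetStart : MemoryInstanceData[...]
//   data() + tablesOffsetStart   : TableInstanceData[...]
//   data() + tagsOffsetStart     : TagInstanceData[...]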
static bool UnpackResults(JSContext* cx, const ValTypeVector& resultTypes,
const Maybe<char*> stackResultsArea, uint64_t* argv,
MutableHandleValue rval) {
if (!stackResultsArea) {
MOZ_ASSERT(resultTypes.length() <= 1);
// Result is either one scalar value to unpack to a wasm value, or
// an ignored value for a zero-valued function.
if (resultTypes.length() == 1) {
return ToWebAssemblyValue(cx, rval, resultTypes[0], argv, true);
}
return true;
}
MOZ_ASSERT(stackResultsArea.isSome());
Rooted<ArrayObject*> array(cx);
if (!IterableToArray(cx, rval, &array)) {
return false;
}
if (resultTypes.length() != array->length()) {
UniqueChars expected(JS_smprintf("%zu", resultTypes.length()));
UniqueChars got(JS_smprintf("%u", array->length()));
if (!expected || !got) {
ReportOutOfMemory(cx);
return false;
}
JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
JSMSG_WASM_WRONG_NUMBER_OF_VALUES, expected.get(),
got.get());
return false;
}
DebugOnly<uint64_t> previousOffset = ~(uint64_t)0;
ABIResultIter iter(ResultType::Vector(resultTypes));
// The values are converted in the order they are pushed on the
// abstract WebAssembly stack; switch to iterate in push order.
while (!iter.done()) {
iter.next();
}
DebugOnly<bool> seenRegisterResult = false;
for (iter.switchToPrev(); !iter.done(); iter.prev()) {
const ABIResult& result = iter.cur();
MOZ_ASSERT(!seenRegisterResult);
// Use rval as a scratch area to hold the extracted result.
rval.set(array->getDenseElement(iter.index()));
if (result.inRegister()) {
// Currently, if a function type has results, there can be only
// one register result. If there is only one result, it is
// returned as a scalar and not an iterable, so we don't get here.
// If there are multiple results, we extract the register result
// and set `argv[0]` to the extracted result, to be returned by
// register in the stub. The register result follows any stack
// results, so this preserves conversion order.
if (!ToWebAssemblyValue(cx, rval, result.type(), argv, true)) {
return false;
}
seenRegisterResult = true;
continue;
}
uint32_t result_size = result.size();
MOZ_ASSERT(result_size == 4 || result_size == 8);
#ifdef DEBUG
if (previousOffset == ~(uint64_t)0) {
previousOffset = (uint64_t)result.stackOffset();
} else {
MOZ_ASSERT(previousOffset - (uint64_t)result_size ==
(uint64_t)result.stackOffset());
previousOffset -= (uint64_t)result_size;
}
#endif
char* loc = stackResultsArea.value() + result.stackOffset();
if (!ToWebAssemblyValue(cx, rval, result.type(), loc, result_size == 8)) {
return false;
}
}
return true;
}
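// Worked example (informal): for a multi-result signature, the JS callee
// returns an iterable, which IterableToArray materializes above. The
// ABIResultIter is run to its end and then walked backwards, so results are
// converted in push order: each stack result is written at
// stackResultsArea + stackOffset(), and the single register result, which
// follows all stack results, is converted last, into argv[0], for the stub
// to return in a register.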
bool Instance::callImport(JSContext* cx, uint32_t funcImportIndex,
unsigned argc, uint64_t* argv) {
AssertRealmUnchanged aru(cx);
Tier tier = code().bestTier();
const FuncImport& fi = metadata(tier).funcImports[funcImportIndex];
const FuncType& funcType = metadata().getFuncImportType(fi);
ArgTypeVector argTypes(funcType);
InvokeArgs args(cx);
if (!args.init(cx, argTypes.lengthWithoutStackResults())) {
return false;
}
if (funcType.hasUnexposableArgOrRet()) {
JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
JSMSG_WASM_BAD_VAL_TYPE);
return false;
}
MOZ_ASSERT(argTypes.lengthWithStackResults() == argc);
Maybe<char*> stackResultPointer;
size_t lastBoxIndexPlusOne = 0;
{
JS::AutoAssertNoGC nogc;
for (size_t i = 0; i < argc; i++) {
const void* rawArgLoc = &argv[i];
if (argTypes.isSyntheticStackResultPointerArg(i)) {
stackResultPointer = Some(*(char**)rawArgLoc);
continue;
}
size_t naturalIndex = argTypes.naturalIndex(i);
ValType type = funcType.args()[naturalIndex];
// Defer conversions that may allocate, so this loop cannot trigger GC.
if (ToJSValueMayGC(type)) {
lastBoxIndexPlusOne = i + 1;
continue;
}
MutableHandleValue argValue = args[naturalIndex];
if (!ToJSValue(cx, rawArgLoc, type, argValue)) {
return false;
}
}
}
// Visit the arguments that need to perform allocation in a second loop,
// after the rest of the arguments have been converted.
for (size_t i = 0; i < lastBoxIndexPlusOne; i++) {
if (argTypes.isSyntheticStackResultPointerArg(i)) {
continue;
}
const void* rawArgLoc = &argv[i];
size_t naturalIndex = argTypes.naturalIndex(i);
ValType type = funcType.args()[naturalIndex];
if (!ToJSValueMayGC(type)) {
continue;
}
MOZ_ASSERT(!type.isRefRepr());
// The conversions are safe here because source values are not references
// and will not be moved.
MutableHandleValue argValue = args[naturalIndex];
if (!ToJSValue(cx, rawArgLoc, type, argValue)) {
return false;
}
}
FuncImportInstanceData& import = funcImportInstanceData(fi);
Rooted<JSObject*> importCallable(cx, import.callable);
MOZ_ASSERT(cx->realm() == importCallable->nonCCWRealm());
RootedValue fval(cx, ObjectValue(*importCallable));
RootedValue thisv(cx, UndefinedValue());
RootedValue rval(cx);
if (!Call(cx, fval, thisv, args, &rval)) {
return false;
}
if (!UnpackResults(cx, funcType.results(), stackResultPointer, argv, &rval)) {
return false;
}
if (!JitOptions.enableWasmJitExit) {
return true;
}
#ifdef ENABLE_WASM_JSPI
// Disable jit exit optimization when JSPI is enabled.
if (JSPromiseIntegrationAvailable(cx)) {
return true;
}
#endif
// The import may already have been optimized to a jit exit.
for (auto t : code().tiers()) {
void* jitExitCode = codeBase(t) + fi.jitExitCodeOffset();
if (import.code == jitExitCode) {
return true;
}
}
void* jitExitCode = codeBase(tier) + fi.jitExitCodeOffset();
if (!importCallable->is<JSFunction>()) {
return true;
}
// Test if the function is JIT compiled.
if (!importCallable->as<JSFunction>().hasBytecode()) {
return true;
}
JSScript* script = importCallable->as<JSFunction>().nonLazyScript();
if (!script->hasJitScript()) {
return true;
}
// Skip if the function does not have a signature that allows for a JIT exit.
if (!funcType.canHaveJitExit()) {
return true;
}
// Let's optimize it!
import.code = jitExitCode;
return true;
}
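// Summarizing the fast path above (informal): the import is patched to the
// jit exit stub only when all of the following hold --
//   * JitOptions.enableWasmJitExit is set;
//   * JSPI is not in use (when compiled in);
//   * the callable is a JSFunction with bytecode and a JitScript; and
//   * funcType.canHaveJitExit(), i.e. the signature is representable.
// Otherwise every call keeps going through the generic callImport path.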
/* static */ int32_t /* 0 to signal trap; 1 to signal OK */
Instance::callImport_general(Instance* instance, int32_t funcImportIndex,
int32_t argc, uint64_t* argv) {
JSContext* cx = instance->cx();
#ifdef ENABLE_WASM_JSPI
if (IsSuspendableStackActive(cx)) {
return CallImportOnMainThread(cx, instance, funcImportIndex, argc, argv);
}
#endif
return instance->callImport(cx, funcImportIndex, argc, argv);
}
//////////////////////////////////////////////////////////////////////////////
//
// Atomic operations and shared memory.
template <typename ValT, typename PtrT>
static int32_t PerformWait(Instance* instance, uint32_t memoryIndex,
PtrT byteOffset, ValT value, int64_t timeout_ns) {
JSContext* cx = instance->cx();
if (!instance->memory(memoryIndex)->isShared()) {
ReportTrapError(cx, JSMSG_WASM_NONSHARED_WAIT);
return -1;
}
if (byteOffset & (sizeof(ValT) - 1)) {
ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
return -1;
}
if (byteOffset + sizeof(ValT) >
instance->memory(memoryIndex)->volatileMemoryLength()) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
mozilla::Maybe<mozilla::TimeDuration> timeout;
if (timeout_ns >= 0) {
timeout = mozilla::Some(
mozilla::TimeDuration::FromMicroseconds(double(timeout_ns) / 1000));
}
MOZ_ASSERT(byteOffset <= SIZE_MAX, "Bounds check is broken");
switch (atomics_wait_impl(cx, instance->sharedMemoryBuffer(memoryIndex),
size_t(byteOffset), value, timeout)) {
case FutexThread::WaitResult::OK:
return 0;
case FutexThread::WaitResult::NotEqual:
return 1;
case FutexThread::WaitResult::TimedOut:
return 2;
case FutexThread::WaitResult::Error:
return -1;
default:
MOZ_CRASH();
}
}
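// Return-code mapping (informal; names follow the threads proposal): the
// values produced above correspond to the results of memory.atomic.wait --
//
//    0 -> "ok" (woken by a notify)    1 -> "not-equal" (mismatched value)
//    2 -> "timed-out"                -1 -> trap, error already reported on cx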
/* static */ int32_t Instance::wait_i32_m32(Instance* instance,
uint32_t byteOffset, int32_t value,
int64_t timeout_ns,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigWaitI32M32.failureMode == FailureMode::FailOnNegI32);
return PerformWait(instance, memoryIndex, byteOffset, value, timeout_ns);
}
/* static */ int32_t Instance::wait_i32_m64(Instance* instance,
uint64_t byteOffset, int32_t value,
int64_t timeout_ns,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigWaitI32M64.failureMode == FailureMode::FailOnNegI32);
return PerformWait(instance, memoryIndex, byteOffset, value, timeout_ns);
}
/* static */ int32_t Instance::wait_i64_m32(Instance* instance,
uint32_t byteOffset, int64_t value,
int64_t timeout_ns,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigWaitI64M32.failureMode == FailureMode::FailOnNegI32);
return PerformWait(instance, memoryIndex, byteOffset, value, timeout_ns);
}
/* static */ int32_t Instance::wait_i64_m64(Instance* instance,
uint64_t byteOffset, int64_t value,
int64_t timeout_ns,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigWaitI64M64.failureMode == FailureMode::FailOnNegI32);
return PerformWait(instance, memoryIndex, byteOffset, value, timeout_ns);
}
template <typename PtrT>
static int32_t PerformWake(Instance* instance, PtrT byteOffset, int32_t count,
uint32_t memoryIndex) {
JSContext* cx = instance->cx();
// The alignment guard is not in the wasm spec as of 2017-11-02, but is
// considered likely to appear, as 4-byte alignment is required for WAKE by
// the spec's validation algorithm.
if (byteOffset & 3) {
ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
return -1;
}
if (byteOffset >= instance->memory(memoryIndex)->volatileMemoryLength()) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
if (!instance->memory(memoryIndex)->isShared()) {
return 0;
}
MOZ_ASSERT(byteOffset <= SIZE_MAX, "Bounds check is broken");
int64_t woken = atomics_notify_impl(instance->sharedMemoryBuffer(memoryIndex),
size_t(byteOffset), int64_t(count));
if (woken > INT32_MAX) {
ReportTrapError(cx, JSMSG_WASM_WAKE_OVERFLOW);
return -1;
}
return int32_t(woken);
}
/* static */ int32_t Instance::wake_m32(Instance* instance, uint32_t byteOffset,
int32_t count, uint32_t memoryIndex) {
MOZ_ASSERT(SASigWakeM32.failureMode == FailureMode::FailOnNegI32);
return PerformWake(instance, byteOffset, count, memoryIndex);
}
/* static */ int32_t Instance::wake_m64(Instance* instance, uint64_t byteOffset,
int32_t count, uint32_t memoryIndex) {
MOZ_ASSERT(SASigWakeM64.failureMode == FailureMode::FailOnNegI32);
return PerformWake(instance, byteOffset, count, memoryIndex);
}
//////////////////////////////////////////////////////////////////////////////
//
// Bulk memory operations.
/* static */ uint32_t Instance::memoryGrow_m32(Instance* instance,
uint32_t delta,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigMemoryGrowM32.failureMode == FailureMode::Infallible);
MOZ_ASSERT(!instance->isAsmJS());
JSContext* cx = instance->cx();
Rooted<WasmMemoryObject*> memory(cx, instance->memory(memoryIndex));
// It is safe to cast to uint32_t, as all limits have been checked inside
// grow() and will not have been exceeded for a 32-bit memory.
uint32_t ret = uint32_t(WasmMemoryObject::grow(memory, uint64_t(delta), cx));
// If there has been a moving grow, this Instance should have been notified.
MOZ_RELEASE_ASSERT(
instance->memoryBase(memoryIndex) ==
instance->memory(memoryIndex)->buffer().dataPointerEither());
return ret;
}
/* static */ uint64_t Instance::memoryGrow_m64(Instance* instance,
uint64_t delta,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigMemoryGrowM64.failureMode == FailureMode::Infallible);
MOZ_ASSERT(!instance->isAsmJS());
JSContext* cx = instance->cx();
Rooted<WasmMemoryObject*> memory(cx, instance->memory(memoryIndex));
uint64_t ret = WasmMemoryObject::grow(memory, delta, cx);
// If there has been a moving grow, this Instance should have been notified.
MOZ_RELEASE_ASSERT(
instance->memoryBase(memoryIndex) ==
instance->memory(memoryIndex)->buffer().dataPointerEither());
return ret;
}
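// Note (informal): as with memory.grow in the spec, the value handed back to
// wasm code is the previous size in 64 KiB pages, or -1 (all bits set) when
// the grow failed; the release assertions above check that a moving grow has
// already been propagated to this Instance's cached memory base.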
/* static */ uint32_t Instance::memorySize_m32(Instance* instance,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigMemorySizeM32.failureMode == FailureMode::Infallible);
// This invariant must hold when running Wasm code. Assert it here so we can
// write tests for cross-realm calls.
DebugOnly<JSContext*> cx = instance->cx();
MOZ_ASSERT(cx->realm() == instance->realm());
Pages pages = instance->memory(memoryIndex)->volatilePages();
#ifdef JS_64BIT
// Ensure that the memory size is no more than 4GiB.
MOZ_ASSERT(pages <= Pages(MaxMemory32LimitField));
#endif
return uint32_t(pages.value());
}
/* static */ uint64_t Instance::memorySize_m64(Instance* instance,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigMemorySizeM64.failureMode == FailureMode::Infallible);
// This invariant must hold when running Wasm code. Assert it here so we can
// write tests for cross-realm calls.
DebugOnly<JSContext*> cx = instance->cx();
MOZ_ASSERT(cx->realm() == instance->realm());
Pages pages = instance->memory(memoryIndex)->volatilePages();
#ifdef JS_64BIT
MOZ_ASSERT(pages <= Pages(MaxMemory64LimitField));
#endif
return pages.value();
}
template <typename PointerT, typename CopyFuncT, typename IndexT>
inline int32_t WasmMemoryCopy(JSContext* cx, PointerT dstMemBase,
PointerT srcMemBase, size_t dstMemLen,
size_t srcMemLen, IndexT dstByteOffset,
IndexT srcByteOffset, IndexT len,
CopyFuncT memMove) {
if (!MemoryBoundsCheck(dstByteOffset, len, dstMemLen) ||
!MemoryBoundsCheck(srcByteOffset, len, srcMemLen)) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
memMove(dstMemBase + uintptr_t(dstByteOffset),
srcMemBase + uintptr_t(srcByteOffset), size_t(len));
return 0;
}
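// The checks above amount to requiring, without overflow,
//
//   dstByteOffset + len <= dstMemLen  &&  srcByteOffset + len <= srcMemLen
//
// and only then performing the copy; memMove is either plain memmove or the
// racy-safe variant used for shared memory.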
template <typename I>
inline int32_t MemoryCopy(JSContext* cx, I dstByteOffset, I srcByteOffset,
I len, uint8_t* memBase) {
const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->byteLength();
return WasmMemoryCopy(cx, memBase, memBase, memLen, memLen, dstByteOffset,
srcByteOffset, len, memmove);
}
template <typename I>
inline int32_t MemoryCopyShared(JSContext* cx, I dstByteOffset, I srcByteOffset,
I len, uint8_t* memBase) {
using RacyMemMove =
void (*)(SharedMem<uint8_t*>, SharedMem<uint8_t*>, size_t);
const WasmSharedArrayRawBuffer* rawBuf =
WasmSharedArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->volatileByteLength();
SharedMem<uint8_t*> sharedMemBase = SharedMem<uint8_t*>::shared(memBase);
return WasmMemoryCopy<SharedMem<uint8_t*>, RacyMemMove>(
cx, sharedMemBase, sharedMemBase, memLen, memLen, dstByteOffset,
srcByteOffset, len, AtomicOperations::memmoveSafeWhenRacy);
}
/* static */ int32_t Instance::memCopy_m32(Instance* instance,
uint32_t dstByteOffset,
uint32_t srcByteOffset, uint32_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemCopyM32.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryCopy(cx, dstByteOffset, srcByteOffset, len, memBase);
}
/* static */ int32_t Instance::memCopyShared_m32(Instance* instance,
uint32_t dstByteOffset,
uint32_t srcByteOffset,
uint32_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemCopySharedM32.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryCopyShared(cx, dstByteOffset, srcByteOffset, len, memBase);
}
/* static */ int32_t Instance::memCopy_m64(Instance* instance,
uint64_t dstByteOffset,
uint64_t srcByteOffset, uint64_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemCopyM64.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryCopy(cx, dstByteOffset, srcByteOffset, len, memBase);
}
/* static */ int32_t Instance::memCopyShared_m64(Instance* instance,
uint64_t dstByteOffset,
uint64_t srcByteOffset,
uint64_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemCopySharedM64.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryCopyShared(cx, dstByteOffset, srcByteOffset, len, memBase);
}
// Dynamic dispatch to get the length of a memory given just the base and
// whether it is shared or not. This is only used for memCopy_any, where being
// slower is okay.
static inline size_t GetVolatileByteLength(uint8_t* memBase, bool isShared) {
if (isShared) {
return WasmSharedArrayRawBuffer::fromDataPtr(memBase)->volatileByteLength();
}
return WasmArrayRawBuffer::fromDataPtr(memBase)->byteLength();
}
/* static */ int32_t Instance::memCopy_any(Instance* instance,
uint64_t dstByteOffset,
uint64_t srcByteOffset, uint64_t len,
uint32_t dstMemIndex,
uint32_t srcMemIndex) {
MOZ_ASSERT(SASigMemCopyAny.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
using RacyMemMove =
void (*)(SharedMem<uint8_t*>, SharedMem<uint8_t*>, size_t);
const MemoryInstanceData& dstMemory =
instance->memoryInstanceData(dstMemIndex);
const MemoryInstanceData& srcMemory =
instance->memoryInstanceData(srcMemIndex);
uint8_t* dstMemBase = dstMemory.base;
uint8_t* srcMemBase = srcMemory.base;
size_t dstMemLen = GetVolatileByteLength(dstMemBase, dstMemory.isShared);
size_t srcMemLen = GetVolatileByteLength(srcMemBase, srcMemory.isShared);
return WasmMemoryCopy<SharedMem<uint8_t*>, RacyMemMove>(
cx, SharedMem<uint8_t*>::shared(dstMemBase),
SharedMem<uint8_t*>::shared(srcMemBase), dstMemLen, srcMemLen,
dstByteOffset, srcByteOffset, len, AtomicOperations::memmoveSafeWhenRacy);
}
template <typename T, typename F, typename I>
inline int32_t WasmMemoryFill(JSContext* cx, T memBase, size_t memLen,
I byteOffset, uint32_t value, I len, F memSet) {
if (!MemoryBoundsCheck(byteOffset, len, memLen)) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
// The required write direction is upward, but that is not currently
// observable as there are no fences nor any read/write protect operation.
memSet(memBase + uintptr_t(byteOffset), int(value), size_t(len));
return 0;
}
template <typename I>
inline int32_t MemoryFill(JSContext* cx, I byteOffset, uint32_t value, I len,
uint8_t* memBase) {
const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->byteLength();
return WasmMemoryFill(cx, memBase, memLen, byteOffset, value, len, memset);
}
template <typename I>
inline int32_t MemoryFillShared(JSContext* cx, I byteOffset, uint32_t value,
I len, uint8_t* memBase) {
const WasmSharedArrayRawBuffer* rawBuf =
WasmSharedArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->volatileByteLength();
return WasmMemoryFill(cx, SharedMem<uint8_t*>::shared(memBase), memLen,
byteOffset, value, len,
AtomicOperations::memsetSafeWhenRacy);
}
/* static */ int32_t Instance::memFill_m32(Instance* instance,
uint32_t byteOffset, uint32_t value,
uint32_t len, uint8_t* memBase) {
MOZ_ASSERT(SASigMemFillM32.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryFill(cx, byteOffset, value, len, memBase);
}
/* static */ int32_t Instance::memFillShared_m32(Instance* instance,
uint32_t byteOffset,
uint32_t value, uint32_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemFillSharedM32.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryFillShared(cx, byteOffset, value, len, memBase);
}
/* static */ int32_t Instance::memFill_m64(Instance* instance,
uint64_t byteOffset, uint32_t value,
uint64_t len, uint8_t* memBase) {
MOZ_ASSERT(SASigMemFillM64.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryFill(cx, byteOffset, value, len, memBase);
}
/* static */ int32_t Instance::memFillShared_m64(Instance* instance,
uint64_t byteOffset,
uint32_t value, uint64_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemFillSharedM64.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryFillShared(cx, byteOffset, value, len, memBase);
}
static bool BoundsCheckInit(uint32_t dstOffset, uint32_t srcOffset,
uint32_t len, size_t memLen, uint32_t segLen) {
uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
return dstOffsetLimit > memLen || srcOffsetLimit > segLen;
}
static bool BoundsCheckInit(uint64_t dstOffset, uint32_t srcOffset,
uint32_t len, size_t memLen, uint32_t segLen) {
uint64_t dstOffsetLimit = dstOffset + uint64_t(len);
uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
return dstOffsetLimit < dstOffset || dstOffsetLimit > memLen ||
srcOffsetLimit > segLen;
}
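// Note the asymmetry between the two overloads above: with a uint32_t
// dstOffset the uint64_t sums cannot wrap, so plain range checks suffice; a
// uint64_t dstOffset can make dstOffset + len overflow, hence the extra
// dstOffsetLimit < dstOffset test in the 64-bit variant.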
template <typename I>
static int32_t MemoryInit(JSContext* cx, Instance* instance,
uint32_t memoryIndex, I dstOffset, uint32_t srcOffset,
uint32_t len, const DataSegment* maybeSeg) {
if (!maybeSeg) {
if (len == 0 && srcOffset == 0) {
return 0;
}
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
const DataSegment& seg = *maybeSeg;
MOZ_RELEASE_ASSERT(!seg.active());
const uint32_t segLen = seg.bytes.length();
WasmMemoryObject* mem = instance->memory(memoryIndex);
const size_t memLen = mem->volatileMemoryLength();
// We are proposing to copy
//
// seg.bytes.begin()[ srcOffset .. srcOffset + len - 1 ]
// to
// memoryBase[ dstOffset .. dstOffset + len - 1 ]
if (BoundsCheckInit(dstOffset, srcOffset, len, memLen, segLen)) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
// The required read/write direction is upward, but that is not currently
// observable as there are no fences nor any read/write protect operation.
SharedMem<uint8_t*> dataPtr = mem->buffer().dataPointerEither();
if (mem->isShared()) {
AtomicOperations::memcpySafeWhenRacy(
dataPtr + uintptr_t(dstOffset), (uint8_t*)seg.bytes.begin() + srcOffset,
len);
} else {
uint8_t* rawBuf = dataPtr.unwrap(/*Unshared*/);
memcpy(rawBuf + uintptr_t(dstOffset),
(const char*)seg.bytes.begin() + srcOffset, len);
}
return 0;
}
/* static */ int32_t Instance::memInit_m32(Instance* instance,
uint32_t dstOffset,
uint32_t srcOffset, uint32_t len,
uint32_t segIndex,
uint32_t memIndex) {
MOZ_ASSERT(SASigMemInitM32.failureMode == FailureMode::FailOnNegI32);
MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
"ensured by validation");
JSContext* cx = instance->cx();
return MemoryInit(cx, instance, memIndex, dstOffset, srcOffset, len,
instance->passiveDataSegments_[segIndex]);
}
/* static */ int32_t Instance::memInit_m64(Instance* instance,
uint64_t dstOffset,
uint32_t srcOffset, uint32_t len,
uint32_t segIndex,
uint32_t memIndex) {
MOZ_ASSERT(SASigMemInitM64.failureMode == FailureMode::FailOnNegI32);
MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
"ensured by validation");
JSContext* cx = instance->cx();
return MemoryInit(cx, instance, memIndex, dstOffset, srcOffset, len,
instance->passiveDataSegments_[segIndex]);
}
//////////////////////////////////////////////////////////////////////////////
//
// Bulk table operations.
/* static */ int32_t Instance::tableCopy(Instance* instance, uint32_t dstOffset,
uint32_t srcOffset, uint32_t len,
uint32_t dstTableIndex,
uint32_t srcTableIndex) {
MOZ_ASSERT(SASigTableCopy.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
const SharedTable& srcTable = instance->tables()[srcTableIndex];
uint32_t srcTableLen = srcTable->length();
const SharedTable& dstTable = instance->tables()[dstTableIndex];
uint32_t dstTableLen = dstTable->length();
// Bounds check and deal with arithmetic overflow.
uint64_t dstOffsetLimit = uint64_t(dstOffset) + len;
uint64_t srcOffsetLimit = uint64_t(srcOffset) + len;
if (dstOffsetLimit > dstTableLen || srcOffsetLimit > srcTableLen) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
bool isOOM = false;
if (&srcTable == &dstTable && dstOffset > srcOffset) {
for (uint32_t i = len; i > 0; i--) {
if (!dstTable->copy(cx, *srcTable, dstOffset + (i - 1),
srcOffset + (i - 1))) {
isOOM = true;
break;
}
}
} else if (&srcTable == &dstTable && dstOffset == srcOffset) {
// No-op
} else {
for (uint32_t i = 0; i < len; i++) {
if (!dstTable->copy(cx, *srcTable, dstOffset + i, srcOffset + i)) {
isOOM = true;
break;
}
}
}
if (isOOM) {
return -1;
}
return 0;
}
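// Like memmove, the copy direction above depends on overlap. Illustrative
// case: with one table [a b c d e], srcOffset 0, dstOffset 2, len 3, copying
// backward yields the correct [a b a b c], whereas a forward copy would
// re-read the already-overwritten slot 2 and produce [a b a b a].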
#ifdef DEBUG
static bool AllSegmentsArePassive(const DataSegmentVector& vec) {
for (const DataSegment* seg : vec) {
if (seg->active()) {
return false;
}
}
return true;
}
#endif
bool Instance::initSegments(JSContext* cx,
const DataSegmentVector& dataSegments,
const ModuleElemSegmentVector& elemSegments) {
MOZ_ASSERT_IF(metadata().memories.length() == 0,
AllSegmentsArePassive(dataSegments));
Rooted<WasmInstanceObject*> instanceObj(cx, object());
// Write data/elem segments into memories/tables.
for (const ModuleElemSegment& seg : elemSegments) {
if (seg.active()) {
RootedVal offsetVal(cx);
if (!seg.offset().evaluate(cx, instanceObj, &offsetVal)) {
return false; // OOM
}
uint32_t offset = offsetVal.get().i32();
uint32_t tableLength = tables()[seg.tableIndex]->length();
if (offset > tableLength || tableLength - offset < seg.numElements()) {
JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
JSMSG_WASM_OUT_OF_BOUNDS);
return false;
}
if (!initElems(seg.tableIndex, seg, offset)) {
return false; // OOM
}
}
}
for (const DataSegment* seg : dataSegments) {
if (!seg->active()) {
continue;
}
Rooted<const WasmMemoryObject*> memoryObj(cx, memory(seg->memoryIndex));
size_t memoryLength = memoryObj->volatileMemoryLength();
uint8_t* memoryBase =
memoryObj->buffer().dataPointerEither().unwrap(/* memcpy */);
RootedVal offsetVal(cx);
if (!seg->offset().evaluate(cx, instanceObj, &offsetVal)) {
return false; // OOM
}
uint64_t offset = memoryObj->indexType() == IndexType::I32
? offsetVal.get().i32()
: offsetVal.get().i64();
uint32_t count = seg->bytes.length();
if (offset > memoryLength || memoryLength - offset < count) {
JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
JSMSG_WASM_OUT_OF_BOUNDS);
return false;
}
memcpy(memoryBase + uintptr_t(offset), seg->bytes.begin(), count);
}
return true;
}
bool Instance::initElems(uint32_t tableIndex, const ModuleElemSegment& seg,
uint32_t dstOffset) {
Table& table = *tables_[tableIndex];
MOZ_ASSERT(dstOffset <= table.length());
MOZ_ASSERT(seg.numElements() <= table.length() - dstOffset);
if (seg.numElements() == 0) {
return true;
}
Rooted<WasmInstanceObject*> instanceObj(cx(), object());
if (table.isFunction() &&
seg.encoding == ModuleElemSegment::Encoding::Indices) {
// Initialize this table of functions without creating any intermediate
// JSFunctions.
bool ok = iterElemsFunctions(
seg, [&](uint32_t i, void* code, Instance* instance) -> bool {
table.setFuncRef(dstOffset + i, code, instance);
return true;
});
if (!ok) {
return false;
}
} else {
bool ok = iterElemsAnyrefs(seg, [&](uint32_t i, AnyRef ref) -> bool {
table.setRef(dstOffset + i, ref);
return true;
});
if (!ok) {
return false;
}
}
return true;
}
template <typename F>
bool Instance::iterElemsFunctions(const ModuleElemSegment& seg,
const F& onFunc) {
// In the future, we could theoretically get function data (instance + code
// pointer) from segments with the expression encoding without creating
// JSFunctions. But that is not how it works today. We can only bypass the
// creation of JSFunctions for the index encoding.
MOZ_ASSERT(seg.encoding == ModuleElemSegment::Encoding::Indices);
if (seg.numElements() == 0) {
return true;
}
Tier tier = code().bestTier();
const MetadataTier& metadataTier = metadata(tier);
const FuncImportVector& funcImports = metadataTier.funcImports;
const CodeRangeVector& codeRanges = metadataTier.codeRanges;
const Uint32Vector& funcToCodeRange = metadataTier.funcToCodeRange;
const Uint32Vector& elemIndices = seg.elemIndices;
uint8_t* codeBaseTier = codeBase(tier);
for (uint32_t i = 0; i < seg.numElements(); i++) {
uint32_t elemIndex = elemIndices[i];
if (elemIndex < metadataTier.funcImports.length()) {
FuncImportInstanceData& import =
funcImportInstanceData(funcImports[elemIndex]);
MOZ_ASSERT(import.callable->isCallable());
if (import.callable->is<JSFunction>()) {
JSFunction* fun = &import.callable->as<JSFunction>();
if (IsWasmExportedFunction(fun)) {
// This element is a wasm function imported from another
// instance. To preserve the === function identity required by
// the JS embedding spec, we must get the imported function's
// underlying CodeRange.funcCheckedCallEntry and Instance so that
// future Table.get()s produce the same function object as was
// imported.
WasmInstanceObject* calleeInstanceObj =
ExportedFunctionToInstanceObject(fun);
Instance& calleeInstance = calleeInstanceObj->instance();
Tier calleeTier = calleeInstance.code().bestTier();
const CodeRange& calleeCodeRange =
calleeInstanceObj->getExportedFunctionCodeRange(fun, calleeTier);
void* code = calleeInstance.codeBase(calleeTier) +
calleeCodeRange.funcCheckedCallEntry();
if (!onFunc(i, code, &calleeInstance)) {
return false;
}
continue;
}
}
}
void* code = codeBaseTier +
codeRanges[funcToCodeRange[elemIndex]].funcCheckedCallEntry();
if (!onFunc(i, code, this)) {
return false;
}
}
return true;
}
template <typename F>
bool Instance::iterElemsAnyrefs(const ModuleElemSegment& seg,
const F& onAnyRef) {
if (seg.numElements() == 0) {
return true;
}
switch (seg.encoding) {
case ModuleElemSegment::Encoding::Indices: {
// The only types of indices that exist right now are function indices, so
// this code is specialized to functions.
for (uint32_t i = 0; i < seg.numElements(); i++) {
uint32_t funcIndex = seg.elemIndices[i];
// Note, fnref must be rooted if we do anything more than just store it.
void* fnref = Instance::refFunc(this, funcIndex);
if (fnref == AnyRef::invalid().forCompiledCode()) {
return false; // OOM, which has already been reported.
}
if (!onAnyRef(i, AnyRef::fromCompiledCode(fnref))) {
return false;
}
}
} break;
case ModuleElemSegment::Encoding::Expressions: {
Rooted<WasmInstanceObject*> instanceObj(cx(), object());
const ModuleElemSegment::Expressions& exprs = seg.elemExpressions;
UniqueChars error;
// The offset is a dummy because the expression has already been
// validated.
Decoder d(exprs.exprBytes.begin(), exprs.exprBytes.end(), 0, &error);
for (uint32_t i = 0; i < seg.numElements(); i++) {
RootedVal result(cx());
if (!InitExpr::decodeAndEvaluate(cx(), instanceObj, d, seg.elemType,
&result)) {
MOZ_ASSERT(!error); // The only possible failure should be OOM.
return false;
}
// We would need to root this AnyRef if we were doing anything other
// than storing it.
AnyRef ref = result.get().ref();
if (!onAnyRef(i, ref)) {
return false;
}
}
} break;
default:
MOZ_CRASH("unknown encoding type for element segment");
}
return true;
}
/* static */ int32_t Instance::tableInit(Instance* instance, uint32_t dstOffset,
uint32_t srcOffset, uint32_t len,
uint32_t segIndex,
uint32_t tableIndex) {
MOZ_ASSERT(SASigTableInit.failureMode == FailureMode::FailOnNegI32);
MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
"ensured by validation");
JSContext* cx = instance->cx();
const InstanceElemSegment& seg = instance->passiveElemSegments_[segIndex];
const uint32_t segLen = seg.length();
Table& table = *instance->tables()[tableIndex];
const uint32_t tableLen = table.length();
// We are proposing to copy
//
// seg[ srcOffset .. srcOffset + len - 1 ]
// to
// tableBase[ dstOffset .. dstOffset + len - 1 ]
// Bounds check and deal with arithmetic overflow.
uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
if (dstOffsetLimit > tableLen || srcOffsetLimit > segLen) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
for (size_t i = 0; i < len; i++) {
table.setRef(dstOffset + i, seg[srcOffset + i]);
}
return 0;
}
/* static */ int32_t Instance::tableFill(Instance* instance, uint32_t start,
void* value, uint32_t len,
uint32_t tableIndex) {
MOZ_ASSERT(SASigTableFill.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
Table& table = *instance->tables()[tableIndex];
// Bounds check and deal with arithmetic overflow.
uint64_t offsetLimit = uint64_t(start) + uint64_t(len);
if (offsetLimit > table.length()) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
switch (table.repr()) {
case TableRepr::Ref:
table.fillAnyRef(start, len, AnyRef::fromCompiledCode(value));
break;
case TableRepr::Func:
MOZ_RELEASE_ASSERT(!table.isAsmJS());
table.fillFuncRef(start, len, FuncRef::fromCompiledCode(value), cx);
break;
}
return 0;
}
template <typename I>
static bool WasmDiscardCheck(Instance* instance, I byteOffset, I byteLen,
size_t memLen, bool shared) {
JSContext* cx = instance->cx();
if (byteOffset % wasm::PageSize != 0 || byteLen % wasm::PageSize != 0) {
ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
return false;
}
if (!MemoryBoundsCheck(byteOffset, byteLen, memLen)) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return false;
}
return true;
}
template <typename I>
static int32_t MemDiscardNotShared(Instance* instance, I byteOffset, I byteLen,
uint8_t* memBase) {
WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->byteLength();
if (!WasmDiscardCheck(instance, byteOffset, byteLen, memLen, false)) {
return -1;
}
rawBuf->discard(byteOffset, byteLen);
return 0;
}
template <typename I>
static int32_t MemDiscardShared(Instance* instance, I byteOffset, I byteLen,
uint8_t* memBase) {
WasmSharedArrayRawBuffer* rawBuf =
WasmSharedArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->volatileByteLength();
if (!WasmDiscardCheck(instance, byteOffset, byteLen, memLen, true)) {
return -1;
}
rawBuf->discard(byteOffset, byteLen);
return 0;
}
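// Trap conditions for the discard helpers above (informal): byteOffset and
// byteLen must both be multiples of wasm::PageSize (64 KiB) and the range
// must lie within the memory's current length; otherwise -1 is returned with
// a trap error already reported.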
/* static */ int32_t Instance::memDiscard_m32(Instance* instance,
uint32_t byteOffset,
uint32_t byteLen,
uint8_t* memBase) {
return MemDiscardNotShared(instance, byteOffset, byteLen, memBase);
}
/* static */ int32_t Instance::memDiscard_m64(Instance* instance,
uint64_t byteOffset,
uint64_t byteLen,
uint8_t* memBase) {
return MemDiscardNotShared(instance, byteOffset, byteLen, memBase);
}
/* static */ int32_t Instance::memDiscardShared_m32(Instance* instance,
uint32_t byteOffset,
uint32_t byteLen,
uint8_t* memBase) {
return MemDiscardShared(instance, byteOffset, byteLen, memBase);
}
/* static */ int32_t Instance::memDiscardShared_m64(Instance* instance,
uint64_t byteOffset,
uint64_t byteLen,
uint8_t* memBase) {
return MemDiscardShared(instance, byteOffset, byteLen, memBase);
}
/* static */ void* Instance::tableGet(Instance* instance, uint32_t index,
uint32_t tableIndex) {
MOZ_ASSERT(SASigTableGet.failureMode == FailureMode::FailOnInvalidRef);
JSContext* cx = instance->cx();
const Table& table = *instance->tables()[tableIndex];
if (index >= table.length()) {
ReportTrapError(cx, JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
return AnyRef::invalid().forCompiledCode();
}
switch (table.repr()) {
case TableRepr::Ref:
return table.getAnyRef(index).forCompiledCode();
case TableRepr::Func: {
MOZ_RELEASE_ASSERT(!table.isAsmJS());
RootedFunction fun(cx);
if (!table.getFuncRef(cx, index, &fun)) {
return AnyRef::invalid().forCompiledCode();
}
return FuncRef::fromJSFunction(fun).forCompiledCode();
}
}
MOZ_CRASH("Should not happen");
}
/* static */ uint32_t Instance::tableGrow(Instance* instance, void* initValue,
uint32_t delta, uint32_t tableIndex) {
MOZ_ASSERT(SASigTableGrow.failureMode == FailureMode::Infallible);
JSContext* cx = instance->cx();
RootedAnyRef ref(cx, AnyRef::fromCompiledCode(initValue));
Table& table = *instance->tables()[tableIndex];
uint32_t oldSize = table.grow(delta);
if (oldSize != uint32_t(-1) && initValue != nullptr) {
table.fillUninitialized(oldSize, delta, ref, cx);
}
#ifdef DEBUG
if (!table.elemType().isNullable()) {
table.assertRangeNotNull(oldSize, delta);
}
#endif // DEBUG
return oldSize;
}
/* static */ int32_t Instance::tableSet(Instance* instance, uint32_t index,
void* value, uint32_t tableIndex) {
MOZ_ASSERT(SASigTableSet.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
Table& table = *instance->tables()[tableIndex];
if (index >= table.length()) {
ReportTrapError(cx, JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
return -1;
}
switch (table.repr()) {
case TableRepr::Ref:
table.setAnyRef(index, AnyRef::fromCompiledCode(value));
break;
case TableRepr::Func:
MOZ_RELEASE_ASSERT(!table.isAsmJS());
table.fillFuncRef(index, 1, FuncRef::fromCompiledCode(value), cx);
break;
}
return 0;
}
/* static */ uint32_t Instance::tableSize(Instance* instance,
uint32_t tableIndex) {
MOZ_ASSERT(SASigTableSize.failureMode == FailureMode::Infallible);
Table& table = *instance->tables()[tableIndex];
return table.length();
}
/* static */ void* Instance::refFunc(Instance* instance, uint32_t funcIndex) {
MOZ_ASSERT(SASigRefFunc.failureMode == FailureMode::FailOnInvalidRef);
JSContext* cx = instance->cx();
Tier tier = instance->code().bestTier();
const MetadataTier& metadataTier = instance->metadata(tier);
const FuncImportVector& funcImports = metadataTier.funcImports;
// If this is an import, we need to recover the original function to maintain
// reference equality between a re-exported function and 'ref.func'. The
// identity of the imported function object is stable across tiers, which is
// what we want.
//
// Use the imported function only if it is an exported function, otherwise
// fall through to get a (possibly new) exported function.
if (funcIndex < funcImports.length()) {
FuncImportInstanceData& import =
instance->funcImportInstanceData(funcImports[funcIndex]);
if (import.callable->is<JSFunction>()) {
JSFunction* fun = &import.callable->as<JSFunction>();
if (IsWasmExportedFunction(fun)) {
return FuncRef::fromJSFunction(fun).forCompiledCode();
}
}
}
RootedFunction fun(cx);
Rooted<WasmInstanceObject*> instanceObj(cx, instance->object());
if (!WasmInstanceObject::getExportedFunction(cx, instanceObj, funcIndex,
&fun)) {
// Validation ensures that we always have a valid funcIndex, so we must
// have OOM'ed.
ReportOutOfMemory(cx);
return AnyRef::invalid().forCompiledCode();
}
return FuncRef::fromJSFunction(fun).forCompiledCode();
}
//////////////////////////////////////////////////////////////////////////////
//
// Segment management.
/* static */ int32_t Instance::elemDrop(Instance* instance, uint32_t segIndex) {
MOZ_ASSERT(SASigElemDrop.failureMode == FailureMode::FailOnNegI32);
MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
"ensured by validation");
instance->passiveElemSegments_[segIndex].clearAndFree();
return 0;
}
/* static */ int32_t Instance::dataDrop(Instance* instance, uint32_t segIndex) {
MOZ_ASSERT(SASigDataDrop.failureMode == FailureMode::FailOnNegI32);
MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
"ensured by validation");
if (!instance->passiveDataSegments_[segIndex]) {
return 0;
}
SharedDataSegment& segRefPtr = instance->passiveDataSegments_[segIndex];
MOZ_RELEASE_ASSERT(!segRefPtr->active());
// Drop this instance's reference to the DataSegment so it can be released.
segRefPtr = nullptr;
return 0;
}
//////////////////////////////////////////////////////////////////////////////
//
// AnyRef support.
/* static */ void Instance::postBarrier(Instance* instance, void** location) {
MOZ_ASSERT(SASigPostBarrier.failureMode == FailureMode::Infallible);
MOZ_ASSERT(location);
instance->storeBuffer_->putWasmAnyRef(
reinterpret_cast<wasm::AnyRef*>(location));
}
/* static */ void Instance::postBarrierPrecise(Instance* instance,
void** location, void* prev) {
MOZ_ASSERT(SASigPostBarrierPrecise.failureMode == FailureMode::Infallible);
postBarrierPreciseWithOffset(instance, location, /*offset=*/0, prev);
}
/* static */ void Instance::postBarrierPreciseWithOffset(Instance* instance,
void** base,
uint32_t offset,
void* prev) {
MOZ_ASSERT(SASigPostBarrierPreciseWithOffset.failureMode ==
FailureMode::Infallible);
MOZ_ASSERT(base);
wasm::AnyRef* location = (wasm::AnyRef*)(uintptr_t(base) + size_t(offset));
wasm::AnyRef next = *location;
InternalBarrierMethods<AnyRef>::postBarrier(
location, wasm::AnyRef::fromCompiledCode(prev), next);
}
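// Background (informal): these are generational-GC post-barriers. When wasm
// code stores a possibly-nursery AnyRef into tenured memory, the edge must
// be recorded so a minor GC can trace it, roughly:
//
//   *location = newRef;                          // wasm store
//   Instance::postBarrier(instance, location);   // remember location
//
// The "precise" variants also receive the previous value, so the remembered
// edge can be added or dropped exactly rather than merely appended.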
//////////////////////////////////////////////////////////////////////////////
//
// GC and exception handling support.
/* static */
template <bool ZeroFields>
void* Instance::structNewIL(Instance* instance,
TypeDefInstanceData* typeDefData) {
MOZ_ASSERT((ZeroFields ? SASigStructNewIL_true : SASigStructNewIL_false)
.failureMode == FailureMode::FailOnNullPtr);
JSContext* cx = instance->cx();
// The new struct will be allocated in an initial heap as determined by
// pretenuring logic as set up in `Instance::init`.
return WasmStructObject::createStructIL<ZeroFields>(
cx, typeDefData, typeDefData->allocSite.initialHeap());
}
template void* Instance::structNewIL<true>(Instance* instance,
TypeDefInstanceData* typeDefData);
template void* Instance::structNewIL<false>(Instance* instance,
TypeDefInstanceData* typeDefData);
/* static */
template <bool ZeroFields>
void* Instance::structNewOOL(Instance* instance,
TypeDefInstanceData* typeDefData) {
MOZ_ASSERT((ZeroFields ? SASigStructNewOOL_true : SASigStructNewOOL_false)
.failureMode == FailureMode::FailOnNullPtr);
JSContext* cx = instance->cx();
// The new struct will be allocated in an initial heap as determined by
// pretenuring logic as set up in `Instance::init`.
return WasmStructObject::createStructOOL<ZeroFields>(
cx, typeDefData, typeDefData->allocSite.initialHeap());
}
template void* Instance::structNewOOL<true>(Instance* instance,
TypeDefInstanceData* typeDefData);
template void* Instance::structNewOOL<false>(Instance* instance,
TypeDefInstanceData* typeDefData);
/* static */
template <bool ZeroFields>
void* Instance::arrayNew(Instance* instance, uint32_t numElements,
TypeDefInstanceData* typeDefData) {
MOZ_ASSERT(
(ZeroFields ? SASigArrayNew_true : SASigArrayNew_false).failureMode ==
FailureMode::FailOnNullPtr);
JSContext* cx = instance->cx();
// The new array will be allocated in an initial heap as determined by
// pretenuring logic as set up in `Instance::init`.
return WasmArrayObject::createArray<ZeroFields>(
cx, typeDefData, typeDefData->allocSite.initialHeap(), numElements);
}
template void* Instance::arrayNew<true>(Instance* instance,
uint32_t numElements,
TypeDefInstanceData* typeDefData);
template void* Instance::arrayNew<false>(Instance* instance,
uint32_t numElements,
TypeDefInstanceData* typeDefData);
// Copies from a data segment into a wasm GC array. Performs the necessary
// bounds checks, accounting for the array's element size. If this function