/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
*
* Copyright 2016 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wasm/WasmInstance-inl.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/DebugOnly.h"
#include <algorithm>
#include <utility>
#include "jsmath.h"
#include "builtin/String.h"
#include "gc/Barrier.h"
#include "gc/Marking.h"
#include "jit/AtomicOperations.h"
#include "jit/Disassemble.h"
#include "jit/JitCommon.h"
#include "jit/JitRuntime.h"
#include "jit/Registers.h"
#include "js/ForOfIterator.h"
#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
#include "js/Stack.h" // JS::NativeStackLimitMin
#include "util/StringBuilder.h"
#include "util/Text.h"
#include "util/Unicode.h"
#include "vm/ArrayBufferObject.h"
#include "vm/BigIntType.h"
#include "vm/Compartment.h"
#include "vm/ErrorObject.h"
#include "vm/Interpreter.h"
#include "vm/Iteration.h"
#include "vm/JitActivation.h"
#include "vm/JSFunction.h"
#include "vm/PlainObject.h" // js::PlainObject
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmDebug.h"
#include "wasm/WasmDebugFrame.h"
#include "wasm/WasmFeatures.h"
#include "wasm/WasmInitExpr.h"
#include "wasm/WasmJS.h"
#include "wasm/WasmMemory.h"
#include "wasm/WasmModule.h"
#include "wasm/WasmModuleTypes.h"
#include "wasm/WasmPI.h"
#include "wasm/WasmStubs.h"
#include "wasm/WasmTypeDef.h"
#include "wasm/WasmValType.h"
#include "wasm/WasmValue.h"
#include "gc/Marking-inl.h"
#include "gc/StoreBuffer-inl.h"
#include "vm/ArrayBufferObject-inl.h"
#include "vm/JSObject-inl.h"
#include "wasm/WasmGcObject-inl.h"
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::CheckedUint32;
using mozilla::DebugOnly;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::Some;
// Instance must be aligned at least as much as any of the integer, float,
// or SIMD values that we'd like to store in it.
static_assert(alignof(Instance) >=
std::max(sizeof(Registers::RegisterContent),
sizeof(FloatRegisters::RegisterContent)));
// The globalArea must be aligned at least as much as an instance. This is
// guaranteed to be sufficient for all data types we care about, including
// SIMD values. See the above assertion.
static_assert(Instance::offsetOfData() % alignof(Instance) == 0);
// We want the memory base to be the first field, and accessible with no
// offset. This incidentally is also an assertion that there is no superclass
// with fields.
static_assert(Instance::offsetOfMemory0Base() == 0);
// We want instance fields that are commonly accessed by the JIT to have
// compact encodings. A limit of less than 128 bytes is chosen to fit within
// the signed 8-bit mod r/m x86 encoding.
static_assert(Instance::offsetOfLastCommonJitField() < 128);
//////////////////////////////////////////////////////////////////////////////
//
// Functions and invocation.
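// The accessors below return typed views into this instance's data area.
// Each one uses an offset recorded in codeMeta() to locate the per-item
// instance data: function definitions, type definitions, globals, function
// imports/exports, memories, tables, and tags.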
FuncDefInstanceData* Instance::funcDefInstanceData(uint32_t funcIndex) const {
MOZ_ASSERT(funcIndex >= codeMeta().numFuncImports);
uint32_t funcDefIndex = funcIndex - codeMeta().numFuncImports;
FuncDefInstanceData* instanceData =
(FuncDefInstanceData*)(data() + codeMeta().funcDefsOffsetStart);
return &instanceData[funcDefIndex];
}
TypeDefInstanceData* Instance::typeDefInstanceData(uint32_t typeIndex) const {
TypeDefInstanceData* instanceData =
(TypeDefInstanceData*)(data() + codeMeta().typeDefsOffsetStart);
return &instanceData[typeIndex];
}
const void* Instance::addressOfGlobalCell(const GlobalDesc& global) const {
const void* cell = data() + global.offset();
// Indirect globals store a pointer to their cell in the instance global
// data. Dereference it to find the real cell.
if (global.isIndirect()) {
cell = *(const void**)cell;
}
return cell;
}
FuncImportInstanceData& Instance::funcImportInstanceData(uint32_t funcIndex) {
MOZ_ASSERT(funcIndex < codeMeta().numFuncImports);
FuncImportInstanceData* instanceData =
(FuncImportInstanceData*)(data() + codeMeta().funcImportsOffsetStart);
return instanceData[funcIndex];
}
FuncExportInstanceData& Instance::funcExportInstanceData(
uint32_t funcExportIndex) {
FuncExportInstanceData* instanceData =
(FuncExportInstanceData*)(data() + codeMeta().funcExportsOffsetStart);
return instanceData[funcExportIndex];
}
MemoryInstanceData& Instance::memoryInstanceData(uint32_t memoryIndex) const {
MemoryInstanceData* instanceData =
(MemoryInstanceData*)(data() + codeMeta().memoriesOffsetStart);
return instanceData[memoryIndex];
}
TableInstanceData& Instance::tableInstanceData(uint32_t tableIndex) const {
TableInstanceData* instanceData =
(TableInstanceData*)(data() + codeMeta().tablesOffsetStart);
return instanceData[tableIndex];
}
TagInstanceData& Instance::tagInstanceData(uint32_t tagIndex) const {
TagInstanceData* instanceData =
(TagInstanceData*)(data() + codeMeta().tagsOffsetStart);
return instanceData[tagIndex];
}
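// Convert the JS return value of an imported call back into wasm results.
// Without a stack results area, at most one result is expected and is written
// to argv[0]. Otherwise the JS value is treated as an iterable: stack results
// are written into the stack results area and the single register result is
// written to argv[0], preserving the conversion order of the wasm stack.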
static bool UnpackResults(JSContext* cx, const ValTypeVector& resultTypes,
const Maybe<char*> stackResultsArea, uint64_t* argv,
MutableHandleValue rval) {
if (!stackResultsArea) {
MOZ_ASSERT(resultTypes.length() <= 1);
// Result is either one scalar value to unpack to a wasm value, or
// an ignored value for a function with no results.
if (resultTypes.length() == 1) {
return ToWebAssemblyValue(cx, rval, resultTypes[0], argv, true);
}
return true;
}
MOZ_ASSERT(stackResultsArea.isSome());
Rooted<ArrayObject*> array(cx);
if (!IterableToArray(cx, rval, &array)) {
return false;
}
if (resultTypes.length() != array->length()) {
UniqueChars expected(JS_smprintf("%zu", resultTypes.length()));
UniqueChars got(JS_smprintf("%u", array->length()));
if (!expected || !got) {
ReportOutOfMemory(cx);
return false;
}
JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
JSMSG_WASM_WRONG_NUMBER_OF_VALUES, expected.get(),
got.get());
return false;
}
DebugOnly<uint64_t> previousOffset = ~(uint64_t)0;
ABIResultIter iter(ResultType::Vector(resultTypes));
// The values are converted in the order they are pushed on the
// abstract WebAssembly stack; switch to iterate in push order.
while (!iter.done()) {
iter.next();
}
DebugOnly<bool> seenRegisterResult = false;
for (iter.switchToPrev(); !iter.done(); iter.prev()) {
const ABIResult& result = iter.cur();
MOZ_ASSERT(!seenRegisterResult);
// Use rval as a scratch area to hold the extracted result.
rval.set(array->getDenseElement(iter.index()));
if (result.inRegister()) {
// Currently, if a function type has results, there can be only
// one register result. If there is only one result, it is
// returned as a scalar and not an iterable, so we don't get here.
// If there are multiple results, we extract the register result
// and set `argv[0]` to the extracted result, to be returned by
// register in the stub. The register result follows any stack
// results, so this preserves conversion order.
if (!ToWebAssemblyValue(cx, rval, result.type(), argv, true)) {
return false;
}
seenRegisterResult = true;
continue;
}
uint32_t result_size = result.size();
MOZ_ASSERT(result_size == 4 || result_size == 8);
#ifdef DEBUG
if (previousOffset == ~(uint64_t)0) {
previousOffset = (uint64_t)result.stackOffset();
} else {
MOZ_ASSERT(previousOffset - (uint64_t)result_size ==
(uint64_t)result.stackOffset());
previousOffset -= (uint64_t)result_size;
}
#endif
char* loc = stackResultsArea.value() + result.stackOffset();
if (!ToWebAssemblyValue(cx, rval, result.type(), loc, result_size == 8)) {
return false;
}
}
return true;
}
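// Call an imported JS function from wasm. The wasm arguments in argv are
// converted to JS values (conversions that may allocate are deferred to a
// second pass), the callable stored in the import's instance data is invoked,
// and the results are unpacked back into argv. Afterwards the import may be
// promoted to the faster JIT exit stub if its signature and JIT state allow.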
bool Instance::callImport(JSContext* cx, uint32_t funcImportIndex,
unsigned argc, uint64_t* argv) {
AssertRealmUnchanged aru(cx);
const FuncImport& fi = code().funcImport(funcImportIndex);
const FuncType& funcType = codeMeta().getFuncType(funcImportIndex);
ArgTypeVector argTypes(funcType);
InvokeArgs args(cx);
if (!args.init(cx, argTypes.lengthWithoutStackResults())) {
return false;
}
if (funcType.hasUnexposableArgOrRet()) {
JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
JSMSG_WASM_BAD_VAL_TYPE);
return false;
}
MOZ_ASSERT(argTypes.lengthWithStackResults() == argc);
Maybe<char*> stackResultPointer;
size_t lastBoxIndexPlusOne = 0;
{
JS::AutoAssertNoGC nogc;
for (size_t i = 0; i < argc; i++) {
const void* rawArgLoc = &argv[i];
if (argTypes.isSyntheticStackResultPointerArg(i)) {
stackResultPointer = Some(*(char**)rawArgLoc);
continue;
}
size_t naturalIndex = argTypes.naturalIndex(i);
ValType type = funcType.args()[naturalIndex];
// Defer conversions that may allocate (box) values, so that no GC can be
// triggered inside this no-GC section; they are handled in the loop below.
if (ToJSValueMayGC(type)) {
lastBoxIndexPlusOne = i + 1;
continue;
}
MutableHandleValue argValue = args[naturalIndex];
if (!ToJSValue(cx, rawArgLoc, type, argValue)) {
return false;
}
}
}
// Visit the arguments that need to perform allocation in a second loop,
// after the rest of the arguments have been converted.
for (size_t i = 0; i < lastBoxIndexPlusOne; i++) {
if (argTypes.isSyntheticStackResultPointerArg(i)) {
continue;
}
const void* rawArgLoc = &argv[i];
size_t naturalIndex = argTypes.naturalIndex(i);
ValType type = funcType.args()[naturalIndex];
if (!ToJSValueMayGC(type)) {
continue;
}
MOZ_ASSERT(!type.isRefRepr());
// The conversions are safe here because source values are not references
// and will not be moved.
MutableHandleValue argValue = args[naturalIndex];
if (!ToJSValue(cx, rawArgLoc, type, argValue)) {
return false;
}
}
FuncImportInstanceData& import = funcImportInstanceData(funcImportIndex);
Rooted<JSObject*> importCallable(cx, import.callable);
MOZ_ASSERT(cx->realm() == importCallable->nonCCWRealm());
RootedValue fval(cx, ObjectValue(*importCallable));
RootedValue thisv(cx, UndefinedValue());
RootedValue rval(cx);
if (!Call(cx, fval, thisv, args, &rval)) {
return false;
}
if (!UnpackResults(cx, funcType.results(), stackResultPointer, argv, &rval)) {
return false;
}
if (!JitOptions.enableWasmJitExit) {
return true;
}
// The import may already have become optimized.
void* jitExitCode =
code().sharedStubs().segment->base() + fi.jitExitCodeOffset();
if (import.code == jitExitCode) {
return true;
}
if (!importCallable->is<JSFunction>()) {
return true;
}
// Test if the function is JIT compiled.
if (!importCallable->as<JSFunction>().hasBytecode()) {
return true;
}
JSScript* script = importCallable->as<JSFunction>().nonLazyScript();
if (!script->hasJitScript()) {
return true;
}
// Skip if the function does not have a signature that allows for a JIT exit.
if (!funcType.canHaveJitExit()) {
return true;
}
// Let's optimize it!
import.code = jitExitCode;
return true;
}
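// Builtin call target for the generic import stub. Returns 0 to signal a
// trap (an exception is pending on cx) and 1 to signal success. Under JSPI,
// calls made on a suspendable stack are routed through CallImportOnMainThread.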
/* static */ int32_t /* 0 to signal trap; 1 to signal OK */
Instance::callImport_general(Instance* instance, int32_t funcImportIndex,
int32_t argc, uint64_t* argv) {
JSContext* cx = instance->cx();
#ifdef ENABLE_WASM_JSPI
if (IsSuspendableStackActive(cx)) {
return CallImportOnMainThread(cx, instance, funcImportIndex, argc, argv);
}
#endif
return instance->callImport(cx, funcImportIndex, argc, argv);
}
//////////////////////////////////////////////////////////////////////////////
//
// Atomic operations and shared memory.
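// Shared implementation of the memory.atomic.wait* instructions. Returns
// 0 ("ok"), 1 ("not-equal"), or 2 ("timed-out") per the wait semantics, or
// -1 to signal a trap: waiting on non-shared memory, on an unaligned address,
// or on an out-of-bounds address.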
template <typename ValT, typename PtrT>
static int32_t PerformWait(Instance* instance, uint32_t memoryIndex,
PtrT byteOffset, ValT value, int64_t timeout_ns) {
JSContext* cx = instance->cx();
if (!instance->memory(memoryIndex)->isShared()) {
ReportTrapError(cx, JSMSG_WASM_NONSHARED_WAIT);
return -1;
}
if (byteOffset & (sizeof(ValT) - 1)) {
ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
return -1;
}
if (byteOffset + sizeof(ValT) >
instance->memory(memoryIndex)->volatileMemoryLength()) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
mozilla::Maybe<mozilla::TimeDuration> timeout;
if (timeout_ns >= 0) {
timeout = mozilla::Some(
mozilla::TimeDuration::FromMicroseconds(double(timeout_ns) / 1000));
}
MOZ_ASSERT(byteOffset <= SIZE_MAX, "Bounds check is broken");
switch (atomics_wait_impl(cx, instance->sharedMemoryBuffer(memoryIndex),
size_t(byteOffset), value, timeout)) {
case FutexThread::WaitResult::OK:
return 0;
case FutexThread::WaitResult::NotEqual:
return 1;
case FutexThread::WaitResult::TimedOut:
return 2;
case FutexThread::WaitResult::Error:
return -1;
default:
MOZ_CRASH();
}
}
/* static */ int32_t Instance::wait_i32_m32(Instance* instance,
uint32_t byteOffset, int32_t value,
int64_t timeout_ns,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigWaitI32M32.failureMode == FailureMode::FailOnNegI32);
return PerformWait(instance, memoryIndex, byteOffset, value, timeout_ns);
}
/* static */ int32_t Instance::wait_i32_m64(Instance* instance,
uint64_t byteOffset, int32_t value,
int64_t timeout_ns,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigWaitI32M64.failureMode == FailureMode::FailOnNegI32);
return PerformWait(instance, memoryIndex, byteOffset, value, timeout_ns);
}
/* static */ int32_t Instance::wait_i64_m32(Instance* instance,
uint32_t byteOffset, int64_t value,
int64_t timeout_ns,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigWaitI64M32.failureMode == FailureMode::FailOnNegI32);
return PerformWait(instance, memoryIndex, byteOffset, value, timeout_ns);
}
/* static */ int32_t Instance::wait_i64_m64(Instance* instance,
uint64_t byteOffset, int64_t value,
int64_t timeout_ns,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigWaitI64M64.failureMode == FailureMode::FailOnNegI32);
return PerformWait(instance, memoryIndex, byteOffset, value, timeout_ns);
}
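// Shared implementation of memory.atomic.notify. Returns the number of woken
// waiters (0 for non-shared memory), or -1 to signal a trap on an unaligned
// or out-of-bounds address, or if the number of woken waiters does not fit
// in an int32.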
template <typename PtrT>
static int32_t PerformWake(Instance* instance, PtrT byteOffset, int32_t count,
uint32_t memoryIndex) {
JSContext* cx = instance->cx();
// The alignment guard is not in the wasm spec as of 2017-11-02, but is
// considered likely to appear, as 4-byte alignment is required for WAKE by
// the spec's validation algorithm.
if (byteOffset & 3) {
ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
return -1;
}
if (byteOffset >= instance->memory(memoryIndex)->volatileMemoryLength()) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
if (!instance->memory(memoryIndex)->isShared()) {
return 0;
}
MOZ_ASSERT(byteOffset <= SIZE_MAX, "Bounds check is broken");
int64_t woken = atomics_notify_impl(instance->sharedMemoryBuffer(memoryIndex),
size_t(byteOffset), int64_t(count));
if (woken > INT32_MAX) {
ReportTrapError(cx, JSMSG_WASM_WAKE_OVERFLOW);
return -1;
}
return int32_t(woken);
}
/* static */ int32_t Instance::wake_m32(Instance* instance, uint32_t byteOffset,
int32_t count, uint32_t memoryIndex) {
MOZ_ASSERT(SASigWakeM32.failureMode == FailureMode::FailOnNegI32);
return PerformWake(instance, byteOffset, count, memoryIndex);
}
/* static */ int32_t Instance::wake_m64(Instance* instance, uint64_t byteOffset,
int32_t count, uint32_t memoryIndex) {
MOZ_ASSERT(SASigWakeM64.failureMode == FailureMode::FailOnNegI32);
return PerformWake(instance, byteOffset, count, memoryIndex);
}
//////////////////////////////////////////////////////////////////////////////
//
// Bulk memory operations.
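// memory.grow for a 32-bit memory: grows the memory by `delta` pages and
// returns the value expected by the instruction (the previous size in pages,
// or -1 on failure).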
/* static */ uint32_t Instance::memoryGrow_m32(Instance* instance,
uint32_t delta,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigMemoryGrowM32.failureMode == FailureMode::Infallible);
MOZ_ASSERT(!instance->isAsmJS());
JSContext* cx = instance->cx();
Rooted<WasmMemoryObject*> memory(cx, instance->memory(memoryIndex));
// It is safe to cast to uint32_t, as all limits have been checked inside
// grow() and will not have been exceeded for a 32-bit memory.
uint32_t ret = uint32_t(WasmMemoryObject::grow(memory, uint64_t(delta), cx));
// If there has been a moving grow, this Instance should have been notified.
MOZ_RELEASE_ASSERT(
instance->memoryBase(memoryIndex) ==
instance->memory(memoryIndex)->buffer().dataPointerEither());
return ret;
}
/* static */ uint64_t Instance::memoryGrow_m64(Instance* instance,
uint64_t delta,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigMemoryGrowM64.failureMode == FailureMode::Infallible);
MOZ_ASSERT(!instance->isAsmJS());
JSContext* cx = instance->cx();
Rooted<WasmMemoryObject*> memory(cx, instance->memory(memoryIndex));
uint64_t ret = WasmMemoryObject::grow(memory, delta, cx);
// If there has been a moving grow, this Instance should have been notified.
MOZ_RELEASE_ASSERT(
instance->memoryBase(memoryIndex) ==
instance->memory(memoryIndex)->buffer().dataPointerEither());
return ret;
}
/* static */ uint32_t Instance::memorySize_m32(Instance* instance,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigMemorySizeM32.failureMode == FailureMode::Infallible);
// This invariant must hold when running Wasm code. Assert it here so we can
// write tests for cross-realm calls.
DebugOnly<JSContext*> cx = instance->cx();
MOZ_ASSERT(cx->realm() == instance->realm());
Pages pages = instance->memory(memoryIndex)->volatilePages();
#ifdef JS_64BIT
// Ensure that the memory size is no more than 4GiB.
MOZ_ASSERT(pages <= Pages(MaxMemory32PagesValidation));
#endif
return uint32_t(pages.value());
}
/* static */ uint64_t Instance::memorySize_m64(Instance* instance,
uint32_t memoryIndex) {
MOZ_ASSERT(SASigMemorySizeM64.failureMode == FailureMode::Infallible);
// This invariant must hold when running Wasm code. Assert it here so we can
// write tests for cross-realm calls.
DebugOnly<JSContext*> cx = instance->cx();
MOZ_ASSERT(cx->realm() == instance->realm());
Pages pages = instance->memory(memoryIndex)->volatilePages();
#ifdef JS_64BIT
MOZ_ASSERT(pages <= Pages(MaxMemory64PagesValidation));
#endif
return pages.value();
}
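// Shared helper for the memory.copy variants below: bounds-check both the
// source and destination ranges against their memories' lengths, then perform
// a single memmove (the racy-safe variant for shared memory). Traps with -1
// on any out-of-bounds range.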
template <typename PointerT, typename CopyFuncT, typename IndexT>
inline int32_t WasmMemoryCopy(JSContext* cx, PointerT dstMemBase,
PointerT srcMemBase, size_t dstMemLen,
size_t srcMemLen, IndexT dstByteOffset,
IndexT srcByteOffset, IndexT len,
CopyFuncT memMove) {
if (!MemoryBoundsCheck(dstByteOffset, len, dstMemLen) ||
!MemoryBoundsCheck(srcByteOffset, len, srcMemLen)) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
memMove(dstMemBase + uintptr_t(dstByteOffset),
srcMemBase + uintptr_t(srcByteOffset), size_t(len));
return 0;
}
template <typename I>
inline int32_t MemoryCopy(JSContext* cx, I dstByteOffset, I srcByteOffset,
I len, uint8_t* memBase) {
const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->byteLength();
return WasmMemoryCopy(cx, memBase, memBase, memLen, memLen, dstByteOffset,
srcByteOffset, len, memmove);
}
template <typename I>
inline int32_t MemoryCopyShared(JSContext* cx, I dstByteOffset, I srcByteOffset,
I len, uint8_t* memBase) {
using RacyMemMove =
void (*)(SharedMem<uint8_t*>, SharedMem<uint8_t*>, size_t);
const WasmSharedArrayRawBuffer* rawBuf =
WasmSharedArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->volatileByteLength();
SharedMem<uint8_t*> sharedMemBase = SharedMem<uint8_t*>::shared(memBase);
return WasmMemoryCopy<SharedMem<uint8_t*>, RacyMemMove>(
cx, sharedMemBase, sharedMemBase, memLen, memLen, dstByteOffset,
srcByteOffset, len, AtomicOperations::memmoveSafeWhenRacy);
}
/* static */ int32_t Instance::memCopy_m32(Instance* instance,
uint32_t dstByteOffset,
uint32_t srcByteOffset, uint32_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemCopyM32.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryCopy(cx, dstByteOffset, srcByteOffset, len, memBase);
}
/* static */ int32_t Instance::memCopyShared_m32(Instance* instance,
uint32_t dstByteOffset,
uint32_t srcByteOffset,
uint32_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemCopySharedM32.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryCopyShared(cx, dstByteOffset, srcByteOffset, len, memBase);
}
/* static */ int32_t Instance::memCopy_m64(Instance* instance,
uint64_t dstByteOffset,
uint64_t srcByteOffset, uint64_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemCopyM64.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryCopy(cx, dstByteOffset, srcByteOffset, len, memBase);
}
/* static */ int32_t Instance::memCopyShared_m64(Instance* instance,
uint64_t dstByteOffset,
uint64_t srcByteOffset,
uint64_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemCopySharedM64.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryCopyShared(cx, dstByteOffset, srcByteOffset, len, memBase);
}
// Dynamic dispatch to get the length of a memory given just the base and
// whether it is shared or not. This is only used for memCopy_any, where being
// slower is okay.
static inline size_t GetVolatileByteLength(uint8_t* memBase, bool isShared) {
if (isShared) {
return WasmSharedArrayRawBuffer::fromDataPtr(memBase)->volatileByteLength();
}
return WasmArrayRawBuffer::fromDataPtr(memBase)->byteLength();
}
/* static */ int32_t Instance::memCopy_any(Instance* instance,
uint64_t dstByteOffset,
uint64_t srcByteOffset, uint64_t len,
uint32_t dstMemIndex,
uint32_t srcMemIndex) {
MOZ_ASSERT(SASigMemCopyAny.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
using RacyMemMove =
void (*)(SharedMem<uint8_t*>, SharedMem<uint8_t*>, size_t);
const MemoryInstanceData& dstMemory =
instance->memoryInstanceData(dstMemIndex);
const MemoryInstanceData& srcMemory =
instance->memoryInstanceData(srcMemIndex);
uint8_t* dstMemBase = dstMemory.base;
uint8_t* srcMemBase = srcMemory.base;
size_t dstMemLen = GetVolatileByteLength(dstMemBase, dstMemory.isShared);
size_t srcMemLen = GetVolatileByteLength(srcMemBase, srcMemory.isShared);
return WasmMemoryCopy<SharedMem<uint8_t*>, RacyMemMove>(
cx, SharedMem<uint8_t*>::shared(dstMemBase),
SharedMem<uint8_t*>::shared(srcMemBase), dstMemLen, srcMemLen,
dstByteOffset, srcByteOffset, len, AtomicOperations::memmoveSafeWhenRacy);
}
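// Shared helper for the memory.fill variants below: bounds-check the
// destination range, then fill it with the low byte of `value` using the
// provided memset (the racy-safe variant for shared memory). Traps with -1
// on an out-of-bounds range.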
template <typename T, typename F, typename I>
inline int32_t WasmMemoryFill(JSContext* cx, T memBase, size_t memLen,
I byteOffset, uint32_t value, I len, F memSet) {
if (!MemoryBoundsCheck(byteOffset, len, memLen)) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
// The required write direction is upward, but that is not currently
// observable as there are no fences nor any read/write protect operation.
memSet(memBase + uintptr_t(byteOffset), int(value), size_t(len));
return 0;
}
template <typename I>
inline int32_t MemoryFill(JSContext* cx, I byteOffset, uint32_t value, I len,
uint8_t* memBase) {
const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->byteLength();
return WasmMemoryFill(cx, memBase, memLen, byteOffset, value, len, memset);
}
template <typename I>
inline int32_t MemoryFillShared(JSContext* cx, I byteOffset, uint32_t value,
I len, uint8_t* memBase) {
const WasmSharedArrayRawBuffer* rawBuf =
WasmSharedArrayRawBuffer::fromDataPtr(memBase);
size_t memLen = rawBuf->volatileByteLength();
return WasmMemoryFill(cx, SharedMem<uint8_t*>::shared(memBase), memLen,
byteOffset, value, len,
AtomicOperations::memsetSafeWhenRacy);
}
/* static */ int32_t Instance::memFill_m32(Instance* instance,
uint32_t byteOffset, uint32_t value,
uint32_t len, uint8_t* memBase) {
MOZ_ASSERT(SASigMemFillM32.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryFill(cx, byteOffset, value, len, memBase);
}
/* static */ int32_t Instance::memFillShared_m32(Instance* instance,
uint32_t byteOffset,
uint32_t value, uint32_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemFillSharedM32.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryFillShared(cx, byteOffset, value, len, memBase);
}
/* static */ int32_t Instance::memFill_m64(Instance* instance,
uint64_t byteOffset, uint32_t value,
uint64_t len, uint8_t* memBase) {
MOZ_ASSERT(SASigMemFillM64.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryFill(cx, byteOffset, value, len, memBase);
}
/* static */ int32_t Instance::memFillShared_m64(Instance* instance,
uint64_t byteOffset,
uint32_t value, uint64_t len,
uint8_t* memBase) {
MOZ_ASSERT(SASigMemFillSharedM64.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
return MemoryFillShared(cx, byteOffset, value, len, memBase);
}
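// Overflow-aware bounds checks for memory.init: return true if copying `len`
// bytes from the data segment at `srcOffset` into memory at `dstOffset` would
// run past either the segment or the memory.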
static bool BoundsCheckInit(uint32_t dstOffset, uint32_t srcOffset,
uint32_t len, size_t memLen, uint32_t segLen) {
uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
return dstOffsetLimit > memLen || srcOffsetLimit > segLen;
}
static bool BoundsCheckInit(uint64_t dstOffset, uint32_t srcOffset,
uint32_t len, size_t memLen, uint32_t segLen) {
uint64_t dstOffsetLimit = dstOffset + uint64_t(len);
uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
return dstOffsetLimit < dstOffset || dstOffsetLimit > memLen ||
srcOffsetLimit > segLen;
}
template <typename I>
static int32_t MemoryInit(JSContext* cx, Instance* instance,
uint32_t memoryIndex, I dstOffset, uint32_t srcOffset,
uint32_t len, const DataSegment* maybeSeg) {
if (!maybeSeg) {
if (len == 0 && srcOffset == 0) {
return 0;
}
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
const DataSegment& seg = *maybeSeg;
MOZ_RELEASE_ASSERT(!seg.active());
const uint32_t segLen = seg.bytes.length();
WasmMemoryObject* mem = instance->memory(memoryIndex);
const size_t memLen = mem->volatileMemoryLength();
// We are proposing to copy
//
// seg.bytes.begin()[ srcOffset .. srcOffset + len - 1 ]
// to
// memoryBase[ dstOffset .. dstOffset + len - 1 ]
if (BoundsCheckInit(dstOffset, srcOffset, len, memLen, segLen)) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
// The required read/write direction is upward, but that is not currently
// observable as there are no fences nor any read/write protect operation.
SharedMem<uint8_t*> dataPtr = mem->buffer().dataPointerEither();
if (mem->isShared()) {
AtomicOperations::memcpySafeWhenRacy(
dataPtr + uintptr_t(dstOffset), (uint8_t*)seg.bytes.begin() + srcOffset,
len);
} else {
uint8_t* rawBuf = dataPtr.unwrap(/*Unshared*/);
memcpy(rawBuf + uintptr_t(dstOffset),
(const char*)seg.bytes.begin() + srcOffset, len);
}
return 0;
}
/* static */ int32_t Instance::memInit_m32(Instance* instance,
uint32_t dstOffset,
uint32_t srcOffset, uint32_t len,
uint32_t segIndex,
uint32_t memIndex) {
MOZ_ASSERT(SASigMemInitM32.failureMode == FailureMode::FailOnNegI32);
MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
"ensured by validation");
JSContext* cx = instance->cx();
return MemoryInit(cx, instance, memIndex, dstOffset, srcOffset, len,
instance->passiveDataSegments_[segIndex]);
}
/* static */ int32_t Instance::memInit_m64(Instance* instance,
uint64_t dstOffset,
uint32_t srcOffset, uint32_t len,
uint32_t segIndex,
uint32_t memIndex) {
MOZ_ASSERT(SASigMemInitM64.failureMode == FailureMode::FailOnNegI32);
MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
"ensured by validation");
JSContext* cx = instance->cx();
return MemoryInit(cx, instance, memIndex, dstOffset, srcOffset, len,
instance->passiveDataSegments_[segIndex]);
}
//////////////////////////////////////////////////////////////////////////////
//
// Bulk table operations.
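// table.copy: copies `len` elements between (possibly the same) tables.
// When the ranges overlap within one table and the destination is above the
// source, the copy runs backwards so elements are not clobbered before they
// are read. Returns -1 on an out-of-bounds range or OOM.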
/* static */ int32_t Instance::tableCopy(Instance* instance, uint32_t dstOffset,
uint32_t srcOffset, uint32_t len,
uint32_t dstTableIndex,
uint32_t srcTableIndex) {
MOZ_ASSERT(SASigTableCopy.failureMode == FailureMode::FailOnNegI32);
JSContext* cx = instance->cx();
const SharedTable& srcTable = instance->tables()[srcTableIndex];
uint32_t srcTableLen = srcTable->length();
const SharedTable& dstTable = instance->tables()[dstTableIndex];
uint32_t dstTableLen = dstTable->length();
// Bounds check and deal with arithmetic overflow.
uint64_t dstOffsetLimit = uint64_t(dstOffset) + len;
uint64_t srcOffsetLimit = uint64_t(srcOffset) + len;
if (dstOffsetLimit > dstTableLen || srcOffsetLimit > srcTableLen) {
ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
return -1;
}
bool isOOM = false;
if (&srcTable == &dstTable && dstOffset > srcOffset) {
for (uint32_t i = len; i > 0; i--) {
if (!dstTable->copy(cx, *srcTable, dstOffset + (i - 1),
srcOffset + (i - 1))) {
isOOM = true;
break;
}
}
} else if (&srcTable == &dstTable && dstOffset == srcOffset) {
// No-op
} else {
for (uint32_t i = 0; i < len; i++) {
if (!dstTable->copy(cx, *srcTable, dstOffset + i, srcOffset + i)) {
isOOM = true;
break;
}
}
}
if (isOOM) {
return -1;
}
return 0;
}
#ifdef DEBUG
static bool AllSegmentsArePassive(const DataSegmentVector& vec) {
for (const DataSegment* seg : vec) {
if (seg->active()) {
return false;
}
}
return true;
}
#endif
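// Write the module's active data and element segments into their target
// memories and tables at instantiation time, evaluating each segment's offset
// init expression and bounds-checking it against the current memory or table
// length.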
bool Instance::initSegments(JSContext* cx,
const DataSegmentVector& dataSegments,
const ModuleElemSegmentVector& elemSegments) {
MOZ_ASSERT_IF(codeMeta().memories.length() == 0,
AllSegmentsArePassive(dataSegments));
Rooted<WasmInstanceObject*> instanceObj(cx, object());
// Write data/elem segments into memories/tables.
for (const ModuleElemSegment& seg : elemSegments) {
if (seg.active()) {
RootedVal offsetVal(cx);
if (!seg.offset().evaluate(cx, instanceObj, &offsetVal)) {
return false; // OOM
}
const wasm::Table* table = tables()[seg.tableIndex];
uint64_t offset = table->indexType() == IndexType::I32
? offsetVal.get().i32()
: offsetVal.get().i64();
uint64_t tableLength = table->length();
if (offset > tableLength || tableLength - offset < seg.numElements()) {
JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
JSMSG_WASM_OUT_OF_BOUNDS);
return false;
}
if (!initElems(seg.tableIndex, seg, offset)) {
return false; // OOM
}
}
}
for (const DataSegment* seg : dataSegments) {
if (!seg->active()) {
continue;
}
Rooted<const WasmMemoryObject*> memoryObj(cx, memory(seg->memoryIndex));
size_t memoryLength = memoryObj->volatileMemoryLength();
uint8_t* memoryBase =
memoryObj->buffer().dataPointerEither().unwrap(/* memcpy */);
RootedVal offsetVal(cx);
if (!seg->offset().evaluate(cx, instanceObj, &offsetVal)) {
return false; // OOM
}
uint64_t offset = memoryObj->indexType() == IndexType::I32
? offsetVal.get().i32()
: offsetVal.get().i64();
uint32_t count = seg->bytes.length();