/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "vm/ArrayBufferObject-inl.h"
#include "vm/ArrayBufferObject.h"
#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Likely.h"
#include "mozilla/Maybe.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/TaggedAnonymousMemory.h"
#include <algorithm> // std::max, std::min
#include <memory> // std::uninitialized_copy_n
#include <string.h>
#if !defined(XP_WIN) && !defined(__wasi__)
# include <sys/mman.h>
#endif
#include <tuple> // std::tuple
#include <type_traits>
#ifdef MOZ_VALGRIND
# include <valgrind/memcheck.h>
#endif
#include "jsnum.h"
#include "jstypes.h"
#include "gc/Barrier.h"
#include "gc/Memory.h"
#include "js/ArrayBuffer.h"
#include "js/Conversions.h"
#include "js/experimental/TypedData.h" // JS_IsArrayBufferViewObject
#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
#include "js/MemoryMetrics.h"
#include "js/Prefs.h"
#include "js/PropertySpec.h"
#include "js/SharedArrayBuffer.h"
#include "js/Wrapper.h"
#include "util/WindowsWrapper.h"
#include "vm/GlobalObject.h"
#include "vm/JSContext.h"
#include "vm/JSObject.h"
#include "vm/SharedArrayObject.h"
#include "vm/Warnings.h" // js::WarnNumberASCII
#include "wasm/WasmConstants.h"
#include "wasm/WasmLog.h"
#include "wasm/WasmMemory.h"
#include "wasm/WasmModuleTypes.h"
#include "wasm/WasmProcess.h"
#include "gc/GCContext-inl.h"
#include "gc/Marking-inl.h"
#include "vm/NativeObject-inl.h"
#include "vm/Realm-inl.h" // js::AutoRealm
using JS::ToInt32;
using js::wasm::IndexType;
using js::wasm::Pages;
using mozilla::Atomic;
using mozilla::CheckedInt;
using mozilla::DebugOnly;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::Some;
using namespace js;
// Wasm allows large amounts of memory to be reserved at a time. On 64-bit
// platforms (with "huge memories") we reserve around 4GB of virtual address
// space for every wasm memory; on 32-bit platforms we usually do not, but users
// often initialize memories in the hundreds of megabytes.
//
// If too many wasm memories remain live, we run up against system resource
// exhaustion (address space or number of memory map descriptors) - see bug
// 1068684, bug 1073934, bug 1517412, bug 1502733 for details. The limiting case
// seems to be Android on ARM64, where the per-process address space is limited
// to 4TB (39 bits) by the organization of the page tables. An earlier problem
// was Windows Vista Home 64-bit, where the per-process address space is limited
// to 8TB (40 bits). And 32-bit platforms only have 4GB of address space anyway.
//
// Thus we track the amount of memory reserved for wasm, and set a limit per
// process. We trigger GC work when we approach the limit and we throw an OOM
// error if the per-process limit is exceeded. The limit (WasmReservedBytesMax)
// is specific to architecture, OS, and OS configuration.
//
// Since the WasmReservedBytesMax limit is not generally accounted for by
// any existing GC-trigger heuristics, we need an extra heuristic for triggering
// GCs when the caller is allocating memories rapidly without other garbage
// (e.g. bug 1773225). Thus, once the reserved memory crosses the threshold
// WasmReservedBytesStartTriggering, we start triggering GCs every
// WasmReservedBytesPerTrigger bytes. Once we reach
// WasmReservedBytesStartSyncFullGC bytes reserved, we perform expensive
// non-incremental full GCs as a last-ditch effort to avoid unnecessary failure.
// Once we reach WasmReservedBytesMax, we perform further full GCs before giving
// up.
//
// (History: The original implementation only tracked the number of "huge
// memories" allocated by WASM, but this was found to be insufficient because
// 32-bit platforms have similar resource exhaustion issues. We now track
// reserved bytes directly.)
//
// (We also used to reserve significantly more than 4GB for huge memories, but
// this was reduced in bug 1442544.)
// ASAN and TSAN use a ton of vmem for bookkeeping leaving a lot less for the
// program so use a lower limit.
// Divisor applied to the reservation limits below: sanitizer builds get half
// the normal budget because ASAN/TSAN shadow bookkeeping consumes vmem.
#if defined(MOZ_TSAN) || defined(MOZ_ASAN)
static const uint64_t WasmMemAsanOverhead = 2;
#else
static const uint64_t WasmMemAsanOverhead = 1;
#endif
// WasmReservedStartTriggering + WasmReservedPerTrigger must be well below
// WasmReservedStartSyncFullGC in order to provide enough time for incremental
// GC to do its job.
#if defined(JS_CODEGEN_ARM64) && defined(ANDROID)
// Android/ARM64: per-process address space is limited (39-bit page tables),
// so keep the budget small (~75 huge-memory reservations).
static const uint64_t WasmReservedBytesMax =
    75 * wasm::HugeMappedSize / WasmMemAsanOverhead;
static const uint64_t WasmReservedBytesStartTriggering =
    15 * wasm::HugeMappedSize;
static const uint64_t WasmReservedBytesStartSyncFullGC =
    WasmReservedBytesMax - 15 * wasm::HugeMappedSize;
static const uint64_t WasmReservedBytesPerTrigger = 15 * wasm::HugeMappedSize;
#elif defined(WASM_SUPPORTS_HUGE_MEMORY)
// Other 64-bit platforms with huge memory: allow ~1000 reservations.
static const uint64_t WasmReservedBytesMax =
    1000 * wasm::HugeMappedSize / WasmMemAsanOverhead;
static const uint64_t WasmReservedBytesStartTriggering =
    100 * wasm::HugeMappedSize;
static const uint64_t WasmReservedBytesStartSyncFullGC =
    WasmReservedBytesMax - 100 * wasm::HugeMappedSize;
static const uint64_t WasmReservedBytesPerTrigger = 100 * wasm::HugeMappedSize;
#else  // 32-bit (and weird 64-bit platforms without huge memory)
// 32-bit: the whole 4GB address space is the constraint; budget half of it.
static const uint64_t GiB = 1024 * 1024 * 1024;
static const uint64_t WasmReservedBytesMax =
    (4 * GiB) / 2 / WasmMemAsanOverhead;
static const uint64_t WasmReservedBytesStartTriggering = (4 * GiB) / 8;
static const uint64_t WasmReservedBytesStartSyncFullGC =
    WasmReservedBytesMax - (4 * GiB) / 8;
static const uint64_t WasmReservedBytesPerTrigger = (4 * GiB) / 8;
#endif
// The total number of bytes reserved for wasm memories, process-wide.
static Atomic<uint64_t, mozilla::ReleaseAcquire> wasmReservedBytes(0);
// The number of bytes of wasm memory reserved since the last GC trigger.
static Atomic<uint64_t, mozilla::ReleaseAcquire> wasmReservedBytesSinceLast(0);
// Returns the current process-wide total of bytes reserved for wasm memories.
uint64_t js::WasmReservedBytes() { return wasmReservedBytes; }
// Ensure a requested buffer size does not exceed the implementation limit.
// Returns true when `nbytes` is allocatable; otherwise reports a
// JSMSG_BAD_ARRAY_LENGTH error on `cx` and returns false.
[[nodiscard]] static bool CheckArrayBufferTooLarge(JSContext* cx,
                                                   uint64_t nbytes) {
  if (MOZ_LIKELY(nbytes <= ArrayBufferObject::ByteLengthLimit)) {
    return true;
  }
  JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                            JSMSG_BAD_ARRAY_LENGTH);
  return false;
}
// Reserve `mappedSize` bytes of address space for a wasm linear memory and
// commit (make accessible) the leading `initialCommittedSize` bytes. Both
// sizes must be multiples of the system page size. Returns nullptr on
// failure; on success the reservation is charged against the process-wide
// wasm reservation budget until UnmapBufferMemory is called.
void* js::MapBufferMemory(wasm::IndexType t, size_t mappedSize,
                          size_t initialCommittedSize) {
  MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
  MOZ_ASSERT(initialCommittedSize % gc::SystemPageSize() == 0);
  MOZ_ASSERT(initialCommittedSize <= mappedSize);
  // Charge the budget up front; the scope-exit refunds it on every failure
  // path below and is cancelled via failed.release() on success.
  auto failed = mozilla::MakeScopeExit(
      [&] { wasmReservedBytes -= uint64_t(mappedSize); });
  wasmReservedBytes += uint64_t(mappedSize);
  // Test >= to guard against the case where multiple extant runtimes
  // race to allocate.
  if (wasmReservedBytes >= WasmReservedBytesMax) {
    // Give the embedder a chance to free memory (e.g. by GCing), then
    // re-check the limit.
    if (OnLargeAllocationFailure) {
      OnLargeAllocationFailure();
    }
    if (wasmReservedBytes >= WasmReservedBytesMax) {
      return nullptr;
    }
  }
#ifdef XP_WIN
  // Windows: reserve the whole range inaccessible, then commit the initial
  // region read/write.
  void* data = VirtualAlloc(nullptr, mappedSize, MEM_RESERVE, PAGE_NOACCESS);
  if (!data) {
    return nullptr;
  }
  if (!VirtualAlloc(data, initialCommittedSize, MEM_COMMIT, PAGE_READWRITE)) {
    VirtualFree(data, 0, MEM_RELEASE);
    return nullptr;
  }
#elif defined(__wasi__)
  // WASI has no mmap/mprotect: allocate page-aligned memory and zero it all.
  void* data = nullptr;
  if (int err = posix_memalign(&data, gc::SystemPageSize(), mappedSize)) {
    MOZ_ASSERT(err == ENOMEM);
    return nullptr;
  }
  MOZ_ASSERT(data);
  memset(data, 0, mappedSize);
#else  // !XP_WIN && !__wasi__
  // POSIX: map the whole range PROT_NONE, then enable access on the initial
  // committed region.
  void* data =
      MozTaggedAnonymousMmap(nullptr, mappedSize, PROT_NONE,
                             MAP_PRIVATE | MAP_ANON, -1, 0, "wasm-reserved");
  if (data == MAP_FAILED) {
    return nullptr;
  }
  // Note we will waste a page on zero-sized memories here
  if (mprotect(data, initialCommittedSize, PROT_READ | PROT_WRITE)) {
    munmap(data, mappedSize);
    return nullptr;
  }
#endif  // !XP_WIN && !__wasi__
#if defined(MOZ_VALGRIND) && \
    defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
  // Silence Valgrind for the uncommitted tail; wasm signal handling covers
  // accesses that land there.
  VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(
      (unsigned char*)data + initialCommittedSize,
      mappedSize - initialCommittedSize);
#endif
  // Success: keep the reservation charged.
  failed.release();
  return data;
}
// Commit `delta` additional bytes starting at `dataEnd` (the current end of
// the committed region) so they become readable/writable. `delta` must be a
// non-zero multiple of the system page size. Returns false if the platform
// commit operation fails.
bool js::CommitBufferMemory(void* dataEnd, size_t delta) {
  MOZ_ASSERT(delta);
  MOZ_ASSERT(delta % gc::SystemPageSize() == 0);
#ifdef XP_WIN
  if (!VirtualAlloc(dataEnd, delta, MEM_COMMIT, PAGE_READWRITE)) {
    return false;
  }
#elif defined(__wasi__)
  // posix_memalign'd memory is already committed
  return true;
#else
  if (mprotect(dataEnd, delta, PROT_READ | PROT_WRITE)) {
    return false;
  }
#endif  // XP_WIN
#if defined(MOZ_VALGRIND) && \
    defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
  // The pages are now legitimately accessible; re-enable Valgrind
  // addressability error reporting for them.
  VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, delta);
#endif
  return true;
}
// Extend an existing reservation from `mappedSize` to `newMappedSize` bytes
// in place (without moving `dataPointer`). Both sizes must be page-size
// multiples. Returns false when the platform cannot extend the mapping in
// place; callers must treat that as a soft failure.
bool js::ExtendBufferMapping(void* dataPointer, size_t mappedSize,
                             size_t newMappedSize) {
  MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
  MOZ_ASSERT(newMappedSize % gc::SystemPageSize() == 0);
  MOZ_ASSERT(newMappedSize >= mappedSize);
#ifdef XP_WIN
  void* mappedEnd = (char*)dataPointer + mappedSize;
  // Use size_t, not uint32_t: the difference of two size_t mapped sizes can
  // exceed 4GiB on 64-bit builds, and VirtualAlloc takes a SIZE_T length, so
  // a 32-bit delta would silently truncate the reservation request.
  size_t delta = newMappedSize - mappedSize;
  if (!VirtualAlloc(mappedEnd, delta, MEM_RESERVE, PAGE_NOACCESS)) {
    return false;
  }
  return true;
#elif defined(__wasi__)
  return false;
#elif defined(XP_LINUX)
  // Note this will not move memory (no MREMAP_MAYMOVE specified)
  if (MAP_FAILED == mremap(dataPointer, mappedSize, newMappedSize, 0)) {
    return false;
  }
  return true;
#else
  // No mechanism for remapping on MacOS and other Unices. Luckily
  // shouldn't need it here as most of these are 64-bit.
  return false;
#endif
}
// Release a mapping created by MapBufferMemory and refund its bytes to the
// process-wide wasm reservation budget.
void js::UnmapBufferMemory(wasm::IndexType t, void* base, size_t mappedSize) {
  MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
#ifdef XP_WIN
  VirtualFree(base, 0, MEM_RELEASE);
#elif defined(__wasi__)
  free(base);
#else
  munmap(base, mappedSize);
#endif  // XP_WIN
#if defined(MOZ_VALGRIND) && \
    defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
  // Re-enable Valgrind addressability reporting for the released range.
  VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)base,
                                                mappedSize);
#endif
  // Untrack reserved memory *after* releasing memory -- otherwise, a race
  // condition could enable the creation of unlimited buffers.
  wasmReservedBytes -= uint64_t(mappedSize);
}
/*
* ArrayBufferObject
*
* This class holds the underlying raw buffer that the TypedArrayObject classes
* access. It can be created explicitly and passed to a TypedArrayObject, or
* can be created implicitly by constructing a TypedArrayObject with a size.
*/
/*
* ArrayBufferObject (base)
*/
// Class operations shared by all ArrayBuffer kinds; only finalize is needed.
static const JSClassOps ArrayBufferObjectClassOps = {
    nullptr,                      // addProperty
    nullptr,                      // delProperty
    nullptr,                      // enumerate
    nullptr,                      // newEnumerate
    nullptr,                      // resolve
    nullptr,                      // mayResolve
    ArrayBufferObject::finalize,  // finalize
    nullptr,                      // call
    nullptr,                      // construct
    nullptr,                      // trace
};
// Static methods on the ArrayBuffer constructor.
static const JSFunctionSpec arraybuffer_functions[] = {
    JS_FN("isView", ArrayBufferObject::fun_isView, 1, 0),
    JS_FS_END,
};
// Static accessors on the ArrayBuffer constructor (@@species).
static const JSPropertySpec arraybuffer_properties[] = {
    JS_SELF_HOSTED_SYM_GET(species, "$ArrayBufferSpecies", 0),
    JS_PS_END,
};
// Methods on ArrayBuffer.prototype. `resize` is Nightly-only while resizable
// ArrayBuffers are experimental.
static const JSFunctionSpec arraybuffer_proto_functions[] = {
    JS_SELF_HOSTED_FN("slice", "ArrayBufferSlice", 2, 0),
#ifdef NIGHTLY_BUILD
    JS_FN("resize", ArrayBufferObject::resize, 1, 0),
#endif
    JS_FN("transfer", ArrayBufferObject::transfer, 0, 0),
    JS_FN("transferToFixedLength", ArrayBufferObject::transferToFixedLength, 0,
          0),
    JS_FS_END,
};
// Accessors on ArrayBuffer.prototype.
static const JSPropertySpec arraybuffer_proto_properties[] = {
    JS_PSG("byteLength", ArrayBufferObject::byteLengthGetter, 0),
#ifdef NIGHTLY_BUILD
    JS_PSG("maxByteLength", ArrayBufferObject::maxByteLengthGetter, 0),
    JS_PSG("resizable", ArrayBufferObject::resizableGetter, 0),
#endif
    JS_PSG("detached", ArrayBufferObject::detachedGetter, 0),
    JS_STRING_SYM_PS(toStringTag, "ArrayBuffer", JSPROP_READONLY),
    JS_PS_END,
};
// Creates the plain object installed as ArrayBuffer.prototype.
static JSObject* CreateArrayBufferPrototype(JSContext* cx, JSProtoKey key) {
  return GlobalObject::createBlankPrototype(cx, cx->global(),
                                            &ArrayBufferObject::protoClass_);
}
static const ClassSpec ArrayBufferObjectClassSpec = {
    GenericCreateConstructor<ArrayBufferObject::class_constructor, 1,
                             gc::AllocKind::FUNCTION>,
    CreateArrayBufferPrototype,
    arraybuffer_functions,
    arraybuffer_properties,
    arraybuffer_proto_functions,
    arraybuffer_proto_properties,
};
// GC moved-object hooks: fixed-length and resizable buffers are distinct
// object layouts, so each gets its own instantiation of objectMoved.
static const ClassExtension FixedLengthArrayBufferObjectClassExtension = {
    ArrayBufferObject::objectMoved<
        FixedLengthArrayBufferObject>,  // objectMovedOp
};
static const ClassExtension ResizableArrayBufferObjectClassExtension = {
    ArrayBufferObject::objectMoved<
        ResizableArrayBufferObject>,  // objectMovedOp
};
const JSClass ArrayBufferObject::protoClass_ = {
    "ArrayBuffer.prototype",
    JSCLASS_HAS_CACHED_PROTO(JSProto_ArrayBuffer),
    JS_NULL_CLASS_OPS,
    &ArrayBufferObjectClassSpec,
};
const JSClass FixedLengthArrayBufferObject::class_ = {
    "ArrayBuffer",
    JSCLASS_DELAY_METADATA_BUILDER |
        JSCLASS_HAS_RESERVED_SLOTS(RESERVED_SLOTS) |
        JSCLASS_HAS_CACHED_PROTO(JSProto_ArrayBuffer) |
        JSCLASS_BACKGROUND_FINALIZE,
    &ArrayBufferObjectClassOps,
    &ArrayBufferObjectClassSpec,
    &FixedLengthArrayBufferObjectClassExtension,
};
const JSClass ResizableArrayBufferObject::class_ = {
    "ArrayBuffer",
    JSCLASS_DELAY_METADATA_BUILDER |
        JSCLASS_HAS_RESERVED_SLOTS(RESERVED_SLOTS) |
        JSCLASS_HAS_CACHED_PROTO(JSProto_ArrayBuffer) |
        JSCLASS_BACKGROUND_FINALIZE,
    &ArrayBufferObjectClassOps,
    &ArrayBufferObjectClassSpec,
    &ResizableArrayBufferObjectClassExtension,
};
// True iff `v` is an object of (any kind of) ArrayBuffer.
static bool IsArrayBuffer(HandleValue v) {
  return v.isObject() && v.toObject().is<ArrayBufferObject>();
}
#ifdef NIGHTLY_BUILD
// True iff `v` is specifically a resizable ArrayBuffer.
static bool IsResizableArrayBuffer(HandleValue v) {
  return v.isObject() && v.toObject().is<ResizableArrayBufferObject>();
}
#endif
/**
 * get ArrayBuffer.prototype.byteLength
 *
 * Non-generic part: `thisv` is guaranteed to be an ArrayBuffer.
 */
MOZ_ALWAYS_INLINE bool ArrayBufferObject::byteLengthGetterImpl(
    JSContext* cx, const CallArgs& args) {
  MOZ_ASSERT(IsArrayBuffer(args.thisv()));
  auto& buffer = args.thisv().toObject().as<ArrayBufferObject>();
  args.rval().setNumber(buffer.byteLength());
  return true;
}
// Generic entry point; dispatches to the impl once the receiver is checked.
bool ArrayBufferObject::byteLengthGetter(JSContext* cx, unsigned argc,
                                         Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  return CallNonGenericMethod<IsArrayBuffer, byteLengthGetterImpl>(cx, args);
}
// Whether a copy-and-detach operation should carry over the source buffer's
// resizability (distinguishes transfer from transferToFixedLength).
enum class PreserveResizability : bool { No, Yes };
/**
 * ArrayBufferCopyAndDetach ( arrayBuffer, newLength, preserveResizability )
 *
 * Shared implementation of ArrayBuffer.prototype.transfer and
 * ArrayBuffer.prototype.transferToFixedLength: copy `arrayBuffer` into a new
 * buffer of `newLength` bytes and detach the source. Returns nullptr with a
 * pending exception on failure.
 */
static ArrayBufferObject* ArrayBufferCopyAndDetach(
    JSContext* cx, Handle<ArrayBufferObject*> arrayBuffer,
    Handle<Value> newLength, PreserveResizability preserveResizability) {
  // Steps 1-2. (Not applicable in our implementation.)
  // Steps 3-4.
  uint64_t newByteLength;
  if (newLength.isUndefined()) {
    // Step 3.a.
    newByteLength = arrayBuffer->byteLength();
  } else {
    // Step 4.a.
    if (!ToIndex(cx, newLength, &newByteLength)) {
      return nullptr;
    }
  }
  // Step 5.
  if (arrayBuffer->isDetached()) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_TYPED_ARRAY_DETACHED);
    return nullptr;
  }
  // Length-pinned buffers may not be detached, so they can't be transferred.
  if (arrayBuffer->isLengthPinned()) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_ARRAYBUFFER_LENGTH_PINNED);
    return nullptr;
  }
  // Steps 6-7.
  mozilla::Maybe<size_t> maxByteLength;
  if (preserveResizability == PreserveResizability::Yes &&
      arrayBuffer->isResizable()) {
    auto* resizableBuffer = &arrayBuffer->as<ResizableArrayBufferObject>();
    maxByteLength = mozilla::Some(resizableBuffer->maxByteLength());
  }
  // Step 8. Buffers with a defined [[ArrayBufferDetachKey]] (wasm) can't be
  // transferred.
  if (arrayBuffer->hasDefinedDetachKey()) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_WASM_NO_TRANSFER);
    return nullptr;
  }
  // Steps 9-16.
  //
  // 25.1.2.1 AllocateArrayBuffer, step 2.
  // 6.2.9.1 CreateByteDataBlock, step 2.
  if (!CheckArrayBufferTooLarge(cx, newByteLength)) {
    return nullptr;
  }
  if (maxByteLength) {
    // A resizable result's new length must not exceed its preserved maximum.
    if (size_t(newByteLength) > *maxByteLength) {
      JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                                JSMSG_ARRAYBUFFER_LENGTH_LARGER_THAN_MAXIMUM);
      return nullptr;
    }
    Rooted<ResizableArrayBufferObject*> resizableBuffer(
        cx, &arrayBuffer->as<ResizableArrayBufferObject>());
    return ResizableArrayBufferObject::copyAndDetach(cx, size_t(newByteLength),
                                                     resizableBuffer);
  }
  return ArrayBufferObject::copyAndDetach(cx, size_t(newByteLength),
                                          arrayBuffer);
}
#ifdef NIGHTLY_BUILD
/**
 * get ArrayBuffer.prototype.maxByteLength
 *
 * Returns the maximum length a resizable buffer may grow to; for a
 * fixed-length buffer this is simply its current byteLength.
 */
bool ArrayBufferObject::maxByteLengthGetterImpl(JSContext* cx,
                                                const CallArgs& args) {
  MOZ_ASSERT(IsArrayBuffer(args.thisv()));
  auto* buffer = &args.thisv().toObject().as<ArrayBufferObject>();
  // Steps 4-6.
  size_t maxByteLength;
  if (buffer->isResizable()) {
    maxByteLength = buffer->as<ResizableArrayBufferObject>().maxByteLength();
  } else {
    maxByteLength = buffer->byteLength();
  }
  // Detached buffers report zero, consistent with their zero byteLength.
  MOZ_ASSERT_IF(buffer->isDetached(), maxByteLength == 0);
  // Step 7.
  args.rval().setNumber(maxByteLength);
  return true;
}
/**
 * get ArrayBuffer.prototype.maxByteLength
 */
bool ArrayBufferObject::maxByteLengthGetter(JSContext* cx, unsigned argc,
                                            Value* vp) {
  // Steps 1-3.
  CallArgs args = CallArgsFromVp(argc, vp);
  return CallNonGenericMethod<IsArrayBuffer, maxByteLengthGetterImpl>(cx, args);
}
/**
 * get ArrayBuffer.prototype.resizable
 */
bool ArrayBufferObject::resizableGetterImpl(JSContext* cx,
                                            const CallArgs& args) {
  MOZ_ASSERT(IsArrayBuffer(args.thisv()));
  // Step 4.
  auto* buffer = &args.thisv().toObject().as<ArrayBufferObject>();
  args.rval().setBoolean(buffer->isResizable());
  return true;
}
/**
 * get ArrayBuffer.prototype.resizable
 */
bool ArrayBufferObject::resizableGetter(JSContext* cx, unsigned argc,
                                        Value* vp) {
  // Steps 1-3.
  CallArgs args = CallArgsFromVp(argc, vp);
  return CallNonGenericMethod<IsArrayBuffer, resizableGetterImpl>(cx, args);
}
#endif
/**
 * get ArrayBuffer.prototype.detached
 *
 * Non-generic part: `thisv` is guaranteed to be an ArrayBuffer.
 */
bool ArrayBufferObject::detachedGetterImpl(JSContext* cx,
                                           const CallArgs& args) {
  MOZ_ASSERT(IsArrayBuffer(args.thisv()));
  // Step 4: report whether this buffer has been detached.
  args.rval().setBoolean(
      args.thisv().toObject().as<ArrayBufferObject>().isDetached());
  return true;
}
/**
 * get ArrayBuffer.prototype.detached
 */
bool ArrayBufferObject::detachedGetter(JSContext* cx, unsigned argc,
                                       Value* vp) {
  // Steps 1-3.
  CallArgs args = CallArgsFromVp(argc, vp);
  return CallNonGenericMethod<IsArrayBuffer, detachedGetterImpl>(cx, args);
}
/**
 * ArrayBuffer.prototype.transfer ( [ newLength ] )
 *
 * Copy this buffer into a new one (preserving resizability) and detach the
 * original.
 */
bool ArrayBufferObject::transferImpl(JSContext* cx, const CallArgs& args) {
  MOZ_ASSERT(IsArrayBuffer(args.thisv()));
  // Steps 1-2.
  Rooted<ArrayBufferObject*> buffer(
      cx, &args.thisv().toObject().as<ArrayBufferObject>());
  ArrayBufferObject* copied = ArrayBufferCopyAndDetach(
      cx, buffer, args.get(0), PreserveResizability::Yes);
  if (!copied) {
    return false;
  }
  args.rval().setObject(*copied);
  return true;
}
/**
 * ArrayBuffer.prototype.transfer ( [ newLength ] )
 */
bool ArrayBufferObject::transfer(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  return CallNonGenericMethod<IsArrayBuffer, transferImpl>(cx, args);
}
/**
 * ArrayBuffer.prototype.transferToFixedLength ( [ newLength ] )
 *
 * Like `transfer`, except the new buffer is always fixed-length even when the
 * source buffer was resizable.
 */
bool ArrayBufferObject::transferToFixedLengthImpl(JSContext* cx,
                                                  const CallArgs& args) {
  MOZ_ASSERT(IsArrayBuffer(args.thisv()));
  // Steps 1-2.
  Rooted<ArrayBufferObject*> buffer(
      cx, &args.thisv().toObject().as<ArrayBufferObject>());
  auto* newBuffer = ArrayBufferCopyAndDetach(cx, buffer, args.get(0),
                                             PreserveResizability::No);
  if (!newBuffer) {
    return false;
  }
  args.rval().setObject(*newBuffer);
  return true;
}
/**
 * ArrayBuffer.prototype.transferToFixedLength ( [ newLength ] )
 */
bool ArrayBufferObject::transferToFixedLength(JSContext* cx, unsigned argc,
                                              Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  return CallNonGenericMethod<IsArrayBuffer, transferToFixedLengthImpl>(cx,
                                                                        args);
}
#ifdef NIGHTLY_BUILD
/**
 * ArrayBuffer.prototype.resize ( newLength )
 *
 * Grow or shrink a resizable ArrayBuffer in place, up to its maxByteLength.
 */
bool ArrayBufferObject::resizeImpl(JSContext* cx, const CallArgs& args) {
  MOZ_ASSERT(IsResizableArrayBuffer(args.thisv()));
  Rooted<ResizableArrayBufferObject*> obj(
      cx, &args.thisv().toObject().as<ResizableArrayBufferObject>());
  // Step 4.
  uint64_t newByteLength;
  if (!ToIndex(cx, args.get(0), &newByteLength)) {
    return false;
  }
  // Step 5.
  if (obj->isDetached()) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_TYPED_ARRAY_DETACHED);
    return false;
  }
  // Length-pinned buffers cannot be resized.
  if (obj->isLengthPinned()) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_ARRAYBUFFER_LENGTH_PINNED);
    return false;
  }
  // Step 6.
  if (newByteLength > obj->maxByteLength()) {
    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                              JSMSG_ARRAYBUFFER_LENGTH_LARGER_THAN_MAXIMUM);
    return false;
  }
  // Steps 7-15.
  obj->resize(size_t(newByteLength));
  // Step 16.
  args.rval().setUndefined();
  return true;
}
/**
 * ArrayBuffer.prototype.resize ( newLength )
 */
bool ArrayBufferObject::resize(JSContext* cx, unsigned argc, Value* vp) {
  // Steps 1-3.
  CallArgs args = CallArgsFromVp(argc, vp);
  return CallNonGenericMethod<IsResizableArrayBuffer, resizeImpl>(cx, args);
}
#endif
/*
 * ArrayBuffer.isView(obj); ES6 (Dec 2013 draft) 24.1.3.1
 *
 * Returns true iff the argument is an ArrayBuffer view object (a typed array
 * or DataView).
 */
bool ArrayBufferObject::fun_isView(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  bool isView = false;
  if (args.get(0).isObject()) {
    isView = JS_IsArrayBufferViewObject(&args.get(0).toObject());
  }
  args.rval().setBoolean(isView);
  return true;
}
// ES2024 draft rev 3a773fc9fae58be023228b13dbbd402ac18eeb6b
// 25.1.4.1 ArrayBuffer ( length [ , options ] )
bool ArrayBufferObject::class_constructor(JSContext* cx, unsigned argc,
                                          Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  // Step 1.
  if (!ThrowIfNotConstructing(cx, args, "ArrayBuffer")) {
    return false;
  }
  // Step 2.
  uint64_t byteLength;
  if (!ToIndex(cx, args.get(0), &byteLength)) {
    return false;
  }
  // Step 3.
  mozilla::Maybe<uint64_t> maxByteLength;
#ifdef NIGHTLY_BUILD
  // Resizable buffers are gated behind an experimental pref on Nightly.
  if (JS::Prefs::experimental_arraybuffer_resizable()) {
    // Inline call to GetArrayBufferMaxByteLengthOption.
    if (args.get(1).isObject()) {
      Rooted<JSObject*> options(cx, &args[1].toObject());
      Rooted<Value> val(cx);
      if (!GetProperty(cx, options, options, cx->names().maxByteLength, &val)) {
        return false;
      }
      if (!val.isUndefined()) {
        uint64_t maxByteLengthInt;
        if (!ToIndex(cx, val, &maxByteLengthInt)) {
          return false;
        }
        // 25.1.3.1 AllocateArrayBuffer, step 3.a.
        if (byteLength > maxByteLengthInt) {
          JS_ReportErrorNumberASCII(
              cx, GetErrorMessage, nullptr,
              JSMSG_ARRAYBUFFER_LENGTH_LARGER_THAN_MAXIMUM);
          return false;
        }
        maxByteLength = mozilla::Some(maxByteLengthInt);
      }
    }
  }
#endif
  // Step 4 (Inlined 25.1.3.1 AllocateArrayBuffer).
  // 25.1.3.1, step 4 (Inlined 10.1.13 OrdinaryCreateFromConstructor, step 2).
  RootedObject proto(cx);
  if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_ArrayBuffer,
                                          &proto)) {
    return false;
  }
  // 25.1.3.1, step 5 (Inlined 6.2.9.1 CreateByteDataBlock, step 2).
  if (!CheckArrayBufferTooLarge(cx, byteLength)) {
    return false;
  }
  if (maxByteLength) {
    // 25.1.3.1, step 8.a.
    if (!CheckArrayBufferTooLarge(cx, *maxByteLength)) {
      return false;
    }
    // 25.1.3.1, remaining steps: allocate a resizable buffer.
    auto* bufobj = ResizableArrayBufferObject::createZeroed(
        cx, byteLength, *maxByteLength, proto);
    if (!bufobj) {
      return false;
    }
    args.rval().setObject(*bufobj);
    return true;
  }
  // 25.1.3.1, remaining steps: allocate a fixed-length buffer.
  JSObject* bufobj = createZeroed(cx, byteLength, proto);
  if (!bufobj) {
    return false;
  }
  args.rval().setObject(*bufobj);
  return true;
}
// Owning pointer for malloc'd ArrayBuffer contents; freed via JS::FreePolicy.
using ArrayBufferContents = UniquePtr<uint8_t[], JS::FreePolicy>;
// Allocate `nbytes` of *uninitialized* ArrayBuffer contents. On failure,
// reports OOM on `cx` and returns null contents.
static ArrayBufferContents AllocateUninitializedArrayBufferContents(
    JSContext* cx, size_t nbytes) {
  // First attempt a normal allocation.
  uint8_t* p =
      cx->maybe_pod_arena_malloc<uint8_t>(js::ArrayBufferContentsArena, nbytes);
  if (MOZ_UNLIKELY(!p)) {
    // Otherwise attempt a large allocation, calling the
    // large-allocation-failure callback if necessary.
    p = static_cast<uint8_t*>(cx->runtime()->onOutOfMemoryCanGC(
        js::AllocFunction::Malloc, js::ArrayBufferContentsArena, nbytes));
    if (!p) {
      ReportOutOfMemory(cx);
    }
  }
  return ArrayBufferContents(p);
}
// Allocate `nbytes` of zero-initialized ArrayBuffer contents. On failure,
// reports OOM on `cx` and returns null contents.
static ArrayBufferContents AllocateArrayBufferContents(JSContext* cx,
                                                       size_t nbytes) {
  // First attempt a normal allocation.
  uint8_t* p =
      cx->maybe_pod_arena_calloc<uint8_t>(js::ArrayBufferContentsArena, nbytes);
  if (MOZ_UNLIKELY(!p)) {
    // Otherwise attempt a large allocation, calling the
    // large-allocation-failure callback if necessary.
    p = static_cast<uint8_t*>(cx->runtime()->onOutOfMemoryCanGC(
        js::AllocFunction::Calloc, js::ArrayBufferContentsArena, nbytes));
    if (!p) {
      ReportOutOfMemory(cx);
    }
  }
  return ArrayBufferContents(p);
}
// Reallocate ArrayBuffer contents from `oldSize` to `newSize` bytes,
// realloc-style (existing data prefix preserved). On failure, reports OOM on
// `cx` and returns null contents.
static ArrayBufferContents ReallocateArrayBufferContents(JSContext* cx,
                                                         uint8_t* old,
                                                         size_t oldSize,
                                                         size_t newSize) {
  // First attempt a normal reallocation.
  uint8_t* p = cx->maybe_pod_arena_realloc<uint8_t>(
      js::ArrayBufferContentsArena, old, oldSize, newSize);
  if (MOZ_UNLIKELY(!p)) {
    // Otherwise attempt a large allocation, calling the
    // large-allocation-failure callback if necessary.
    p = static_cast<uint8_t*>(cx->runtime()->onOutOfMemoryCanGC(
        js::AllocFunction::Realloc, js::ArrayBufferContentsArena, newSize,
        old));
    if (!p) {
      ReportOutOfMemory(cx);
    }
  }
  return ArrayBufferContents(p);
}
// Allocate a fresh contents block the same size as `buffer` and copy the
// buffer's bytes into it. Returns null contents (with OOM reported) when the
// allocation fails.
static ArrayBufferContents NewCopiedBufferContents(
    JSContext* cx, Handle<ArrayBufferObject*> buffer) {
  size_t nbytes = buffer->byteLength();
  ArrayBufferContents copy =
      AllocateUninitializedArrayBufferContents(cx, nbytes);
  if (copy && nbytes > 0) {
    memcpy(copy.get(), buffer->dataPointer(), nbytes);
  }
  return copy;
}
/* static */
// Detach `buffer`: notify every view, release or disown the data, and mark
// the buffer as detached with zero length. Callers guarantee the buffer is
// neither prepared for asm.js nor length-pinned.
void ArrayBufferObject::detach(JSContext* cx,
                               Handle<ArrayBufferObject*> buffer) {
  cx->check(buffer);
  MOZ_ASSERT(!buffer->isPreparedForAsmJS());
  MOZ_ASSERT(!buffer->isLengthPinned());
  // Update all views of the buffer to account for the buffer having been
  // detached, and clear the buffer's data and list of views.
  auto& innerViews = ObjectRealm::get(buffer).innerViews.get();
  if (InnerViewTable::ViewVector* views =
          innerViews.maybeViewsUnbarriered(buffer)) {
    for (size_t i = 0; i < views->length(); i++) {
      JSObject* view = (*views)[i];
      view->as<ArrayBufferViewObject>().notifyBufferDetached();
    }
    innerViews.removeViews(buffer);
  }
  // The first view is stored on the buffer itself, separate from the
  // inner-view table.
  if (JSObject* view = buffer->firstView()) {
    view->as<ArrayBufferViewObject>().notifyBufferDetached();
    buffer->setFirstView(nullptr);
  }
  // Release the data (if any) and install the no-data sentinel contents.
  if (buffer->dataPointer()) {
    buffer->releaseData(cx->gcContext());
    buffer->setDataPointer(BufferContents::createNoData());
  }
  // A detached buffer has zero length; a resizable one also reports a zero
  // maximum.
  buffer->setByteLength(0);
  buffer->setIsDetached();
  if (buffer->isResizable()) {
    buffer->as<ResizableArrayBufferObject>().setMaxByteLength(0);
  }
}
// Resize this buffer in place to `newByteLength` (must not exceed
// maxByteLength). The underlying allocation is unchanged; shrinking zeroes
// the abandoned tail so a later grow re-exposes zeroed memory. All views are
// notified afterwards.
void ResizableArrayBufferObject::resize(size_t newByteLength) {
  MOZ_ASSERT(!isPreparedForAsmJS());
  MOZ_ASSERT(!isWasm());
  MOZ_ASSERT(!isDetached());
  MOZ_ASSERT(!isLengthPinned());
  MOZ_ASSERT(isResizable());
  MOZ_ASSERT(newByteLength <= maxByteLength());
  // Clear the bytes between `data[newByteLength..oldByteLength]` when
  // shrinking the buffer. We don't need to clear any bytes when growing the
  // buffer, because the new space was either initialized to zero when creating
  // the buffer, or a prior shrink zeroed it out here.
  size_t oldByteLength = byteLength();
  if (newByteLength < oldByteLength) {
    size_t nbytes = oldByteLength - newByteLength;
    memset(dataPointer() + newByteLength, 0, nbytes);
  }
  setByteLength(newByteLength);
  // Update all views of the buffer to account for the buffer having been
  // resized.
  auto& innerViews = ObjectRealm::get(this).innerViews.get();
  if (InnerViewTable::ViewVector* views =
          innerViews.maybeViewsUnbarriered(this)) {
    for (auto& view : *views) {
      view->notifyBufferResized();
    }
  }
  // The first view is stored separately from the inner-view table.
  if (auto* view = firstView()) {
    view->as<ArrayBufferViewObject>().notifyBufferResized();
  }
}
/* clang-format off */
/*
* [SMDOC] WASM Linear Memory structure
*
* Wasm Raw Buf Linear Memory Structure
*
* The linear heap in Wasm is an mmaped array buffer. Several constants manage
* its lifetime:
*
* - byteLength - the wasm-visible current length of the buffer in
* bytes. Accesses in the range [0, byteLength] succeed. May only increase.
*
* - boundsCheckLimit - the size against which we perform bounds checks. The
* value of this depends on the bounds checking strategy chosen for the array
* buffer and the specific bounds checking semantics. For asm.js code and
 * for wasm code running with explicit bounds checking, it is always the
* same as the byteLength. For wasm code using the huge-memory trick, it is
* always wasm::GuardSize smaller than mappedSize.
*
* See also "Linear memory addresses and bounds checking" in
* wasm/WasmMemory.cpp.
*
* See also WasmMemoryObject::boundsCheckLimit().
*
* - sourceMaxSize - the optional declared limit on how far byteLength can grow
* in pages. This is the unmodified maximum size from the source module or
* JS-API invocation. This may not be representable in byte lengths, nor
* feasible for a module to actually grow to due to implementation limits.
* It is used for correct linking checks and js-types reflection.
*
* - clampedMaxSize - the maximum size on how far the byteLength can grow in
* pages. This value respects implementation limits and is always
* representable as a byte length. Every memory has a clampedMaxSize, even if
* no maximum was specified in source. When a memory has no sourceMaxSize,
* the clampedMaxSize will be the maximum amount of memory that can be grown
* to while still respecting implementation limits.
*
* - mappedSize - the actual mmapped size. Access in the range [0, mappedSize]
* will either succeed, or be handled by the wasm signal handlers. If
* sourceMaxSize is present at initialization, then we attempt to map the
* whole clampedMaxSize. Otherwise we only map the region needed for the
* initial size.
*
* The below diagram shows the layout of the wasm heap. The wasm-visible portion
* of the heap starts at 0. There is one extra page prior to the start of the
* wasm heap which contains the WasmArrayRawBuffer struct at its end (i.e. right
* before the start of the WASM heap).
*
* WasmArrayRawBuffer
* \ ArrayBufferObject::dataPointer()
* \ /
* \ |
* ______|_|______________________________________________________
* |______|_|______________|___________________|___________________|
* 0 byteLength clampedMaxSize mappedSize
*
* \_______________________/
 * COMMITTED
* \_____________________________________/
* SLOP
* \______________________________________________________________/
* MAPPED
*
* Invariants on byteLength, clampedMaxSize, and mappedSize:
* - byteLength only increases
* - 0 <= byteLength <= clampedMaxSize <= mappedSize
* - if sourceMaxSize is not specified, mappedSize may grow.
* It is otherwise constant.
* - initialLength <= clampedMaxSize <= sourceMaxSize (if present)
* - clampedMaxSize <= wasm::MaxMemoryPages()
*
* Invariants on boundsCheckLimit:
* - for wasm code with the huge-memory trick,
* clampedMaxSize <= boundsCheckLimit <= mappedSize
* - for asm.js code or wasm with explicit bounds checking,
* byteLength == boundsCheckLimit <= clampedMaxSize
* - on ARM, boundsCheckLimit must be a valid ARM immediate.
* - if sourceMaxSize is not specified, boundsCheckLimit may grow as
* mappedSize grows. They are otherwise constant.
* NOTE: For asm.js on 32-bit platforms and on all platforms when running with
* explicit bounds checking, we guarantee that
*
* byteLength == maxSize == boundsCheckLimit == mappedSize
*
* That is, signal handlers will not be invoked.
*
* The region between byteLength and mappedSize is the SLOP - an area where we use
* signal handlers to catch things that slip by bounds checks. Logically it has
* two parts:
*
* - from byteLength to boundsCheckLimit - this part of the SLOP serves to catch
* accesses to memory we have reserved but not yet grown into. This allows us
* to grow memory up to max (when present) without having to patch/update the
* bounds checks.
*
* - from boundsCheckLimit to mappedSize - this part of the SLOP allows us to
* bounds check against base pointers and fold some constant offsets inside
* loads. This enables better Bounds Check Elimination. See "Linear memory
* addresses and bounds checking" in wasm/WasmMemory.cpp.
*
*/
/* clang-format on */
// Grow the committed region of this wasm buffer to `newPages` without moving
// the mapping. Returns false if committing the additional pages fails; on
// success, length_ reflects the new byte length.
[[nodiscard]] bool WasmArrayRawBuffer::growToPagesInPlace(Pages newPages) {
  size_t oldSize = byteLength();
  size_t newSize = newPages.byteLength();
  MOZ_ASSERT(newSize >= oldSize);
  MOZ_ASSERT(newPages <= clampedMaxPages());
  MOZ_ASSERT(newSize <= mappedSize());
  // Commit the freshly exposed pages, if any, before publishing the length.
  size_t growBy = newSize - oldSize;
  MOZ_ASSERT(growBy % wasm::PageSize == 0);
  uint8_t* commitFrom = dataPointer() + oldSize;
  MOZ_ASSERT(uintptr_t(commitFrom) % gc::SystemPageSize() == 0);
  if (growBy != 0 && !CommitBufferMemory(commitFrom, growBy)) {
    return false;
  }
  length_ = newSize;
  return true;
}
bool WasmArrayRawBuffer::extendMappedSize(Pages maxPages) {
  // Extend the reserved (not necessarily committed) mapping so it can hold
  // |maxPages|. Returns true if the mapping already sufficed or was
  // successfully extended; mappedSize_ is only updated on success.
  size_t newMappedSize = wasm::ComputeMappedSize(maxPages);
  MOZ_ASSERT(mappedSize_ <= newMappedSize);
  if (mappedSize_ == newMappedSize) {
    return true;
  }

  if (!ExtendBufferMapping(dataPointer(), mappedSize_, newMappedSize)) {
    return false;
  }

  mappedSize_ = newMappedSize;
  return true;
}
void WasmArrayRawBuffer::tryGrowMaxPagesInPlace(Pages deltaMaxPages) {
  // Best-effort: try to raise the clamped maximum by |deltaMaxPages| by
  // extending the reserved mapping. On failure the buffer is left unchanged;
  // no error is reported.
  Pages newMaxPages = clampedMaxPages_;
  DebugOnly<bool> valid = newMaxPages.checkedIncrement(deltaMaxPages);
  // Caller must ensure increment does not overflow or increase over the
  // specified maximum pages.
  MOZ_ASSERT(valid);
  MOZ_ASSERT_IF(sourceMaxPages_.isSome(), newMaxPages <= *sourceMaxPages_);

  // Only commit to the new maximum once the mapping actually covers it.
  if (!extendMappedSize(newMaxPages)) {
    return;
  }
  clampedMaxPages_ = newMaxPages;
}
void WasmArrayRawBuffer::discard(size_t byteOffset, size_t byteLen) {
  // Replace the pages in [byteOffset, byteOffset + byteLen) with zeroed
  // pages, releasing the backing physical memory to the OS where possible.
  // The address space stays reserved and committed.
  uint8_t* memBase = dataPointer();

  // The caller is responsible for ensuring these conditions are met; see this
  // function's comment in ArrayBufferObject.h.
  MOZ_ASSERT(byteOffset % wasm::PageSize == 0);
  MOZ_ASSERT(byteLen % wasm::PageSize == 0);
  MOZ_ASSERT(wasm::MemoryBoundsCheck(uint64_t(byteOffset), uint64_t(byteLen),
                                     byteLength()));

  // Discarding zero bytes "succeeds" with no effect.
  if (byteLen == 0) {
    return;
  }

  void* addr = memBase + uintptr_t(byteOffset);

  // On POSIX-ish platforms, we discard memory by overwriting previously-mapped
  // pages with freshly-mapped pages (which are all zeroed). The operating
  // system recognizes this and decreases the process RSS, and eventually
  // collects the abandoned physical pages.
  //
  // On Windows, committing over previously-committed pages has no effect, and
  // the memory must be explicitly decommitted first. This is not the same as an
  // munmap; the address space is still reserved.
#ifdef XP_WIN
  if (!VirtualFree(addr, byteLen, MEM_DECOMMIT)) {
    MOZ_CRASH("wasm discard: failed to decommit memory");
  }
  if (!VirtualAlloc(addr, byteLen, MEM_COMMIT, PAGE_READWRITE)) {
    MOZ_CRASH("wasm discard: decommitted memory but failed to recommit");
  }
#elif defined(__wasi__)
  // WASI has no page-mapping API; zeroing by hand is the best we can do
  // (it does not return physical memory to the OS).
  memset(addr, 0, byteLen);
#else  // !XP_WIN
  void* data = MozTaggedAnonymousMmap(addr, byteLen, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0,
                                      "wasm-reserved");
  if (data == MAP_FAILED) {
    MOZ_CRASH("failed to discard wasm memory; memory mappings may be broken");
  }
#endif
}
/* static */
WasmArrayRawBuffer* WasmArrayRawBuffer::AllocateWasm(
    IndexType indexType, Pages initialPages, Pages clampedMaxPages,
    const Maybe<Pages>& sourceMaxPages, const Maybe<size_t>& mapped) {
  // Prior code has asserted that initial pages is within our implementation
  // limits (wasm::MaxMemoryPages) and we can assume it is a valid size_t.
  MOZ_ASSERT(initialPages.hasByteLength());
  size_t numBytes = initialPages.byteLength();

  // If there is a specified maximum, attempt to map the whole range for
  // clampedMaxPages. Or else map only what's required for initialPages.
  Pages initialMappedPages =
      sourceMaxPages.isSome() ? clampedMaxPages : initialPages;

  // Use an override mapped size, or else compute the mapped size from
  // initialMappedPages.
  size_t mappedSize =
      mapped.isSome() ? *mapped : wasm::ComputeMappedSize(initialMappedPages);

  // Guard the header-page additions below against overflow.
  MOZ_RELEASE_ASSERT(mappedSize <= SIZE_MAX - gc::SystemPageSize());
  MOZ_RELEASE_ASSERT(numBytes <= SIZE_MAX - gc::SystemPageSize());
  MOZ_RELEASE_ASSERT(initialPages <= clampedMaxPages);
  MOZ_ASSERT(numBytes % gc::SystemPageSize() == 0);
  MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);

  // One extra system page in front of the data holds the WasmArrayRawBuffer
  // header.
  uint64_t mappedSizeWithHeader = mappedSize + gc::SystemPageSize();
  uint64_t numBytesWithHeader = numBytes + gc::SystemPageSize();

  void* data = MapBufferMemory(indexType, (size_t)mappedSizeWithHeader,
                               (size_t)numBytesWithHeader);
  if (!data) {
    return nullptr;
  }

  // The data region starts one system page into the mapping; the header is
  // placement-new'd immediately before it, at the end of that first page.
  uint8_t* base = reinterpret_cast<uint8_t*>(data) + gc::SystemPageSize();
  uint8_t* header = base - sizeof(WasmArrayRawBuffer);

  auto rawBuf = new (header) WasmArrayRawBuffer(
      indexType, base, clampedMaxPages, sourceMaxPages, mappedSize, numBytes);
  return rawBuf;
}
/* static */
void WasmArrayRawBuffer::Release(void* mem) {
  // |mem| is the data pointer handed out by AllocateWasm; the header sits
  // directly before it in memory.
  WasmArrayRawBuffer* header =
      (WasmArrayRawBuffer*)((uint8_t*)mem - sizeof(WasmArrayRawBuffer));

  MOZ_RELEASE_ASSERT(header->mappedSize() <= SIZE_MAX - gc::SystemPageSize());
  // Unmap the whole region, including the extra header page in front of the
  // data (mirrors the layout set up in AllocateWasm).
  size_t mappedSizeWithHeader = header->mappedSize() + gc::SystemPageSize();

  static_assert(std::is_trivially_destructible_v<WasmArrayRawBuffer>,
                "no need to call the destructor");

  UnmapBufferMemory(header->indexType(), header->basePointer(),
                    mappedSizeWithHeader);
}
WasmArrayRawBuffer* ArrayBufferObject::BufferContents::wasmBuffer() const {
  // The WasmArrayRawBuffer header is laid out immediately before the data
  // pointer, so step back by its size to recover it.
  MOZ_RELEASE_ASSERT(kind_ == WASM);
  uint8_t* headerAddr = data_ - sizeof(WasmArrayRawBuffer);
  return reinterpret_cast<WasmArrayRawBuffer*>(headerAddr);
}
template <typename ObjT, typename RawbufT>
static ArrayBufferObjectMaybeShared* CreateSpecificWasmBuffer(
    JSContext* cx, const wasm::MemoryDesc& memory) {
  // Allocate a raw buffer matching |memory|'s limits, then wrap it in an
  // ObjT (shared or unshared array buffer). On allocation failure, retries
  // with progressively smaller reservations before giving up.
  bool useHugeMemory = wasm::IsHugeMemoryEnabled(memory.indexType());
  Pages initialPages = memory.initialPages();
  Maybe<Pages> sourceMaxPages = memory.maximumPages();
  Pages clampedMaxPages = wasm::ClampedMaxPages(
      memory.indexType(), initialPages, sourceMaxPages, useHugeMemory);

  Maybe<size_t> mappedSize;
#ifdef WASM_SUPPORTS_HUGE_MEMORY
  // Override the mapped size if we are using huge memory. If we are not, then
  // it will be calculated by the raw buffer we are using.
  if (useHugeMemory) {
    mappedSize = Some(wasm::HugeMappedSize);
  }
#endif

  RawbufT* buffer =
      RawbufT::AllocateWasm(memory.limits.indexType, initialPages,
                            clampedMaxPages, sourceMaxPages, mappedSize);
  if (!buffer) {
    // Huge-memory allocations are all-or-nothing: warn and report OOM
    // without attempting a backoff.
    if (useHugeMemory) {
      WarnNumberASCII(cx, JSMSG_WASM_HUGE_MEMORY_FAILED);
      if (cx->isExceptionPending()) {
        cx->clearPendingException();
      }
      ReportOutOfMemory(cx);
      return nullptr;
    }

    // If we fail, and have a sourceMaxPages, try to reserve the biggest
    // chunk in the range [initialPages, clampedMaxPages) using log backoff.
    if (!sourceMaxPages) {
      wasm::Log(cx, "new Memory({initial=%" PRIu64 " pages}) failed",
                initialPages.value());
      ReportOutOfMemory(cx);
      return nullptr;
    }

    // Halve the reservation until an allocation succeeds or we reach the
    // initial size.
    uint64_t cur = clampedMaxPages.value() / 2;
    for (; Pages(cur) > initialPages; cur /= 2) {
      buffer = RawbufT::AllocateWasm(memory.limits.indexType, initialPages,
                                     Pages(cur), sourceMaxPages, mappedSize);
      if (buffer) {
        break;
      }
    }

    if (!buffer) {
      wasm::Log(cx, "new Memory({initial=%" PRIu64 " pages}) failed",
                initialPages.value());
      ReportOutOfMemory(cx);
      return nullptr;
    }

    // Try to grow our chunk as much as possible.
    for (size_t d = cur / 2; d >= 1; d /= 2) {
      buffer->tryGrowMaxPagesInPlace(Pages(d));
    }
  }

  // ObjT::createFromNewRawBuffer assumes ownership of |buffer| even in case
  // of failure.
  Rooted<ArrayBufferObjectMaybeShared*> object(
      cx, ObjT::createFromNewRawBuffer(cx, buffer, initialPages.byteLength()));
  if (!object) {
    return nullptr;
  }

  // See MaximumLiveMappedBuffers comment above.
  // Depending on how much address space wasm buffers have reserved, either
  // force a full non-incremental GC, trigger an async GC, or do nothing.
  if (wasmReservedBytes > WasmReservedBytesStartSyncFullGC) {
    JS::PrepareForFullGC(cx);
    JS::NonIncrementalGC(cx, JS::GCOptions::Normal,
                         JS::GCReason::TOO_MUCH_WASM_MEMORY);
    wasmReservedBytesSinceLast = 0;
  } else if (wasmReservedBytes > WasmReservedBytesStartTriggering) {
    wasmReservedBytesSinceLast += uint64_t(buffer->mappedSize());
    if (wasmReservedBytesSinceLast > WasmReservedBytesPerTrigger) {
      (void)cx->runtime()->gc.triggerGC(JS::GCReason::TOO_MUCH_WASM_MEMORY);
      wasmReservedBytesSinceLast = 0;
    }
  } else {
    wasmReservedBytesSinceLast = 0;
  }

  // Log the result with details on the memory allocation
  if (sourceMaxPages) {
    if (useHugeMemory) {
      wasm::Log(cx,
                "new Memory({initial:%" PRIu64 " pages, maximum:%" PRIu64
                " pages}) succeeded",
                initialPages.value(), sourceMaxPages->value());
    } else {
      wasm::Log(cx,
                "new Memory({initial:%" PRIu64 " pages, maximum:%" PRIu64
                " pages}) succeeded "
                "with internal maximum of %" PRIu64 " pages",
                initialPages.value(), sourceMaxPages->value(),
                object->wasmClampedMaxPages().value());
    }
  } else {
    wasm::Log(cx, "new Memory({initial:%" PRIu64 " pages}) succeeded",
              initialPages.value());
  }

  return object;
}
ArrayBufferObjectMaybeShared* js::CreateWasmBuffer(
    JSContext* cx, const wasm::MemoryDesc& memory) {
  // Entry point for creating a wasm memory buffer: dispatches to the shared
  // or unshared flavor based on the memory descriptor.
  MOZ_RELEASE_ASSERT(memory.initialPages() <=
                     wasm::MaxMemoryPages(memory.indexType()));
  MOZ_RELEASE_ASSERT(cx->wasm().haveSignalHandlers);

  if (memory.isShared()) {
    // Shared memories require SAB support to be enabled in this realm.
    if (!cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled()) {
      JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                                JSMSG_WASM_NO_SHMEM_LINK);
      return nullptr;
    }
    return CreateSpecificWasmBuffer<SharedArrayBufferObject,
                                    WasmSharedArrayRawBuffer>(cx, memory);
  }
  return CreateSpecificWasmBuffer<ArrayBufferObject, WasmArrayRawBuffer>(
      cx, memory);
}
bool ArrayBufferObject::prepareForAsmJS() {
  // Mark this buffer as usable as an asm.js heap, if its contents kind
  // permits that. Returns false for kinds that cannot back asm.js code.
  MOZ_ASSERT(byteLength() % wasm::PageSize == 0,
             "prior size checking should have guaranteed page-size multiple");
  MOZ_ASSERT(byteLength() > 0,
             "prior size checking should have excluded empty buffers");
  MOZ_ASSERT(!isResizable(),
             "prior checks should have excluded resizable buffers");

  switch (bufferKind()) {
    case MALLOCED_ARRAYBUFFER_CONTENTS_ARENA:
    case MALLOCED_UNKNOWN_ARENA:
    case MAPPED:
    case EXTERNAL:
      // It's okay if this uselessly sets the flag a second time.
      setIsPreparedForAsmJS();
      return true;

    case INLINE_DATA:
      static_assert(
          wasm::PageSize > FixedLengthArrayBufferObject::MaxInlineBytes,
          "inline data must be too small to be a page size multiple");
      MOZ_ASSERT_UNREACHABLE(
          "inline-data buffers should be implicitly excluded by size checks");
      return false;

    case NO_DATA:
      MOZ_ASSERT_UNREACHABLE(
          "size checking should have excluded detached or empty buffers");
      return false;

    // asm.js code and associated buffers are potentially long-lived. Yet a
    // buffer of user-owned data *must* be detached by the user before the
    // user-owned data is disposed. No caller wants to use a user-owned
    // ArrayBuffer with asm.js, so just don't support this and avoid a mess of
    // complexity.
    case USER_OWNED:
    // wasm buffers can be detached at any time.
    case WASM:
      MOZ_ASSERT(!isPreparedForAsmJS());
      return false;
  }

  MOZ_ASSERT_UNREACHABLE("non-exhaustive kind-handling switch?");
  return false;
}
ArrayBufferObject::BufferContents ArrayBufferObject::createMappedContents(
    int fd, size_t offset, size_t length) {
  // Map |length| bytes of |fd| starting at |offset| and wrap the mapping in
  // MAPPED-kind contents.
  return BufferContents::createMapped(
      gc::AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT));
}
uint8_t* FixedLengthArrayBufferObject::inlineDataPointer() const {
  // Inline data starts right after this class's reserved slots.
  void* afterSlots = fixedData(JSCLASS_RESERVED_SLOTS(&class_));
  return static_cast<uint8_t*>(afterSlots);
}
uint8_t* ResizableArrayBufferObject::inlineDataPointer() const {
  // Inline data starts right after this class's reserved slots.
  void* afterSlots = fixedData(JSCLASS_RESERVED_SLOTS(&class_));
  return static_cast<uint8_t*>(afterSlots);
}
uint8_t* ArrayBufferObject::dataPointer() const {
  // The raw data pointer is stashed as a PrivateValue in DATA_SLOT.
  void* ptr = getFixedSlot(DATA_SLOT).toPrivate();
  return static_cast<uint8_t*>(ptr);
}
SharedMem<uint8_t*> ArrayBufferObject::dataPointerShared() const {
  // Same slot as dataPointer(), but wrapped as an explicitly-unshared
  // SharedMem for callers that traffic in SharedMem pointers.
  void* ptr = getFixedSlot(DATA_SLOT).toPrivate();
  return SharedMem<uint8_t*>::unshared(ptr);
}
ArrayBufferObject::FreeInfo* ArrayBufferObject::freeInfo() const {
  // For EXTERNAL buffers, the FreeInfo record is stored in the (otherwise
  // unused) inline-data area of the fixed-length object.
  MOZ_ASSERT(isExternal());
  MOZ_ASSERT(!isResizable());
  uint8_t* inlineData = as<FixedLengthArrayBufferObject>().inlineDataPointer();
  return reinterpret_cast<FreeInfo*>(inlineData);
}
void ArrayBufferObject::releaseData(JS::GCContext* gcx) {
  // Release whatever storage backs this buffer, dispatching on contents
  // kind, and update GC memory accounting where it was previously recorded.
  switch (bufferKind()) {
    case INLINE_DATA:
      // Inline data doesn't require releasing.
      break;

    case MALLOCED_ARRAYBUFFER_CONTENTS_ARENA:
    case MALLOCED_UNKNOWN_ARENA:
      // free_ also removes the cell-memory association for us.
      gcx->free_(this, dataPointer(), associatedBytes(),
                 MemoryUse::ArrayBufferContents);
      break;

    case NO_DATA:
      // There's nothing to release if there's no data.
      MOZ_ASSERT(dataPointer() == nullptr);
      break;

    case USER_OWNED:
      // User-owned data is released by, well, the user.
      break;

    case MAPPED:
      gc::DeallocateMappedContent(dataPointer(), byteLength());
      gcx->removeCellMemory(this, associatedBytes(),
                            MemoryUse::ArrayBufferContents);
      break;

    case WASM:
      WasmArrayRawBuffer::Release(dataPointer());
      gcx->removeCellMemory(this, byteLength(), MemoryUse::ArrayBufferContents);
      break;

    case EXTERNAL:
      MOZ_ASSERT(freeInfo()->freeFunc);
      {
        // The analyzer can't know for sure whether the embedder-supplied
        // free function will GC. We give the analyzer a hint here.
        // (Doing a GC in the free function is considered a programmer
        // error.)
        JS::AutoSuppressGCAnalysis nogc;
        freeInfo()->freeFunc(dataPointer(), freeInfo()->freeUserData);
      }
      break;
  }
}
void ArrayBufferObject::setDataPointer(BufferContents contents) {
  // Install new contents: stash the raw pointer and record the contents kind
  // in the flags word.
  setFixedSlot(DATA_SLOT, PrivateValue(contents.data()));
  setFlags((flags() & ~KIND_MASK) | contents.kind());

  // External contents carry a free function and user data; copy them into
  // the FreeInfo area. Note isExternal() reads the kind flag set just above,
  // so the ordering here matters.
  if (isExternal()) {
    auto info = freeInfo();
    info->freeFunc = contents.freeFunc();
    info->freeUserData = contents.freeUserData();
  }
}
size_t ArrayBufferObject::byteLength() const {
  // The length is encoded as a PrivateValue-wrapped pointer-sized integer.
  void* encoded = getFixedSlot(BYTE_LENGTH_SLOT).toPrivate();
  return reinterpret_cast<size_t>(encoded);
}
inline size_t ArrayBufferObject::associatedBytes() const {
  // Number of bytes accounted against this buffer for memory reporting.
  if (isMalloced()) {
    // Resizable buffers allocate their full maximum up front.
    return isResizable() ? as<ResizableArrayBufferObject>().maxByteLength()
                         : byteLength();
  }
  if (isMapped()) {
    // Mapped contents always occupy whole system pages.
    return RoundUp(byteLength(), js::gc::SystemPageSize());
  }
  MOZ_CRASH("Unexpected buffer kind");
}
void ArrayBufferObject::setByteLength(size_t length) {
  // Lengths are bounded by ByteLengthLimit, so the value always fits in a
  // PrivateValue.
  MOZ_ASSERT(length <= ArrayBufferObject::ByteLengthLimit);
  setFixedSlot(BYTE_LENGTH_SLOT, PrivateValue(length));
}
size_t ArrayBufferObject::wasmMappedSize() const {
  // Non-wasm buffers have no slop region, so their mapped size is just the
  // byte length.
  if (!isWasm()) {
    return byteLength();
  }
  return contents().wasmBuffer()->mappedSize();
}
IndexType ArrayBufferObject::wasmIndexType() const {
  // Only wasm or asm.js-prepared buffers may be asked for an index type;
  // asm.js memories are always 32-bit.
  if (!isWasm()) {
    MOZ_ASSERT(isPreparedForAsmJS());
    return wasm::IndexType::I32;
  }
  return contents().wasmBuffer()->indexType();
}
Pages ArrayBufferObject::wasmPages() const {
  // For asm.js-prepared buffers the byte length is an exact page multiple,
  // so converting it back to pages is lossless.
  if (!isWasm()) {
    MOZ_ASSERT(isPreparedForAsmJS());
    return Pages::fromByteLengthExact(byteLength());
  }
  return contents().wasmBuffer()->pages();
}
Pages ArrayBufferObject::wasmClampedMaxPages() const {
  // asm.js buffers can never grow, so their current size doubles as their
  // clamped maximum.
  if (!isWasm()) {
    MOZ_ASSERT(isPreparedForAsmJS());
    return Pages::fromByteLengthExact(byteLength());
  }
  return contents().wasmBuffer()->clampedMaxPages();
}
Maybe<Pages> ArrayBufferObject::wasmSourceMaxPages() const {
  // asm.js buffers can never grow, so report their current size as the
  // (always-present) source maximum.
  if (!isWasm()) {
    MOZ_ASSERT(isPreparedForAsmJS());
    return Some<Pages>(Pages::fromByteLengthExact(byteLength()));
  }
  return contents().wasmBuffer()->sourceMaxPages();
}
size_t js::WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf) {
  // Dispatch on the concrete buffer type.
  return buf->is<ArrayBufferObject>()
             ? buf->as<ArrayBufferObject>().wasmMappedSize()
             : buf->as<SharedArrayBufferObject>().wasmMappedSize();
}
IndexType js::WasmArrayBufferIndexType(
    const ArrayBufferObjectMaybeShared* buf) {
  // Dispatch on the concrete buffer type.
  return buf->is<ArrayBufferObject>()
             ? buf->as<ArrayBufferObject>().wasmIndexType()
             : buf->as<SharedArrayBufferObject>().wasmIndexType();
}
Pages js::WasmArrayBufferPages(const ArrayBufferObjectMaybeShared* buf) {
  // Shared buffers use the racy "volatile" accessor since another thread may
  // be growing them concurrently.
  return buf->is<ArrayBufferObject>()
             ? buf->as<ArrayBufferObject>().wasmPages()
             : buf->as<SharedArrayBufferObject>().volatileWasmPages();
}
Pages js::WasmArrayBufferClampedMaxPages(
    const ArrayBufferObjectMaybeShared* buf) {
  // Dispatch on the concrete buffer type.
  return buf->is<ArrayBufferObject>()
             ? buf->as<ArrayBufferObject>().wasmClampedMaxPages()
             : buf->as<SharedArrayBufferObject>().wasmClampedMaxPages();
}
Maybe<Pages> js::WasmArrayBufferSourceMaxPages(
    const ArrayBufferObjectMaybeShared* buf) {
  // Shared buffers always have a source maximum, so wrap it in Some to match
  // the unshared signature.
  return buf->is<ArrayBufferObject>()
             ? buf->as<ArrayBufferObject>().wasmSourceMaxPages()
             : Some(buf->as<SharedArrayBufferObject>().wasmSourceMaxPages());
}
// Debug-only validation that |buffer| is in a state whose contents may be
// stolen or detached: same compartment as |cx|, not already detached, not
// length-pinned, and not prepared for asm.js.
static void CheckStealPreconditions(Handle<ArrayBufferObject*> buffer,
                                    JSContext* cx) {
  cx->check(buffer);

  MOZ_ASSERT(!buffer->isDetached(), "can't steal from a detached buffer");
  MOZ_ASSERT(!buffer->isLengthPinned(),
             "can't steal from a buffer with a pinned length");
  MOZ_ASSERT(!buffer->isPreparedForAsmJS(),
             "asm.js-prepared buffers don't have detachable/stealable data");
}
/* static */
ArrayBufferObject* ArrayBufferObject::wasmGrowToPagesInPlace(
wasm::IndexType t, Pages newPages, Handle<ArrayBufferObject*> oldBuf,
JSContext* cx) {
if (oldBuf->isLengthPinned()) {
return nullptr;
}