/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "vm/NativeObject-inl.h"
#include "mozilla/Casting.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/Maybe.h"
#include <algorithm>
#include <iterator>
#include "gc/MaybeRooted.h"
#include "gc/StableCellHasher.h"
#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
#include "js/friend/StackLimits.h" // js::AutoCheckRecursionLimit
#include "js/Printer.h" // js::GenericPrinter
#include "js/Value.h"
#include "vm/EqualityOperations.h" // js::SameValue
#include "vm/GetterSetter.h" // js::GetterSetter
#include "vm/Interpreter.h" // js::CallGetter, js::CallSetter
#include "vm/JSONPrinter.h" // js::JSONPrinter
#include "vm/PlainObject.h" // js::PlainObject
#include "vm/TypedArrayObject.h"
#include "vm/Watchtower.h"
#ifdef ENABLE_RECORD_TUPLE
# include "builtin/RecordObject.h"
# include "builtin/TupleObject.h"
# include "vm/RecordTupleShared.h"
#endif
#include "gc/Nursery-inl.h"
#include "vm/JSObject-inl.h"
#include "vm/Shape-inl.h"
using namespace js;
using mozilla::CheckedInt;
using mozilla::PodCopy;
using mozilla::RoundUpPow2;
struct EmptyObjectElements {
const ObjectElements emptyElementsHeader;
// Add an extra (unused) Value so that an out-of-bounds index that gets
// masked to 0 still accesses valid memory.
const Value val;
public:
constexpr EmptyObjectElements()
: emptyElementsHeader(0, 0), val(UndefinedValue()) {}
explicit constexpr EmptyObjectElements(ObjectElements::SharedMemory shmem)
: emptyElementsHeader(0, 0, shmem), val(UndefinedValue()) {}
};
static constexpr EmptyObjectElements emptyElementsHeader;
/* Objects with no elements share one empty set of elements. */
HeapSlot* const js::emptyObjectElements = reinterpret_cast<HeapSlot*>(
uintptr_t(&emptyElementsHeader) + sizeof(ObjectElements));
static constexpr EmptyObjectElements emptyElementsHeaderShared(
ObjectElements::SharedMemory::IsShared);
/* Objects with no elements share one empty set of elements. */
HeapSlot* const js::emptyObjectElementsShared = reinterpret_cast<HeapSlot*>(
uintptr_t(&emptyElementsHeaderShared) + sizeof(ObjectElements));
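// Layout sketch (illustrative): each shared empty-elements instance is an
// ObjectElements header followed by a single unused Value, and the pointers
// above address the (empty) element storage just past the header:
//
//   [ ObjectElements header | unused Value ]
//                           ^-- emptyObjectElements(Shared) points here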
struct EmptyObjectSlots : public ObjectSlots {
explicit constexpr EmptyObjectSlots(size_t dictionarySlotSpan)
: ObjectSlots(0, dictionarySlotSpan, NoUniqueIdInSharedEmptySlots) {}
};
static constexpr EmptyObjectSlots emptyObjectSlotsHeaders[17] = {
EmptyObjectSlots(0), EmptyObjectSlots(1), EmptyObjectSlots(2),
EmptyObjectSlots(3), EmptyObjectSlots(4), EmptyObjectSlots(5),
EmptyObjectSlots(6), EmptyObjectSlots(7), EmptyObjectSlots(8),
EmptyObjectSlots(9), EmptyObjectSlots(10), EmptyObjectSlots(11),
EmptyObjectSlots(12), EmptyObjectSlots(13), EmptyObjectSlots(14),
EmptyObjectSlots(15), EmptyObjectSlots(16)};
static_assert(std::size(emptyObjectSlotsHeaders) ==
NativeObject::MAX_FIXED_SLOTS + 1);
MOZ_RUNINIT HeapSlot* const js::emptyObjectSlotsForDictionaryObject[17] = {
emptyObjectSlotsHeaders[0].slots(), emptyObjectSlotsHeaders[1].slots(),
emptyObjectSlotsHeaders[2].slots(), emptyObjectSlotsHeaders[3].slots(),
emptyObjectSlotsHeaders[4].slots(), emptyObjectSlotsHeaders[5].slots(),
emptyObjectSlotsHeaders[6].slots(), emptyObjectSlotsHeaders[7].slots(),
emptyObjectSlotsHeaders[8].slots(), emptyObjectSlotsHeaders[9].slots(),
emptyObjectSlotsHeaders[10].slots(), emptyObjectSlotsHeaders[11].slots(),
emptyObjectSlotsHeaders[12].slots(), emptyObjectSlotsHeaders[13].slots(),
emptyObjectSlotsHeaders[14].slots(), emptyObjectSlotsHeaders[15].slots(),
emptyObjectSlotsHeaders[16].slots()};
static_assert(std::size(emptyObjectSlotsForDictionaryObject) ==
NativeObject::MAX_FIXED_SLOTS + 1);
MOZ_RUNINIT HeapSlot* const js::emptyObjectSlots =
emptyObjectSlotsForDictionaryObject[0];
#ifdef DEBUG
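// Typed arrays never store their data in dense elements (it lives in an
// attached or inline buffer), so they must keep the shared empty elements.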
bool NativeObject::canHaveNonEmptyElements() {
return !this->is<TypedArrayObject>();
}
#endif // DEBUG
/* static */
void ObjectElements::PrepareForPreventExtensions(JSContext* cx,
NativeObject* obj) {
if (!obj->hasEmptyElements()) {
obj->shrinkCapacityToInitializedLength(cx);
}
// shrinkCapacityToInitializedLength ensures there are no shifted elements.
MOZ_ASSERT(obj->getElementsHeader()->numShiftedElements() == 0);
}
/* static */
void ObjectElements::PreventExtensions(NativeObject* obj) {
MOZ_ASSERT(!obj->isExtensible());
MOZ_ASSERT(obj->getElementsHeader()->numShiftedElements() == 0);
MOZ_ASSERT(obj->getDenseInitializedLength() == obj->getDenseCapacity());
if (!obj->hasEmptyElements()) {
obj->getElementsHeader()->setNotExtensible();
}
}
/* static */
bool ObjectElements::FreezeOrSeal(JSContext* cx, Handle<NativeObject*> obj,
IntegrityLevel level) {
MOZ_ASSERT_IF(level == IntegrityLevel::Frozen && obj->is<ArrayObject>(),
!obj->as<ArrayObject>().lengthIsWritable());
MOZ_ASSERT(!obj->isExtensible());
MOZ_ASSERT(obj->getElementsHeader()->numShiftedElements() == 0);
if (obj->hasEmptyElements() || obj->denseElementsAreFrozen()) {
return true;
}
if (level == IntegrityLevel::Frozen) {
if (!JSObject::setFlag(cx, obj, ObjectFlag::FrozenElements)) {
return false;
}
}
if (!obj->denseElementsAreSealed()) {
obj->getElementsHeader()->seal();
}
if (level == IntegrityLevel::Frozen) {
obj->getElementsHeader()->freeze();
}
return true;
}
#if defined(DEBUG) || defined(JS_JITSPEW)
template <typename KnownF, typename UnknownF>
void ForEachObjectElementsFlag(uint16_t flags, KnownF known, UnknownF unknown) {
for (uint16_t i = 1; i; i = i << 1) {
if (!(flags & i)) {
continue;
}
switch (ObjectElements::Flags(flags & i)) {
case ObjectElements::Flags::FIXED:
known("FIXED");
break;
case ObjectElements::Flags::NONWRITABLE_ARRAY_LENGTH:
known("NONWRITABLE_ARRAY_LENGTH");
break;
# ifdef ENABLE_RECORD_TUPLE
case ObjectElements::Flags::TUPLE_IS_ATOMIZED:
known("TUPLE_IS_ATOMIZED");
break;
# endif
case ObjectElements::Flags::SHARED_MEMORY:
known("SHARED_MEMORY");
break;
case ObjectElements::Flags::NOT_EXTENSIBLE:
known("NOT_EXTENSIBLE");
break;
case ObjectElements::Flags::SEALED:
known("SEALED");
break;
case ObjectElements::Flags::FROZEN:
known("FROZEN");
break;
case ObjectElements::Flags::NON_PACKED:
known("NON_PACKED");
break;
case ObjectElements::Flags::MAYBE_IN_ITERATION:
known("MAYBE_IN_ITERATION");
break;
default:
unknown(i);
break;
}
}
}
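// Example output (illustrative values):
//   <(js::ObjectElements*)0x7f3a12345678, flags=[FIXED, NON_PACKED],
//   init=3, capacity=6, length=8>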
void ObjectElements::dumpStringContent(js::GenericPrinter& out) const {
out.printf("<(js::ObjectElements*)0x%p, flags=[", this);
bool first = true;
ForEachObjectElementsFlag(
flags,
[&](const char* name) {
if (!first) {
out.put(", ");
}
first = false;
out.put(name);
},
[&](uint16_t value) {
if (!first) {
out.put(", ");
}
first = false;
out.printf("Unknown(%04x)", value);
});
out.put("]");
out.printf(", init=%u, capacity=%u, length=%u>", initializedLength, capacity,
length);
}
#endif
#ifdef DEBUG
static mozilla::Atomic<bool, mozilla::Relaxed> gShapeConsistencyChecksEnabled(
false);
/* static */
void js::NativeObject::enableShapeConsistencyChecks() {
gShapeConsistencyChecksEnabled = true;
}
void js::NativeObject::checkShapeConsistency() {
if (!gShapeConsistencyChecksEnabled) {
return;
}
MOZ_ASSERT(is<NativeObject>());
if (PropMap* map = shape()->propMap()) {
map->checkConsistency(this);
} else {
MOZ_ASSERT(shape()->propMapLength() == 0);
}
}
#endif
#ifdef DEBUG
bool js::NativeObject::slotInRange(uint32_t slot,
SentinelAllowed sentinel) const {
MOZ_ASSERT(!gc::IsForwarded(shape()));
uint32_t capacity = numFixedSlots() + numDynamicSlots();
if (sentinel == SENTINEL_ALLOWED) {
return slot <= capacity;
}
return slot < capacity;
}
bool js::NativeObject::slotIsFixed(uint32_t slot) const {
// We call numFixedSlotsMaybeForwarded() to allow reading slots of
// associated objects in trace hooks that may be called during a moving GC.
return slot < numFixedSlotsMaybeForwarded();
}
bool js::NativeObject::isNumFixedSlots(uint32_t nfixed) const {
// We call numFixedSlotsMaybeForwarded() to allow reading slots of
// associated objects in trace hooks that may be called during a moving GC.
return nfixed == numFixedSlotsMaybeForwarded();
}
uint32_t js::NativeObject::outOfLineNumDynamicSlots() const {
return numDynamicSlots();
}
#endif /* DEBUG */
mozilla::Maybe<PropertyInfo> js::NativeObject::lookup(JSContext* cx, jsid id) {
MOZ_ASSERT(is<NativeObject>());
uint32_t index;
if (PropMap* map = shape()->lookup(cx, id, &index)) {
return mozilla::Some(map->getPropertyInfo(index));
}
return mozilla::Nothing();
}
mozilla::Maybe<PropertyInfo> js::NativeObject::lookupPure(jsid id) {
MOZ_ASSERT(is<NativeObject>());
uint32_t index;
if (PropMap* map = shape()->lookupPure(id, &index)) {
return mozilla::Some(map->getPropertyInfo(index));
}
return mozilla::Nothing();
}
bool NativeObject::setUniqueId(JSRuntime* runtime, uint64_t uid) {
MOZ_ASSERT(!hasUniqueId());
MOZ_ASSERT(!gc::HasUniqueId(this));
Nursery& nursery = runtime->gc.nursery();
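  // The unique ID lives in the ObjectSlots header, so a zero-capacity dynamic
  // slots allocation is enough to hold it.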
if (!hasDynamicSlots() && !allocateSlots(nursery, 0)) {
return false;
}
getSlotsHeader()->setUniqueId(uid);
return true;
}
bool NativeObject::setOrUpdateUniqueId(JSContext* cx, uint64_t uid) {
if (!hasDynamicSlots() && !allocateSlots(cx->nursery(), 0)) {
ReportOutOfMemory(cx);
return false;
}
getSlotsHeader()->setUniqueId(uid);
return true;
}
bool NativeObject::growSlots(JSContext* cx, uint32_t oldCapacity,
uint32_t newCapacity) {
MOZ_ASSERT(newCapacity > oldCapacity);
/*
* Slot capacities are determined by the span of allocated objects. Due to
* the limited number of bits to store shape slots, object growth is
* throttled well before the slot capacity can overflow.
*/
NativeObject::slotsSizeMustNotOverflow();
MOZ_ASSERT(newCapacity <= MAX_SLOTS_COUNT);
if (!hasDynamicSlots()) {
if (!allocateSlots(cx->nursery(), newCapacity)) {
ReportOutOfMemory(cx);
return false;
}
return true;
}
uint64_t uid = maybeUniqueId();
uint32_t newAllocated = ObjectSlots::allocCount(newCapacity);
uint32_t dictionarySpan = getSlotsHeader()->dictionarySlotSpan();
uint32_t oldAllocated = ObjectSlots::allocCount(oldCapacity);
ObjectSlots* oldHeaderSlots = ObjectSlots::fromSlots(slots_);
MOZ_ASSERT(oldHeaderSlots->capacity() == oldCapacity);
HeapSlot* allocation = ReallocateCellBuffer<HeapSlot>(
cx, this, reinterpret_cast<HeapSlot*>(oldHeaderSlots), oldAllocated,
newAllocated, js::MallocArena);
if (!allocation) {
return false; /* Leave slots at its old size. */
}
auto* newHeaderSlots =
new (allocation) ObjectSlots(newCapacity, dictionarySpan, uid);
slots_ = newHeaderSlots->slots();
Debug_SetSlotRangeToCrashOnTouch(slots_ + oldCapacity,
newCapacity - oldCapacity);
RemoveCellMemory(this, ObjectSlots::allocSize(oldCapacity),
MemoryUse::ObjectSlots);
AddCellMemory(this, ObjectSlots::allocSize(newCapacity),
MemoryUse::ObjectSlots);
MOZ_ASSERT(hasDynamicSlots());
return true;
}
bool NativeObject::growSlotsForNewSlot(JSContext* cx, uint32_t numFixed,
uint32_t slot) {
MOZ_ASSERT(slotSpan() == slot);
MOZ_ASSERT(shape()->numFixedSlots() == numFixed);
MOZ_ASSERT(slot >= numFixed);
uint32_t newCapacity = calculateDynamicSlots(numFixed, slot + 1, getClass());
uint32_t oldCapacity = numDynamicSlots();
MOZ_ASSERT(oldCapacity < newCapacity);
return growSlots(cx, oldCapacity, newCapacity);
}
bool NativeObject::allocateInitialSlots(JSContext* cx, uint32_t capacity) {
uint32_t count = ObjectSlots::allocCount(capacity);
HeapSlot* allocation = AllocateCellBuffer<HeapSlot>(cx, this, count);
if (MOZ_UNLIKELY(!allocation)) {
// The new object will be unreachable, but we have to make it safe for
// finalization. It can also be observed with dumpHeap().
// Give it a dummy shape that has no dynamic slots.
setShape(GlobalObject::getEmptyPlainObjectShape(cx));
initEmptyDynamicSlots();
return false;
}
auto* headerSlots = new (allocation)
ObjectSlots(capacity, 0, ObjectSlots::NoUniqueIdInDynamicSlots);
slots_ = headerSlots->slots();
Debug_SetSlotRangeToCrashOnTouch(slots_, capacity);
if (!IsInsideNursery(this)) {
AddCellMemory(this, ObjectSlots::allocSize(capacity),
MemoryUse::ObjectSlots);
}
MOZ_ASSERT(hasDynamicSlots());
return true;
}
bool NativeObject::allocateSlots(Nursery& nursery, uint32_t newCapacity) {
MOZ_ASSERT(!hasUniqueId());
MOZ_ASSERT(!hasDynamicSlots());
uint32_t newAllocated = ObjectSlots::allocCount(newCapacity);
uint32_t dictionarySpan = getSlotsHeader()->dictionarySlotSpan();
HeapSlot* allocation =
AllocateCellBuffer<HeapSlot>(nursery, this, newAllocated);
if (!allocation) {
return false;
}
auto* newHeaderSlots = new (allocation) ObjectSlots(
newCapacity, dictionarySpan, ObjectSlots::NoUniqueIdInDynamicSlots);
slots_ = newHeaderSlots->slots();
Debug_SetSlotRangeToCrashOnTouch(slots_, newCapacity);
AddCellMemory(this, ObjectSlots::allocSize(newCapacity),
MemoryUse::ObjectSlots);
MOZ_ASSERT(hasDynamicSlots());
return true;
}
/* static */
bool NativeObject::growSlotsPure(JSContext* cx, NativeObject* obj,
uint32_t newCapacity) {
// IC code calls this directly.
AutoUnsafeCallWithABI unsafe;
if (!obj->growSlots(cx, obj->numDynamicSlots(), newCapacity)) {
cx->recoverFromOutOfMemory();
return false;
}
return true;
}
/* static */
bool NativeObject::addDenseElementPure(JSContext* cx, NativeObject* obj) {
// IC code calls this directly.
AutoUnsafeCallWithABI unsafe;
MOZ_ASSERT(obj->getDenseInitializedLength() == obj->getDenseCapacity());
MOZ_ASSERT(obj->isExtensible());
MOZ_ASSERT(!obj->isIndexed());
MOZ_ASSERT(!obj->is<TypedArrayObject>());
MOZ_ASSERT_IF(obj->is<ArrayObject>(),
obj->as<ArrayObject>().lengthIsWritable());
  // growElements will also report OOM if the number of dense elements would
  // exceed MAX_DENSE_ELEMENTS_COUNT. See goodElementsAllocationAmount.
uint32_t oldCapacity = obj->getDenseCapacity();
if (MOZ_UNLIKELY(!obj->growElements(cx, oldCapacity + 1))) {
cx->recoverFromOutOfMemory();
return false;
}
MOZ_ASSERT(obj->getDenseCapacity() > oldCapacity);
MOZ_ASSERT(obj->getDenseCapacity() <= MAX_DENSE_ELEMENTS_COUNT);
return true;
}
static inline void FreeSlots(JSContext* cx, NativeObject* obj,
ObjectSlots* slots, size_t nbytes) {
// Note: this is called when shrinking slots, not from the finalizer.
if (obj->isTenured()) {
MOZ_ASSERT(!cx->nursery().isInside(slots));
js_free(slots);
} else {
cx->nursery().freeBuffer(slots, nbytes);
}
}
void NativeObject::shrinkSlots(JSContext* cx, uint32_t oldCapacity,
uint32_t newCapacity) {
MOZ_ASSERT(hasDynamicSlots());
MOZ_ASSERT(newCapacity < oldCapacity);
MOZ_ASSERT(oldCapacity == getSlotsHeader()->capacity());
ObjectSlots* oldHeaderSlots = ObjectSlots::fromSlots(slots_);
MOZ_ASSERT(oldHeaderSlots->capacity() == oldCapacity);
uint64_t uid = maybeUniqueId();
uint32_t oldAllocated = ObjectSlots::allocCount(oldCapacity);
if (newCapacity == 0 && uid == 0) {
size_t nbytes = ObjectSlots::allocSize(oldCapacity);
RemoveCellMemory(this, nbytes, MemoryUse::ObjectSlots);
FreeSlots(cx, this, oldHeaderSlots, nbytes);
// dictionarySlotSpan is initialized to the correct value by the callers.
setEmptyDynamicSlots(0);
return;
}
MOZ_ASSERT_IF(!is<ArrayObject>() && !hasUniqueId(),
newCapacity >= SLOT_CAPACITY_MIN);
uint32_t dictionarySpan = getSlotsHeader()->dictionarySlotSpan();
uint32_t newAllocated = ObjectSlots::allocCount(newCapacity);
HeapSlot* allocation = ReallocateCellBuffer<HeapSlot>(
cx, this, reinterpret_cast<HeapSlot*>(oldHeaderSlots), oldAllocated,
newAllocated, js::MallocArena);
if (!allocation) {
// It's possible for realloc to fail when shrinking an allocation. In this
// case we continue using the original allocation but still update the
// capacity to the new requested capacity, which is smaller than the actual
// capacity.
cx->recoverFromOutOfMemory();
allocation = reinterpret_cast<HeapSlot*>(getSlotsHeader());
}
RemoveCellMemory(this, ObjectSlots::allocSize(oldCapacity),
MemoryUse::ObjectSlots);
AddCellMemory(this, ObjectSlots::allocSize(newCapacity),
MemoryUse::ObjectSlots);
auto* newHeaderSlots =
new (allocation) ObjectSlots(newCapacity, dictionarySpan, uid);
slots_ = newHeaderSlots->slots();
}
void NativeObject::initFixedElements(gc::AllocKind kind, uint32_t length) {
uint32_t capacity =
gc::GetGCKindSlots(kind) - ObjectElements::VALUES_PER_HEADER;
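  // E.g. (illustrative, assuming ObjectElements::VALUES_PER_HEADER == 2): an
  // alloc kind providing 8 slots leaves room for 6 dense elements after the
  // inline header.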
setFixedElements();
new (getElementsHeader()) ObjectElements(capacity, length);
getElementsHeader()->flags |= ObjectElements::FIXED;
MOZ_ASSERT(hasFixedElements());
}
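// Density heuristic sketch (illustrative, assuming SPARSE_DENSITY_RATIO ==
// 8): for requiredCapacity = 800 and newElementsHint = 0, at least 100 of
// the existing dense elements must be non-holes for the object to remain
// dense.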
bool NativeObject::willBeSparseElements(uint32_t requiredCapacity,
uint32_t newElementsHint) {
MOZ_ASSERT(is<NativeObject>());
MOZ_ASSERT(requiredCapacity > MIN_SPARSE_INDEX);
uint32_t cap = getDenseCapacity();
MOZ_ASSERT(requiredCapacity >= cap);
if (requiredCapacity > MAX_DENSE_ELEMENTS_COUNT) {
return true;
}
uint32_t minimalDenseCount = requiredCapacity / SPARSE_DENSITY_RATIO;
if (newElementsHint >= minimalDenseCount) {
return false;
}
minimalDenseCount -= newElementsHint;
if (minimalDenseCount > cap) {
return true;
}
uint32_t len = getDenseInitializedLength();
const Value* elems = getDenseElements();
for (uint32_t i = 0; i < len; i++) {
if (!elems[i].isMagic(JS_ELEMENTS_HOLE) && !--minimalDenseCount) {
return false;
}
}
return true;
}
/* static */
DenseElementResult NativeObject::maybeDensifySparseElements(
JSContext* cx, Handle<NativeObject*> obj) {
/*
* Wait until after the object goes into dictionary mode, which must happen
* when sparsely packing any array with more than MIN_SPARSE_INDEX elements
* (see PropertyTree::MAX_HEIGHT).
*/
if (!obj->inDictionaryMode()) {
return DenseElementResult::Incomplete;
}
/*
 * Only measure the number of indexed properties every log(n) times when
 * populating the object: the linear scan below runs only when slotSpan is
 * an exact power of two.
 */
uint32_t slotSpan = obj->slotSpan();
if (slotSpan != RoundUpPow2(slotSpan)) {
return DenseElementResult::Incomplete;
}
/* Watch for conditions under which an object's elements cannot be dense. */
if (!obj->isExtensible()) {
return DenseElementResult::Incomplete;
}
/*
* The indexes in the object need to be sufficiently dense before they can
* be converted to dense mode.
*/
uint32_t numDenseElements = 0;
uint32_t newInitializedLength = 0;
for (ShapePropertyIter<NoGC> iter(obj->shape()); !iter.done(); iter++) {
uint32_t index;
if (!IdIsIndex(iter->key(), &index)) {
continue;
}
if (iter->flags() != PropertyFlags::defaultDataPropFlags) {
// For simplicity, only densify the object if all indexed properties can
// be converted to dense elements.
return DenseElementResult::Incomplete;
}
MOZ_ASSERT(iter->isDataProperty());
numDenseElements++;
newInitializedLength = std::max(newInitializedLength, index + 1);
}
if (numDenseElements * SPARSE_DENSITY_RATIO < newInitializedLength) {
return DenseElementResult::Incomplete;
}
if (newInitializedLength > MAX_DENSE_ELEMENTS_COUNT) {
return DenseElementResult::Incomplete;
}
/*
* This object meets all necessary restrictions, convert all indexed
* properties into dense elements.
*/
if (newInitializedLength > obj->getDenseCapacity()) {
if (!obj->growElements(cx, newInitializedLength)) {
return DenseElementResult::Failure;
}
}
obj->ensureDenseInitializedLength(newInitializedLength, 0);
if (obj->compartment()->objectMaybeInIteration(obj)) {
// Mark the densified elements as maybe-in-iteration. See also the comment
// in GetIterator.
obj->markDenseElementsMaybeInIteration();
}
if (!NativeObject::densifySparseElements(cx, obj)) {
return DenseElementResult::Failure;
}
return DenseElementResult::Success;
}
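// Shifted-elements sketch (illustrative): removing elements from the front
// can be done by advancing the elements_ pointer instead of memmoving the
// whole array; the skipped slots are recorded as "shifted" in the header.
// moveShiftedElements undoes this, moving the header and elements back so
// that element 0 again directly follows the header:
//
//   before: [ shifted slots ... | header | e0 e1 ... ]
//   after:  [ header | e0 e1 ... ]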
void NativeObject::moveShiftedElements() {
MOZ_ASSERT(isExtensible());
ObjectElements* header = getElementsHeader();
uint32_t numShifted = header->numShiftedElements();
MOZ_ASSERT(numShifted > 0);
uint32_t initLength = header->initializedLength;
ObjectElements* newHeader =
static_cast<ObjectElements*>(getUnshiftedElementsHeader());
memmove(newHeader, header, sizeof(ObjectElements));
newHeader->clearShiftedElements();
newHeader->capacity += numShifted;
elements_ = newHeader->elements();
// To move the elements, temporarily update initializedLength to include
// the shifted elements.
newHeader->initializedLength += numShifted;
// Move the elements. Initialize to |undefined| to ensure pre-barriers
// don't see garbage.
for (size_t i = 0; i < numShifted; i++) {
initDenseElement(i, UndefinedValue());
}
moveDenseElements(0, numShifted, initLength);
// Restore the initialized length. We use setDenseInitializedLength to
// make sure prepareElementRangeForOverwrite is called on the shifted
// elements.
setDenseInitializedLength(initLength);
}
void NativeObject::maybeMoveShiftedElements() {
MOZ_ASSERT(isExtensible());
ObjectElements* header = getElementsHeader();
MOZ_ASSERT(header->numShiftedElements() > 0);
// Move the elements if less than a third of the allocated space is in use.
if (header->capacity < header->numAllocatedElements() / 3) {
moveShiftedElements();
}
}
bool NativeObject::tryUnshiftDenseElements(uint32_t count) {
MOZ_ASSERT(isExtensible());
MOZ_ASSERT(count > 0);
ObjectElements* header = getElementsHeader();
uint32_t numShifted = header->numShiftedElements();
if (count > numShifted) {
// We need more elements than are easily available. Try to make space
// for more elements than we need (and shift the remaining ones) so
// that unshifting more elements later will be fast.
// Don't bother reserving elements if the number of elements is small.
// Note that there's no technical reason for using this particular
// limit.
if (header->initializedLength <= 10 ||
header->hasNonwritableArrayLength() ||
MOZ_UNLIKELY(count > ObjectElements::MaxShiftedElements)) {
return false;
}
MOZ_ASSERT(header->capacity >= header->initializedLength);
uint32_t unusedCapacity = header->capacity - header->initializedLength;
// Determine toShift, the number of extra elements we want to make
// available.
uint32_t toShift = count - numShifted;
MOZ_ASSERT(toShift <= ObjectElements::MaxShiftedElements,
"count <= MaxShiftedElements so toShift <= MaxShiftedElements");
// Give up if we need to allocate more elements.
if (toShift > unusedCapacity) {
return false;
}
// Move more elements than we need, so that other unshift calls will be
// fast. We just have to make sure we don't exceed unusedCapacity.
toShift = std::min(toShift + unusedCapacity / 2, unusedCapacity);
// Ensure |numShifted + toShift| does not exceed MaxShiftedElements.
if (numShifted + toShift > ObjectElements::MaxShiftedElements) {
toShift = ObjectElements::MaxShiftedElements - numShifted;
}
MOZ_ASSERT(count <= numShifted + toShift);
MOZ_ASSERT(numShifted + toShift <= ObjectElements::MaxShiftedElements);
MOZ_ASSERT(toShift <= unusedCapacity);
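    // Worked example (numbers hypothetical, assuming they stay within
    // ObjectElements::MaxShiftedElements): with initializedLength = 20,
    // capacity = 32, numShifted = 0 and count = 3, unusedCapacity = 12 and
    // toShift = min(3 + 12 / 2, 12) = 9, so six extra elements are made
    // available to speed up future unshifts.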
// Now move/unshift the elements.
uint32_t initLen = header->initializedLength;
setDenseInitializedLength(initLen + toShift);
for (uint32_t i = 0; i < toShift; i++) {
initDenseElement(initLen + i, UndefinedValue());
}
moveDenseElements(toShift, 0, initLen);
// Shift the elements we just prepended.
shiftDenseElementsUnchecked(toShift);
// We can now fall-through to the fast path below.
header = getElementsHeader();
MOZ_ASSERT(header->numShiftedElements() == numShifted + toShift);
numShifted = header->numShiftedElements();
MOZ_ASSERT(count <= numShifted);
}
elements_ -= count;
ObjectElements* newHeader = getElementsHeader();
memmove(newHeader, header, sizeof(ObjectElements));
newHeader->unshiftShiftedElements(count);
// Initialize to |undefined| to ensure pre-barriers don't see garbage.
for (uint32_t i = 0; i < count; i++) {
initDenseElement(i, UndefinedValue());
}
return true;
}
// Given a requested capacity (in elements) and (potentially) the length of an
// array for which elements are being allocated, compute an actual allocation
// amount (in elements). (Allocation amounts include space for an
// ObjectElements instance, so a return value of |N| implies
// |N - ObjectElements::VALUES_PER_HEADER| usable elements.)
//
// The requested/actual allocation distinction is meant to:
//
// * preserve amortized O(N) time to add N elements;
// * minimize the number of unused elements beyond an array's length, and
// * provide at least ELEMENT_CAPACITY_MIN elements no matter what (so adding
// the first several elements to small arrays only needs one allocation).
//
// Note: the structure and behavior of this method follow along with
// UnboxedArrayObject::chooseCapacityIndex. Changes to the allocation strategy
// in one should generally be matched by the other.
/* static */
bool NativeObject::goodElementsAllocationAmount(JSContext* cx,
uint32_t reqCapacity,
uint32_t length,
uint32_t* goodAmount) {
if (reqCapacity > MAX_DENSE_ELEMENTS_COUNT) {
ReportOutOfMemory(cx);
return false;
}
uint32_t reqAllocated = reqCapacity + ObjectElements::VALUES_PER_HEADER;
// Handle "small" requests primarily by doubling.
const uint32_t Mebi = 1 << 20;
if (reqAllocated < Mebi) {
uint32_t amount =
mozilla::AssertedCast<uint32_t>(RoundUpPow2(reqAllocated));
// If |amount| would be 2/3 or more of the array's length, adjust
// it (up or down) to be equal to the array's length. This avoids
// allocating excess elements that aren't likely to be needed, either
// in this resizing or a subsequent one. The 2/3 factor is chosen so
// that exceptional resizings will at most triple the capacity, as
// opposed to the usual doubling.
uint32_t goodCapacity = amount - ObjectElements::VALUES_PER_HEADER;
if (length >= reqCapacity && goodCapacity > (length / 3) * 2) {
amount = length + ObjectElements::VALUES_PER_HEADER;
}
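    // Worked example (illustrative, assuming VALUES_PER_HEADER == 2):
    // reqCapacity = 100 with length = 100 gives reqAllocated = 102, which
    // rounds up to amount = 128 and goodCapacity = 126. Since 126 >
    // (100 / 3) * 2 = 66, amount is clamped back to length + header = 102.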
if (amount < ELEMENT_CAPACITY_MIN) {
amount = ELEMENT_CAPACITY_MIN;
}
*goodAmount = amount;
return true;
}