// NOTE: stray page-navigation text ("Source code" / "Revision control" /
// "Other Tools") from a web export was commented out here; it is not part
// of this translation unit.

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "vm/NativeObject-inl.h"
#include "mozilla/Casting.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Maybe.h"
#include <algorithm>
#include <iterator>
#include "debugger/DebugAPI.h"
#include "gc/Marking.h"
#include "gc/MaybeRooted.h"
#include "jit/BaselineIC.h"
#include "js/CharacterEncoding.h"
#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
#include "js/friend/StackLimits.h" // js::CheckRecursionLimit{,DontReport}
#include "js/Result.h"
#include "js/Value.h"
#include "util/Memory.h"
#include "vm/EqualityOperations.h" // js::SameValue
#include "vm/PlainObject.h" // js::PlainObject
#include "vm/TypedArrayObject.h"
#include "gc/Nursery-inl.h"
#include "vm/ArrayObject-inl.h"
#include "vm/BytecodeLocation-inl.h"
#include "vm/EnvironmentObject-inl.h"
#include "vm/JSObject-inl.h"
#include "vm/JSScript-inl.h"
#include "vm/Shape-inl.h"
using namespace js;
using JS::AutoCheckCannotGC;
using mozilla::CheckedInt;
using mozilla::DebugOnly;
using mozilla::PodCopy;
using mozilla::RoundUpPow2;
// Backing storage shared by all objects whose elements are empty: an
// ObjectElements header immediately followed by one padding Value.
struct EmptyObjectElements {
  const ObjectElements emptyElementsHeader;

  // Add an extra (unused) Value to make sure an out-of-bounds index when
  // masked (resulting in index 0) accesses valid memory.
  const Value val;

 public:
  // Header for non-shared-memory objects: zero capacity, zero length.
  constexpr EmptyObjectElements()
      : emptyElementsHeader(0, 0), val(UndefinedValue()) {}
  // Header variant carrying the shared-memory flag (used by objects backed
  // by shared memory).
  explicit constexpr EmptyObjectElements(ObjectElements::SharedMemory shmem)
      : emptyElementsHeader(0, 0, shmem), val(UndefinedValue()) {}
};
static constexpr EmptyObjectElements emptyElementsHeader;

/* Objects with no elements share one empty set of elements. */
// The public pointer skips past the header so it points at the (single,
// padding) element slot, matching the layout of real elements allocations.
HeapSlot* const js::emptyObjectElements = reinterpret_cast<HeapSlot*>(
    uintptr_t(&emptyElementsHeader) + sizeof(ObjectElements));

static constexpr EmptyObjectElements emptyElementsHeaderShared(
    ObjectElements::SharedMemory::IsShared);

/* Objects with no elements whose header carries the shared-memory flag share
 * this separate empty set of elements. */
HeapSlot* const js::emptyObjectElementsShared = reinterpret_cast<HeapSlot*>(
    uintptr_t(&emptyElementsHeaderShared) + sizeof(ObjectElements));
// Slots header for objects with no dynamic slots: zero capacity, with the
// dictionary-mode slot span baked into the header.
struct EmptyObjectSlots : public ObjectSlots {
  explicit constexpr EmptyObjectSlots(size_t dictionarySlotSpan)
      : ObjectSlots(0, dictionarySlotSpan) {}
};
// One shared empty-slots header per possible dictionary slot span
// (0..MAX_FIXED_SLOTS), so a dictionary object with no dynamic slots can
// still record its slot span without allocating.
static constexpr EmptyObjectSlots emptyObjectSlotsHeaders[17] = {
    EmptyObjectSlots(0),  EmptyObjectSlots(1),  EmptyObjectSlots(2),
    EmptyObjectSlots(3),  EmptyObjectSlots(4),  EmptyObjectSlots(5),
    EmptyObjectSlots(6),  EmptyObjectSlots(7),  EmptyObjectSlots(8),
    EmptyObjectSlots(9),  EmptyObjectSlots(10), EmptyObjectSlots(11),
    EmptyObjectSlots(12), EmptyObjectSlots(13), EmptyObjectSlots(14),
    EmptyObjectSlots(15), EmptyObjectSlots(16)};

static_assert(std::size(emptyObjectSlotsHeaders) ==
              NativeObject::MAX_FIXED_SLOTS + 1);

// Public table of slots pointers (pointing past the headers above), indexed
// by dictionary slot span.
HeapSlot* const js::emptyObjectSlotsForDictionaryObject[17] = {
    emptyObjectSlotsHeaders[0].slots(),  emptyObjectSlotsHeaders[1].slots(),
    emptyObjectSlotsHeaders[2].slots(),  emptyObjectSlotsHeaders[3].slots(),
    emptyObjectSlotsHeaders[4].slots(),  emptyObjectSlotsHeaders[5].slots(),
    emptyObjectSlotsHeaders[6].slots(),  emptyObjectSlotsHeaders[7].slots(),
    emptyObjectSlotsHeaders[8].slots(),  emptyObjectSlotsHeaders[9].slots(),
    emptyObjectSlotsHeaders[10].slots(), emptyObjectSlotsHeaders[11].slots(),
    emptyObjectSlotsHeaders[12].slots(), emptyObjectSlotsHeaders[13].slots(),
    emptyObjectSlotsHeaders[14].slots(), emptyObjectSlotsHeaders[15].slots(),
    emptyObjectSlotsHeaders[16].slots()};

static_assert(std::size(emptyObjectSlotsForDictionaryObject) ==
              NativeObject::MAX_FIXED_SLOTS + 1);

// Non-dictionary objects with no dynamic slots use the span-0 header.
HeapSlot* const js::emptyObjectSlots = emptyObjectSlotsForDictionaryObject[0];
#ifdef DEBUG
// Typed arrays must never carry dense elements; any other native object may.
bool NativeObject::canHaveNonEmptyElements() {
  bool isTypedArray = this->is<TypedArrayObject>();
  return !isTypedArray;
}
#endif  // DEBUG
/* static */
void ObjectElements::PrepareForPreventExtensions(JSContext* cx,
                                                 NativeObject* obj) {
  // Objects on the shared empty-elements representation have nothing to
  // shrink; the shared header never has shifted elements.
  if (obj->hasEmptyElements()) {
    MOZ_ASSERT(obj->getElementsHeader()->numShiftedElements() == 0);
    return;
  }

  // Drop excess capacity so the capacity equals the initialized length.
  obj->shrinkCapacityToInitializedLength(cx);

  // shrinkCapacityToInitializedLength ensures there are no shifted elements.
  MOZ_ASSERT(obj->getElementsHeader()->numShiftedElements() == 0);
}
/* static */
void ObjectElements::PreventExtensions(NativeObject* obj) {
  MOZ_ASSERT(!obj->isExtensible());
  MOZ_ASSERT(obj->getElementsHeader()->numShiftedElements() == 0);
  MOZ_ASSERT(obj->getDenseInitializedLength() == obj->getDenseCapacity());

  // Never mutate the shared empty-elements header; objects using it simply
  // stay on that representation.
  if (obj->hasEmptyElements()) {
    return;
  }
  obj->getElementsHeader()->setNotExtensible();
}
/* static */
// Seal or freeze the object's dense elements according to |level|. Returns
// false only on failure to update the object's shape flags (OOM).
bool ObjectElements::FreezeOrSeal(JSContext* cx, HandleNativeObject obj,
                                  IntegrityLevel level) {
  MOZ_ASSERT_IF(level == IntegrityLevel::Frozen && obj->is<ArrayObject>(),
                !obj->as<ArrayObject>().lengthIsWritable());
  MOZ_ASSERT(!obj->isExtensible());
  MOZ_ASSERT(obj->getElementsHeader()->numShiftedElements() == 0);

  // Nothing to do if there are no elements or they are already frozen.
  if (obj->hasEmptyElements() || obj->denseElementsAreFrozen()) {
    return true;
  }

  // Set the FrozenElements object flag first (this is fallible); the flag
  // change also generates a new shape for the object.
  if (level == IntegrityLevel::Frozen) {
    if (!JSObject::setFlag(cx, obj, ObjectFlag::FrozenElements,
                           JSObject::GENERATE_SHAPE)) {
      return false;
    }
  }

  // Sealing is a prerequisite of freezing; do it if not already done.
  if (!obj->denseElementsAreSealed()) {
    obj->getElementsHeader()->seal();
  }

  if (level == IntegrityLevel::Frozen) {
    obj->getElementsHeader()->freeze();
  }

  return true;
}
#ifdef DEBUG
// Shape consistency checks are expensive, so even in debug builds they are
// disabled unless explicitly turned on via enableShapeConsistencyChecks().
static mozilla::Atomic<bool, mozilla::Relaxed> gShapeConsistencyChecksEnabled(
    false);

/* static */
void js::NativeObject::enableShapeConsistencyChecks() {
  gShapeConsistencyChecksEnabled = true;
}
// Debug-only verification that this object's shape lineage, its ShapeTable
// (if any), and its slots are mutually consistent.
void js::NativeObject::checkShapeConsistency() {
  if (!gShapeConsistencyChecksEnabled) {
    return;
  }

  MOZ_ASSERT(is<NativeObject>());

  Shape* shape = lastProperty();
  Shape* prev = nullptr;

  AutoCheckCannotGC nogc;
  if (inDictionaryMode()) {
    if (ShapeTable* table = shape->maybeTable(nogc)) {
      // Every slot on the table's free list must lie within the slot span.
      for (uint32_t fslot = table->freeList(); fslot != SHAPE_INVALID_SLOT;
           fslot = getSlot(fslot).toPrivateUint32()) {
        MOZ_ASSERT(fslot < slotSpan());
      }

      // Only the last property may own a table, and the table must map each
      // property id in the lineage back to its shape.
      while (shape->parent) {
        MOZ_ASSERT_IF(lastProperty() != shape, !shape->hasTable());

        ShapeTable::Entry& entry =
            table->search<MaybeAdding::NotAdding>(shape->propid(), nogc);
        MOZ_ASSERT(entry.shape() == shape);
        shape = shape->parent;
      }
    }

    // Walk the dictionary list: data-property slots must be in range and
    // each shape's dictNext link must point back at its successor (or at
    // the object itself for the last property).
    shape = lastProperty();
    while (shape) {
      MOZ_ASSERT_IF(!shape->isEmptyShape() && shape->isDataProperty(),
                    shape->slot() < slotSpan());
      if (!prev) {
        MOZ_ASSERT(lastProperty() == shape);
        MOZ_ASSERT(shape->dictNext.toObject() == this);
      } else {
        MOZ_ASSERT(shape->dictNext.toShape() == prev);
      }
      prev = shape;
      shape = shape->parent;
    }
  } else {
    while (shape->parent) {
      // Any table along the lineage must map every property in its range.
      if (ShapeTable* table = shape->maybeTable(nogc)) {
        MOZ_ASSERT(shape->parent);
        for (Shape::Range<NoGC> r(shape); !r.empty(); r.popFront()) {
          ShapeTable::Entry& entry =
              table->search<MaybeAdding::NotAdding>(r.front().propid(), nogc);
          MOZ_ASSERT(entry.shape() == &r.front());
        }
      }
      if (prev) {
        // Slot numbers never increase towards the root of the shape tree.
        MOZ_ASSERT_IF(shape->isDataProperty(),
                      prev->maybeSlot() >= shape->maybeSlot());
        shape->children.checkHasChild(prev);
      }
      prev = shape;
      shape = shape->parent;
    }
  }
}
#endif
void js::NativeObject::initializeSlotRange(uint32_t start, uint32_t end) {
/*
* No bounds check, as this is used when the object's shape does not
* reflect its allocated slots (updateSlotsForSpan).
*/
HeapSlot* fixedStart;
HeapSlot* fixedEnd;
HeapSlot* slotsStart;
HeapSlot* slotsEnd;
getSlotRangeUnchecked(start, end, &fixedStart, &fixedEnd, &slotsStart,
&slotsEnd);
uint32_t offset = start;
for (HeapSlot* sp = fixedStart; sp < fixedEnd; sp++) {
sp->init(this, HeapSlot::Slot, offset++, UndefinedValue());
}
for (HeapSlot* sp = slotsStart; sp < slotsEnd; sp++) {
sp->init(this, HeapSlot::Slot, offset++, UndefinedValue());
}
}
void js::NativeObject::initSlots(const Value* vector, uint32_t length) {
HeapSlot* fixedStart;
HeapSlot* fixedEnd;
HeapSlot* slotsStart;
HeapSlot* slotsEnd;
getSlotRange(0, length, &fixedStart, &fixedEnd, &slotsStart, &slotsEnd);
uint32_t offset = 0;
for (HeapSlot* sp = fixedStart; sp < fixedEnd; sp++) {
sp->init(this, HeapSlot::Slot, offset++, *vector++);
}
for (HeapSlot* sp = slotsStart; sp < slotsEnd; sp++) {
sp->init(this, HeapSlot::Slot, offset++, *vector++);
}
}
#ifdef DEBUG
// Debug check that |slot| is a valid slot index for this object. With
// SENTINEL_ALLOWED, the one-past-the-end index is also accepted.
bool js::NativeObject::slotInRange(uint32_t slot,
                                   SentinelAllowed sentinel) const {
  MOZ_ASSERT(!gc::IsForwarded(lastProperty()));
  uint32_t capacity = numFixedSlots() + numDynamicSlots();
  return sentinel == SENTINEL_ALLOWED ? slot <= capacity : slot < capacity;
}
bool js::NativeObject::slotIsFixed(uint32_t slot) const {
  // We call numFixedSlotsMaybeForwarded() to allow reading slots of
  // associated objects in trace hooks that may be called during a moving GC.
  uint32_t fixedCount = numFixedSlotsMaybeForwarded();
  return slot < fixedCount;
}
bool js::NativeObject::isNumFixedSlots(uint32_t nfixed) const {
  // We call numFixedSlotsMaybeForwarded() to allow reading slots of
  // associated objects in trace hooks that may be called during a moving GC.
  return numFixedSlotsMaybeForwarded() == nfixed;
}
#endif /* DEBUG */
// Look up |id| on this object's shape lineage (may hashify the last shape).
Shape* js::NativeObject::lookup(JSContext* cx, jsid id) {
  MOZ_ASSERT(is<NativeObject>());
  Shape* start = lastProperty();
  return Shape::search(cx, start, id);
}
// Side-effect-free lookup of |id| (never builds a ShapeTable).
Shape* js::NativeObject::lookupPure(jsid id) {
  MOZ_ASSERT(is<NativeObject>());
  Shape* start = lastProperty();
  return Shape::searchNoHashify(start, id);
}
// Grow or shrink a dictionary-mode object's slots to cover |span| and record
// the new span in the slots header.
bool NativeObject::ensureSlotsForDictionaryObject(JSContext* cx,
                                                  uint32_t span) {
  MOZ_ASSERT(inDictionaryMode());

  size_t currentSpan = dictionaryModeSlotSpan();
  if (currentSpan == span) {
    return true;  // Already the right size.
  }
  if (!updateSlotsForSpan(cx, currentSpan, span)) {
    return false;
  }
  setDictionaryModeSlotSpan(span);
  return true;
}
// Grow the dynamic slots buffer from |oldCapacity| to |newCapacity| slots.
// Returns false on OOM, leaving the object unchanged.
bool NativeObject::growSlots(JSContext* cx, uint32_t oldCapacity,
                             uint32_t newCapacity) {
  MOZ_ASSERT(newCapacity > oldCapacity);
  MOZ_ASSERT_IF(!is<ArrayObject>(), newCapacity >= SLOT_CAPACITY_MIN);

  /*
   * Slot capacities are determined by the span of allocated objects. Due to
   * the limited number of bits to store shape slots, object growth is
   * throttled well before the slot capacity can overflow.
   */
  NativeObject::slotsSizeMustNotOverflow();
  MOZ_ASSERT(newCapacity <= MAX_SLOTS_COUNT);

  // No dynamic slots yet: allocate a fresh buffer instead of reallocating.
  if (!hasDynamicSlots()) {
    return allocateSlots(cx, newCapacity);
  }

  uint32_t newAllocated = ObjectSlots::allocCount(newCapacity);

  // Preserve the dictionary slot span stored in the slots header across the
  // reallocation.
  uint32_t dictionarySpan = getSlotsHeader()->dictionarySlotSpan();

  uint32_t oldAllocated = ObjectSlots::allocCount(oldCapacity);

  ObjectSlots* oldHeaderSlots = ObjectSlots::fromSlots(slots_);
  MOZ_ASSERT(oldHeaderSlots->capacity() == oldCapacity);

  HeapSlot* allocation = ReallocateObjectBuffer<HeapSlot>(
      cx, this, reinterpret_cast<HeapSlot*>(oldHeaderSlots), oldAllocated,
      newAllocated);
  if (!allocation) {
    return false; /* Leave slots at its old size. */
  }

  // Re-initialize the header inside the (possibly moved) allocation.
  auto* newHeaderSlots =
      new (allocation) ObjectSlots(newCapacity, dictionarySpan);
  slots_ = newHeaderSlots->slots();

  // Poison the newly added slots in debug builds so uninitialized reads
  // crash rather than yielding garbage.
  Debug_SetSlotRangeToCrashOnTouch(slots_ + oldCapacity,
                                   newCapacity - oldCapacity);

  // Keep the GC's memory accounting in sync with the new allocation size.
  RemoveCellMemory(this, ObjectSlots::allocSize(oldCapacity),
                   MemoryUse::ObjectSlots);
  AddCellMemory(this, ObjectSlots::allocSize(newCapacity),
                MemoryUse::ObjectSlots);

  MOZ_ASSERT(hasDynamicSlots());
  return true;
}
// Allocate an initial dynamic slots buffer of |newCapacity| slots. The
// object must currently have no dynamic slots. Returns false on OOM.
bool NativeObject::allocateSlots(JSContext* cx, uint32_t newCapacity) {
  MOZ_ASSERT(!hasDynamicSlots());

  uint32_t newAllocated = ObjectSlots::allocCount(newCapacity);

  // Carry over the dictionary slot span recorded in the current (empty)
  // slots header.
  uint32_t dictionarySpan = getSlotsHeader()->dictionarySlotSpan();

  HeapSlot* allocation = AllocateObjectBuffer<HeapSlot>(cx, this, newAllocated);
  if (!allocation) {
    return false;
  }

  // Construct the header in place; slots_ points just past it.
  auto* newHeaderSlots =
      new (allocation) ObjectSlots(newCapacity, dictionarySpan);
  slots_ = newHeaderSlots->slots();

  // Poison the fresh slots in debug builds to catch uninitialized reads.
  Debug_SetSlotRangeToCrashOnTouch(slots_, newCapacity);

  AddCellMemory(this, ObjectSlots::allocSize(newCapacity),
                MemoryUse::ObjectSlots);

  MOZ_ASSERT(hasDynamicSlots());
  return true;
}
/* static */
bool NativeObject::growSlotsPure(JSContext* cx, NativeObject* obj,
                                 uint32_t newCapacity) {
  // IC code calls this directly.
  AutoUnsafeCallWithABI unsafe;

  uint32_t currentCapacity = obj->numDynamicSlots();
  if (obj->growSlots(cx, currentCapacity, newCapacity)) {
    return true;
  }

  // Swallow the OOM here; the caller falls back to a slower path.
  cx->recoverFromOutOfMemory();
  return false;
}
/* static */
bool NativeObject::addDenseElementPure(JSContext* cx, NativeObject* obj) {
  // IC code calls this directly.
  AutoUnsafeCallWithABI unsafe;

  MOZ_ASSERT(obj->getDenseInitializedLength() == obj->getDenseCapacity());
  MOZ_ASSERT(obj->isExtensible());
  MOZ_ASSERT(!obj->isIndexed());
  MOZ_ASSERT(!obj->is<TypedArrayObject>());
  MOZ_ASSERT_IF(obj->is<ArrayObject>(),
                obj->as<ArrayObject>().lengthIsWritable());

  // growElements will report OOM also if the number of dense elements will
  // exceed MAX_DENSE_ELEMENTS_COUNT. See goodElementsAllocationAmount.
  uint32_t capacityBefore = obj->getDenseCapacity();
  bool grown = obj->growElements(cx, capacityBefore + 1);
  if (MOZ_UNLIKELY(!grown)) {
    cx->recoverFromOutOfMemory();
    return false;
  }

  MOZ_ASSERT(obj->getDenseCapacity() > capacityBefore);
  MOZ_ASSERT(obj->getDenseCapacity() <= MAX_DENSE_ELEMENTS_COUNT);
  return true;
}
// Release a slots buffer: nursery buffers of nursery objects go back to the
// nursery; everything else (helper-thread or tenured) was malloc'ed.
static inline void FreeSlots(JSContext* cx, NativeObject* obj,
                             ObjectSlots* slots, size_t nbytes) {
  if (!cx->isHelperThreadContext() && !obj->isTenured()) {
    cx->nursery().freeBuffer(slots, nbytes);
    return;
  }
  // Tenured objects must not hold nursery-allocated slot buffers. (Helper
  // threads have no nursery to check against.)
  MOZ_ASSERT_IF(!cx->isHelperThreadContext(), !cx->nursery().isInside(slots));
  js_free(slots);
}
// Shrink the dynamic slots buffer from |oldCapacity| to |newCapacity| slots.
// Infallible: if realloc fails, the old (larger) allocation is kept but
// accounted at the smaller size.
void NativeObject::shrinkSlots(JSContext* cx, uint32_t oldCapacity,
                               uint32_t newCapacity) {
  MOZ_ASSERT(newCapacity < oldCapacity);
  MOZ_ASSERT(oldCapacity == getSlotsHeader()->capacity());

  // Preserve the dictionary slot span across the reallocation.
  uint32_t dictionarySpan = getSlotsHeader()->dictionarySlotSpan();

  ObjectSlots* oldHeaderSlots = ObjectSlots::fromSlots(slots_);
  MOZ_ASSERT(oldHeaderSlots->capacity() == oldCapacity);

  uint32_t oldAllocated = ObjectSlots::allocCount(oldCapacity);

  // Shrinking to zero: free the buffer entirely and switch back to the
  // shared empty-slots header for this dictionary span.
  if (newCapacity == 0) {
    size_t nbytes = ObjectSlots::allocSize(oldCapacity);
    RemoveCellMemory(this, nbytes, MemoryUse::ObjectSlots);
    FreeSlots(cx, this, oldHeaderSlots, nbytes);
    setEmptyDynamicSlots(dictionarySpan);
    return;
  }

  MOZ_ASSERT_IF(!is<ArrayObject>(), newCapacity >= SLOT_CAPACITY_MIN);

  uint32_t newAllocated = ObjectSlots::allocCount(newCapacity);

  HeapSlot* allocation = ReallocateObjectBuffer<HeapSlot>(
      cx, this, reinterpret_cast<HeapSlot*>(oldHeaderSlots), oldAllocated,
      newAllocated);
  if (!allocation) {
    // It's possible for realloc to fail when shrinking an allocation. In this
    // case we continue using the original allocation but still update the
    // capacity to the new requested capacity, which is smaller than the actual
    // capacity.
    cx->recoverFromOutOfMemory();
    allocation = reinterpret_cast<HeapSlot*>(getSlotsHeader());
  }

  // Keep the GC's memory accounting in sync with the (nominal) new size.
  RemoveCellMemory(this, ObjectSlots::allocSize(oldCapacity),
                   MemoryUse::ObjectSlots);
  AddCellMemory(this, ObjectSlots::allocSize(newCapacity),
                MemoryUse::ObjectSlots);

  auto* newHeaderSlots =
      new (allocation) ObjectSlots(newCapacity, dictionarySpan);
  slots_ = newHeaderSlots->slots();
}
// Decide whether growing to |requiredCapacity| (with |newElementsHint| new
// elements about to be added) should switch this object to sparse storage.
bool NativeObject::willBeSparseElements(uint32_t requiredCapacity,
                                        uint32_t newElementsHint) {
  MOZ_ASSERT(is<NativeObject>());
  MOZ_ASSERT(requiredCapacity > MIN_SPARSE_INDEX);

  uint32_t capacity = getDenseCapacity();
  MOZ_ASSERT(requiredCapacity >= capacity);

  // Anything beyond the maximum dense element count must be sparse.
  if (requiredCapacity > MAX_DENSE_ELEMENTS_COUNT) {
    return true;
  }

  // Minimum number of elements that must be present to stay dense.
  uint32_t neededDense = requiredCapacity / SPARSE_DENSITY_RATIO;
  if (newElementsHint >= neededDense) {
    return false;
  }
  neededDense -= newElementsHint;

  if (neededDense > capacity) {
    return true;
  }

  // Count existing non-hole elements, bailing out as soon as density is
  // proven sufficient.
  uint32_t initLen = getDenseInitializedLength();
  const Value* elems = getDenseElements();
  for (uint32_t i = 0; i < initLen; i++) {
    if (elems[i].isMagic(JS_ELEMENTS_HOLE)) {
      continue;
    }
    if (--neededDense == 0) {
      return false;
    }
  }
  return true;
}
/* static */
// Try to convert an object's sparse indexed properties back into dense
// elements. Returns Incomplete when densification isn't possible or isn't
// worthwhile yet, Failure on OOM, Success otherwise.
DenseElementResult NativeObject::maybeDensifySparseElements(
    JSContext* cx, HandleNativeObject obj) {
  /*
   * Wait until after the object goes into dictionary mode, which must happen
   * when sparsely packing any array with more than MIN_SPARSE_INDEX elements
   * (see PropertyTree::MAX_HEIGHT).
   */
  if (!obj->inDictionaryMode()) {
    return DenseElementResult::Incomplete;
  }

  /*
   * Only measure the number of indexed properties every log(n) times when
   * populating the object.
   */
  uint32_t slotSpan = obj->slotSpan();
  if (slotSpan != RoundUpPow2(slotSpan)) {
    return DenseElementResult::Incomplete;
  }

  /* Watch for conditions under which an object's elements cannot be dense. */
  if (!obj->isExtensible()) {
    return DenseElementResult::Incomplete;
  }

  /*
   * The indexes in the object need to be sufficiently dense before they can
   * be converted to dense mode.
   */
  uint32_t numDenseElements = 0;
  uint32_t newInitializedLength = 0;
  RootedShape shape(cx, obj->lastProperty());
  while (!shape->isEmptyShape()) {
    uint32_t index;
    if (IdIsIndex(shape->propid(), &index)) {
      // Only plain enumerable data properties can become dense elements.
      if (shape->attributes() == JSPROP_ENUMERATE &&
          shape->hasDefaultGetter() && shape->hasDefaultSetter()) {
        numDenseElements++;
        newInitializedLength = std::max(newInitializedLength, index + 1);
      } else {
        /*
         * For simplicity, only densify the object if all indexed
         * properties can be converted to dense elements.
         */
        return DenseElementResult::Incomplete;
      }
    }
    shape = shape->previous();
  }

  if (numDenseElements * SPARSE_DENSITY_RATIO < newInitializedLength) {
    return DenseElementResult::Incomplete;
  }

  if (newInitializedLength > MAX_DENSE_ELEMENTS_COUNT) {
    return DenseElementResult::Incomplete;
  }

  /*
   * This object meets all necessary restrictions, convert all indexed
   * properties into dense elements.
   */

  if (newInitializedLength > obj->getDenseCapacity()) {
    if (!obj->growElements(cx, newInitializedLength)) {
      return DenseElementResult::Failure;
    }
  }

  obj->ensureDenseInitializedLength(newInitializedLength, 0);

  if (ObjectRealm::get(obj).objectMaybeInIteration(obj)) {
    // Mark the densified elements as maybe-in-iteration. See also the comment
    // in GetIterator.
    obj->markDenseElementsMaybeInIteration();
  }

  RootedValue value(cx);

  shape = obj->lastProperty();
  while (!shape->isEmptyShape()) {
    jsid id = shape->propid();
    uint32_t index;
    if (IdIsIndex(id, &index)) {
      value = obj->getSlot(shape->slot());

      /*
       * When removing a property from a dictionary, the specified
       * property will be removed from the dictionary list and the
       * last property will then be changed due to reshaping the object.
       * Compute the next shape in the traverse, watching for such
       * removals from the list.
       */
      if (shape != obj->lastProperty()) {
        shape = shape->previous();
        if (!NativeObject::removeProperty(cx, obj, id)) {
          return DenseElementResult::Failure;
        }
      } else {
        if (!NativeObject::removeProperty(cx, obj, id)) {
          return DenseElementResult::Failure;
        }
        shape = obj->lastProperty();
      }

      obj->setDenseElement(index, value);
    } else {
      shape = shape->previous();
    }
  }

  /*
   * All indexed properties on the object are now dense, clear the indexed
   * flag so that we will not start using sparse indexes again if we need
   * to grow the object.
   */
  if (!NativeObject::clearFlag(cx, obj, ObjectFlag::Indexed)) {
    return DenseElementResult::Failure;
  }

  return DenseElementResult::Success;
}
// Move the dense elements down over the shifted-elements gap so that they
// once again start immediately after the header, reclaiming the gap as
// capacity.
void NativeObject::moveShiftedElements() {
  MOZ_ASSERT(isExtensible());

  ObjectElements* header = getElementsHeader();
  uint32_t numShifted = header->numShiftedElements();
  MOZ_ASSERT(numShifted > 0);

  uint32_t initLength = header->initializedLength;

  // Relocate the header to the start of the allocation and fold the shifted
  // space back into the capacity.
  ObjectElements* newHeader =
      static_cast<ObjectElements*>(getUnshiftedElementsHeader());
  memmove(newHeader, header, sizeof(ObjectElements));
  newHeader->clearShiftedElements();
  newHeader->capacity += numShifted;
  elements_ = newHeader->elements();

  // To move the elements, temporarily update initializedLength to include
  // the shifted elements.
  newHeader->initializedLength += numShifted;

  // Move the elements. Initialize to |undefined| to ensure pre-barriers
  // don't see garbage.
  for (size_t i = 0; i < numShifted; i++) {
    initDenseElement(i, UndefinedValue());
  }
  moveDenseElements(0, numShifted, initLength);

  // Restore the initialized length. We use setDenseInitializedLength to
  // make sure prepareElementRangeForOverwrite is called on the shifted
  // elements.
  setDenseInitializedLength(initLength);
}
void NativeObject::maybeMoveShiftedElements() {
MOZ_ASSERT(isExtensible());
ObjectElements* header = getElementsHeader();
MOZ_ASSERT(header->numShiftedElements() > 0);
// Move the elements if less than a third of the allocated space is in use.
if (header->capacity < header->numAllocatedElements() / 3) {
moveShiftedElements();
}
}
// Try to make |count| element slots available in front of the dense elements
// by consuming shifted-elements space, eagerly shifting extra elements when
// the reserve is too small. Returns false when the caller must use a slower
// path.
bool NativeObject::tryUnshiftDenseElements(uint32_t count) {
  MOZ_ASSERT(isExtensible());
  MOZ_ASSERT(count > 0);

  ObjectElements* header = getElementsHeader();
  uint32_t numShifted = header->numShiftedElements();

  if (count > numShifted) {
    // We need more elements than are easily available. Try to make space
    // for more elements than we need (and shift the remaining ones) so
    // that unshifting more elements later will be fast.

    // Don't bother reserving elements if the number of elements is small.
    // Note that there's no technical reason for using this particular
    // limit.
    if (header->initializedLength <= 10 ||
        header->hasNonwritableArrayLength() ||
        MOZ_UNLIKELY(count > ObjectElements::MaxShiftedElements)) {
      return false;
    }

    MOZ_ASSERT(header->capacity >= header->initializedLength);
    uint32_t unusedCapacity = header->capacity - header->initializedLength;

    // Determine toShift, the number of extra elements we want to make
    // available.
    uint32_t toShift = count - numShifted;
    MOZ_ASSERT(toShift <= ObjectElements::MaxShiftedElements,
               "count <= MaxShiftedElements so toShift <= MaxShiftedElements");

    // Give up if we need to allocate more elements.
    if (toShift > unusedCapacity) {
      return false;
    }

    // Move more elements than we need, so that other unshift calls will be
    // fast. We just have to make sure we don't exceed unusedCapacity.
    toShift = std::min(toShift + unusedCapacity / 2, unusedCapacity);

    // Ensure |numShifted + toShift| does not exceed MaxShiftedElements.
    if (numShifted + toShift > ObjectElements::MaxShiftedElements) {
      toShift = ObjectElements::MaxShiftedElements - numShifted;
    }

    MOZ_ASSERT(count <= numShifted + toShift);
    MOZ_ASSERT(numShifted + toShift <= ObjectElements::MaxShiftedElements);
    MOZ_ASSERT(toShift <= unusedCapacity);

    // Now move/unshift the elements: append |toShift| undefined values,
    // slide the existing elements up by |toShift|, then record them as
    // shifted.
    uint32_t initLen = header->initializedLength;
    setDenseInitializedLength(initLen + toShift);
    for (uint32_t i = 0; i < toShift; i++) {
      initDenseElement(initLen + i, UndefinedValue());
    }
    moveDenseElements(toShift, 0, initLen);

    // Shift the elements we just prepended.
    shiftDenseElementsUnchecked(toShift);

    // We can now fall-through to the fast path below.
    header = getElementsHeader();
    MOZ_ASSERT(header->numShiftedElements() == numShifted + toShift);

    numShifted = header->numShiftedElements();
    MOZ_ASSERT(count <= numShifted);
  }

  // Fast path: move the elements pointer (and with it the header) back by
  // |count| slots and hand the recovered slots to the caller.
  elements_ -= count;
  ObjectElements* newHeader = getElementsHeader();
  memmove(newHeader, header, sizeof(ObjectElements));
  newHeader->unshiftShiftedElements(count);

  // Initialize to |undefined| to ensure pre-barriers don't see garbage.
  for (uint32_t i = 0; i < count; i++) {
    initDenseElement(i, UndefinedValue());
  }

  return true;
}
// Given a requested capacity (in elements) and (potentially) the length of an
// array for which elements are being allocated, compute an actual allocation
// amount (in elements). (Allocation amounts include space for an
// ObjectElements instance, so a return value of |N| implies
// |N - ObjectElements::VALUES_PER_HEADER| usable elements.)
//
// The requested/actual allocation distinction is meant to:
//
// * preserve amortized O(N) time to add N elements;
// * minimize the number of unused elements beyond an array's length, and
// * provide at least SLOT_CAPACITY_MIN elements no matter what (so adding
//   the first several elements to small arrays only needs one allocation).
//
// Note: the structure and behavior of this method follow along with
// UnboxedArrayObject::chooseCapacityIndex. Changes to the allocation strategy
// in one should generally be matched by the other.
/* static */
bool NativeObject::goodElementsAllocationAmount(JSContext* cx,
                                                uint32_t reqCapacity,
                                                uint32_t length,
                                                uint32_t* goodAmount) {
  // Requests beyond the dense-elements limit are reported as OOM.
  if (reqCapacity > MAX_DENSE_ELEMENTS_COUNT) {
    ReportOutOfMemory(cx);
    return false;
  }

  uint32_t reqAllocated = reqCapacity + ObjectElements::VALUES_PER_HEADER;

  // Handle "small" requests primarily by doubling.
  const uint32_t Mebi = 1 << 20;
  if (reqAllocated < Mebi) {
    uint32_t amount =
        mozilla::AssertedCast<uint32_t>(RoundUpPow2(reqAllocated));

    // If |amount| would be 2/3 or more of the array's length, adjust
    // it (up or down) to be equal to the array's length. This avoids
    // allocating excess elements that aren't likely to be needed, either
    // in this resizing or a subsequent one. The 2/3 factor is chosen so
    // that exceptional resizings will at most triple the capacity, as
    // opposed to the usual doubling.
    uint32_t goodCapacity = amount - ObjectElements::VALUES_PER_HEADER;
    if (length >= reqCapacity && goodCapacity > (length / 3) * 2) {
      amount = length + ObjectElements::VALUES_PER_HEADER;
    }

    if (amount < SLOT_CAPACITY_MIN) {
      amount = SLOT_CAPACITY_MIN;
    }

    *goodAmount = amount;

    return true;
  }

  // The almost-doubling above wastes a lot of space for larger bucket sizes.
  // For large amounts, switch to bucket sizes that obey this formula:
  //
  //   count(n+1) = Math.ceil(count(n) * 1.125)
  //
  // where |count(n)| is the size of the nth bucket, measured in 2**20 slots.
  // These bucket sizes still preserve amortized O(N) time to add N elements,
  // just with a larger constant factor.
  //
  // The bucket size table below was generated with this JavaScript (and
  // manual reformatting):
  //
  //   for (let n = 1, i = 0; i < 34; i++) {
  //     print('0x' + (n * (1 << 20)).toString(16) + ', ');
  //     n = Math.ceil(n * 1.125);
  //   }
  static constexpr uint32_t BigBuckets[] = {
      0x100000,  0x200000,  0x300000,  0x400000,  0x500000,  0x600000,
      0x700000,  0x800000,  0x900000,  0xb00000,  0xd00000,  0xf00000,
      0x1100000, 0x1400000, 0x1700000, 0x1a00000, 0x1e00000, 0x2200000,
      0x2700000, 0x2c00000, 0x3200000, 0x3900000, 0x4100000, 0x4a00000,
      0x5400000, 0x5f00000, 0x6b00000, 0x7900000, 0x8900000, 0x9b00000,
      0xaf00000, 0xc500000, 0xde00000, 0xfa00000};
  static_assert(BigBuckets[std::size(BigBuckets) - 1] <=
                MAX_DENSE_ELEMENTS_ALLOCATION);

  // Pick the first bucket that'll fit |reqAllocated|.
  for (uint32_t b : BigBuckets) {
    if (b >= reqAllocated) {
      *goodAmount = b;
      return true;
    }
  }

  // Otherwise, return the maximum bucket size.
  *goodAmount = MAX_DENSE_ELEMENTS_ALLOCATION;

  return true;
}
// Grow the dense elements allocation so at least |reqCapacity| elements fit.
// Returns false on OOM (including requests beyond MAX_DENSE_ELEMENTS_COUNT,
// reported via goodElementsAllocationAmount), leaving the elements unchanged.
bool NativeObject::growElements(JSContext* cx, uint32_t reqCapacity) {
  MOZ_ASSERT(isExtensible());
  MOZ_ASSERT(canHaveNonEmptyElements());

  // If there are shifted elements, consider moving them first. If we don't
  // move them here, the code below will include the shifted elements in the
  // resize.
  uint32_t numShifted = getElementsHeader()->numShiftedElements();
  if (numShifted > 0) {
    // If the number of elements is small, it's cheaper to just move them as
    // it may avoid a malloc/realloc. Note that there's no technical reason
    // for using this particular value, but it works well in real-world use
    // cases.
    static const size_t MaxElementsToMoveEagerly = 20;

    if (getElementsHeader()->initializedLength <= MaxElementsToMoveEagerly) {
      moveShiftedElements();
    } else {
      maybeMoveShiftedElements();
    }
    if (getDenseCapacity() >= reqCapacity) {
      return true;
    }
    numShifted = getElementsHeader()->numShiftedElements();

    // If |reqCapacity + numShifted| overflows, we just move all shifted
    // elements to avoid the problem.
    CheckedInt<uint32_t> checkedReqCapacity(reqCapacity);
    checkedReqCapacity += numShifted;
    if (MOZ_UNLIKELY(!checkedReqCapacity.isValid())) {
      moveShiftedElements();
      numShifted = 0;
    }
  }

  uint32_t oldCapacity = getDenseCapacity();
  MOZ_ASSERT(oldCapacity < reqCapacity);

  uint32_t newAllocated = 0;
  if (is<ArrayObject>() && !as<ArrayObject>().lengthIsWritable()) {
    MOZ_ASSERT(reqCapacity <= as<ArrayObject>().length());
    MOZ_ASSERT(reqCapacity <= MAX_DENSE_ELEMENTS_COUNT);
    // Preserve the |capacity <= length| invariant for arrays with
    // non-writable length. See also js::ArraySetLength which initially
    // enforces this requirement.
    newAllocated = reqCapacity + numShifted + ObjectElements::VALUES_PER_HEADER;
  } else {
    if (!goodElementsAllocationAmount(cx, reqCapacity + numShifted,
                                      getElementsHeader()->length,
                                      &newAllocated)) {
      return false;
    }
  }

  uint32_t newCapacity =
      newAllocated - ObjectElements::VALUES_PER_HEADER - numShifted;
  MOZ_ASSERT(newCapacity > oldCapacity && newCapacity >= reqCapacity);

  // If newCapacity exceeds MAX_DENSE_ELEMENTS_COUNT, the array should become
  // sparse.
  MOZ_ASSERT(newCapacity <= MAX_DENSE_ELEMENTS_COUNT);

  uint32_t initlen = getDenseInitializedLength();

  HeapSlot* oldHeaderSlots =
      reinterpret_cast<HeapSlot*>(getUnshiftedElementsHeader());
  HeapSlot* newHeaderSlots;
  uint32_t oldAllocated = 0;
  if (hasDynamicElements()) {
    MOZ_ASSERT(oldCapacity <= MAX_DENSE_ELEMENTS_COUNT);
    oldAllocated = oldCapacity + ObjectElements::VALUES_PER_HEADER + numShifted;

    newHeaderSlots = ReallocateObjectBuffer<HeapSlot>(
        cx, this, oldHeaderSlots, oldAllocated, newAllocated);
    if (!newHeaderSlots) {
      return false;  // Leave elements at its old size.
    }
  } else {
    // Inline (fixed) elements: allocate a dynamic buffer and copy the header
    // plus all initialized (and shifted) elements into it.
    newHeaderSlots = AllocateObjectBuffer<HeapSlot>(cx, this, newAllocated);
    if (!newHeaderSlots) {
      return false;  // Leave elements at its old size.
    }
    PodCopy(newHeaderSlots, oldHeaderSlots,
            ObjectElements::VALUES_PER_HEADER + initlen + numShifted);
  }

  if (oldAllocated) {
    RemoveCellMemory(this, oldAllocated * sizeof(HeapSlot),
                     MemoryUse::ObjectElements);
  }

  ObjectElements* newheader = reinterpret_cast<ObjectElements*>(newHeaderSlots);
  elements_ = newheader->elements() + numShifted;
  getElementsHeader()->capacity = newCapacity;

  // Poison the capacity beyond the initialized length in debug builds.
  Debug_SetSlotRangeToCrashOnTouch(elements_ + initlen, newCapacity - initlen);

  AddCellMemory(this, newAllocated * sizeof(HeapSlot),
                MemoryUse::ObjectElements);

  return true;
}
// Shrink the dense elements allocation towards |reqCapacity|. Infallible:
// if realloc fails, the old (larger) allocation is kept.
void NativeObject::shrinkElements(JSContext* cx, uint32_t reqCapacity) {
  MOZ_ASSERT(canHaveNonEmptyElements());
  MOZ_ASSERT(reqCapacity >= getDenseInitializedLength());

  // Inline (fixed) elements cannot be resized.
  if (!hasDynamicElements()) {
    return;
  }

  // If we have shifted elements, consider moving them.
  uint32_t numShifted = getElementsHeader()->numShiftedElements();
  if (numShifted > 0) {
    maybeMoveShiftedElements();
    numShifted = getElementsHeader()->numShiftedElements();
  }

  uint32_t oldCapacity = getDenseCapacity();
  MOZ_ASSERT(reqCapacity < oldCapacity);

  // Cannot fail for amounts below the current (valid) capacity.
  uint32_t newAllocated = 0;
  MOZ_ALWAYS_TRUE(goodElementsAllocationAmount(cx, reqCapacity + numShifted, 0,
                                               &newAllocated));
  MOZ_ASSERT(oldCapacity <= MAX_DENSE_ELEMENTS_COUNT);

  uint32_t oldAllocated =
      oldCapacity + ObjectElements::VALUES_PER_HEADER + numShifted;
  if (newAllocated == oldAllocated) {
    return;  // Leave elements at its old size.
  }

  MOZ_ASSERT(newAllocated > ObjectElements::VALUES_PER_HEADER);
  uint32_t newCapacity =
      newAllocated - ObjectElements::VALUES_PER_HEADER - numShifted;
  MOZ_ASSERT(newCapacity <= MAX_DENSE_ELEMENTS_COUNT);

  HeapSlot* oldHeaderSlots =
      reinterpret_cast<HeapSlot*>(getUnshiftedElementsHeader());
  HeapSlot* newHeaderSlots = ReallocateObjectBuffer<HeapSlot>(
      cx, this, oldHeaderSlots, oldAllocated, newAllocated);
  if (!newHeaderSlots) {
    cx->recoverFromOutOfMemory();
    return;  // Leave elements at its old size.
  }

  RemoveCellMemory(this, oldAllocated * sizeof(HeapSlot),
                   MemoryUse::ObjectElements);

  ObjectElements* newheader = reinterpret_cast<ObjectElements*>(newHeaderSlots);
  elements_ = newheader->elements() + numShifted;
  getElementsHeader()->capacity = newCapacity;

  AddCellMemory(this, newAllocated * sizeof(HeapSlot),
                MemoryUse::ObjectElements);
}
void NativeObject::shrinkCapacityToInitializedLength(JSContext* cx) {
  // When an array's length becomes non-writable, writes to indexes greater
  // than or equal to the length don't change the array. We handle this
  // with a check for non-writable length in most places. But in JIT code every
  // check counts -- so we piggyback the check on the already-required range
  // check for |index < capacity| by making capacity of arrays with non-writable
  // length never exceed the length. This mechanism is also used when an object
  // becomes non-extensible.

  // Unshift first so the capacity clamp below is exact.
  if (getElementsHeader()->numShiftedElements() > 0) {
    moveShiftedElements();
  }

  ObjectElements* header = getElementsHeader();
  uint32_t len = header->initializedLength;
  MOZ_ASSERT(header->capacity >= len);
  if (header->capacity == len) {
    return;
  }

  // Try to return the excess capacity to the allocator (best effort).
  shrinkElements(cx, len);

  // Clamp the recorded capacity even if shrinkElements kept a larger
  // allocation.
  header = getElementsHeader();
  uint32_t oldAllocated = header->numAllocatedElements();
  header->capacity = len;

  // The size of the memory allocation hasn't changed but we lose the actual
  // capacity information. Make the associated size match the updated capacity.
  if (!hasFixedElements()) {
    uint32_t newAllocated = header->numAllocatedElements();
    RemoveCellMemory(this, oldAllocated * sizeof(HeapSlot),
                     MemoryUse::ObjectElements);
    AddCellMemory(this, newAllocated * sizeof(HeapSlot),
                  MemoryUse::ObjectElements);
  }
}
/* static */
// Hand out a slot for a new property of a dictionary-mode object. Reuses a
// slot from the ShapeTable's free list when one is available; otherwise
// allocates the next slot at the end of the slot span. On success |*slotp|
// receives the slot number. Returns false (reporting OOM) when the slot span
// would exceed SHAPE_MAXIMUM_SLOT or growing the slots fails.
bool NativeObject::allocDictionarySlot(JSContext* cx, HandleNativeObject obj,
                                       uint32_t* slotp) {
  MOZ_ASSERT(obj->inDictionaryMode());
  uint32_t slot = obj->slotSpan();
  MOZ_ASSERT(slot >= JSSLOT_FREE(obj->getClass()));

  // Try to pull a free slot from the shape table's slot-number free list.
  // Shapes without a ShapeTable have an empty free list, because we only
  // purge ShapeTables with an empty free list.
  {
    AutoCheckCannotGC nogc;
    if (ShapeTable* table = obj->lastProperty()->maybeTable(nogc)) {
      uint32_t last = table->freeList();
      if (last != SHAPE_INVALID_SLOT) {
#ifdef DEBUG
        // Free-list entries must lie within the current slot span.
        MOZ_ASSERT(last < slot);
        uint32_t next = obj->getSlot(last).toPrivateUint32();
        MOZ_ASSERT_IF(next != SHAPE_INVALID_SLOT, next < slot);
#endif
        *slotp = last;

        // Pop |last| off the free list: a freed slot stores the next
        // free-list entry as a private uint32 (see NativeObject::freeSlot).
        const Value& vref = obj->getSlot(last);
        table->setFreeList(vref.toPrivateUint32());
        obj->setSlot(last, UndefinedValue());
        return true;
      }
    }
  }
  if (slot >= SHAPE_MAXIMUM_SLOT) {
    ReportOutOfMemory(cx);
    return false;
  }
  *slotp = slot;
  // Grow the object's slots so the newly handed-out slot is backed by storage.
  return obj->ensureSlotsForDictionaryObject(cx, slot + 1);
}
// Release |slot| of this object. For dictionary-mode objects, non-reserved
// slots are pushed onto the ShapeTable's free list so allocDictionarySlot can
// reuse them; in every other case the slot is simply cleared to undefined.
void NativeObject::freeSlot(JSContext* cx, uint32_t slot) {
  MOZ_ASSERT(slot < slotSpan());
  if (inDictionaryMode()) {
    // Ensure we have a ShapeTable as it stores the object's free list (the
    // list of available slots in dictionary objects).
    AutoCheckCannotGC nogc;
    if (ShapeTable* table =
            lastProperty()->ensureTableForDictionary(cx, nogc)) {
      uint32_t last = table->freeList();

      // Can't afford to check the whole free list, but let's check the head.
      MOZ_ASSERT_IF(last != SHAPE_INVALID_SLOT,
                    last < slotSpan() && last != slot);

      // Place all freed slots other than reserved slots (bug 595230) on the
      // dictionary's free list.
      if (JSSLOT_FREE(getClass()) <= slot) {
        MOZ_ASSERT_IF(last != SHAPE_INVALID_SLOT, last < slotSpan());
        // The freed slot records the previous free-list head as a private
        // uint32; the list now starts at |slot|.
        setSlot(slot, PrivateUint32Value(last));
        table->setFreeList(slot);
        return;
      }
    } else {
      // OOM while creating the ShapeTable holding the free list. We can
      // recover from it - it just means we won't be able to reuse this
      // slot later.
      cx->recoverFromOutOfMemory();
    }
  }
  setSlot(slot, UndefinedValue());
}
/* static */
// Convenience overload: add a data property keyed by a PropertyName by
// converting the name to a jsid and delegating to the id-based overload.
Shape* NativeObject::addDataProperty(JSContext* cx, HandleNativeObject obj,
                                     HandlePropertyName name, uint32_t slot,
                                     unsigned attrs) {
  // Accessor attribute bits are not valid for a data property.
  MOZ_ASSERT(!(attrs & (JSPROP_GETTER | JSPROP_SETTER)));
  RootedId propId(cx, NameToId(name));
  return addDataProperty(cx, obj, propId, slot, attrs);
}
// Out-of-line entry point for own-property lookup on native objects. This is
// a thin wrapper around NativeLookupOwnPropertyInline (see
// vm/NativeObject-inl.h for the lookup semantics); the result is reported
// through |propp|.
template <AllowGC allowGC>
bool js::NativeLookupOwnProperty(
    JSContext* cx, typename MaybeRooted<NativeObject*, allowGC>::HandleType obj,
    typename MaybeRooted<jsid, allowGC>::HandleType id,
    typename MaybeRooted<PropertyResult, allowGC>::MutableHandleType propp) {
  return NativeLookupOwnPropertyInline<allowGC>(cx, obj, id, propp);
}

// Explicit instantiations for the GC-allowing and GC-forbidding variants.
template bool js::NativeLookupOwnProperty<CanGC>(
    JSContext* cx, HandleNativeObject obj, HandleId id,
    MutableHandle<PropertyResult> propp);
template bool js::NativeLookupOwnProperty<NoGC>(
    JSContext* cx, NativeObject* const& obj, const jsid& id,
    FakeMutableHandle<PropertyResult> propp);
/*** [[DefineOwnProperty]] **************************************************/
// Invoke the class's addProperty hook (if it has one) after property |id| has
// been added to |obj| with value |value|. If the hook fails, the freshly
// added property is removed again and false is returned.
static MOZ_ALWAYS_INLINE bool CallAddPropertyHook(JSContext* cx,
                                                  HandleNativeObject obj,
                                                  HandleId id,
                                                  HandleValue value) {
  JSAddPropertyOp hook = obj->getClass()->getAddProperty();
  if (MOZ_LIKELY(!hook)) {
    return true;
  }
  MOZ_ASSERT(!cx->isHelperThreadContext());
  if (CallJSAddPropertyOp(cx, hook, obj, id, value)) {
    return true;
  }
  // Roll back: the hook rejected the property, so remove it again.
  NativeObject::removeProperty(cx, obj, id);
  return false;
}
// Dense-element counterpart of CallAddPropertyHook: run the class addProperty
// hook after dense element |index| of |obj| was set to |value|. For arrays
// the hook's only job (growing the length) is inlined here. If the hook
// fails, the just-stored element is replaced with a hole and false returned.
static MOZ_ALWAYS_INLINE bool CallAddPropertyHookDense(JSContext* cx,
                                                       HandleNativeObject obj,
                                                       uint32_t index,
                                                       HandleValue value) {
  // Inline addProperty for array objects.
  if (obj->is<ArrayObject>()) {
    ArrayObject* arr = &obj->as<ArrayObject>();
    // Bump the length when the new element lies at or past the current end.
    if (index >= arr->length()) {
      arr->setLength(index + 1);
    }
    return true;
  }

  JSAddPropertyOp hook = obj->getClass()->getAddProperty();
  if (MOZ_LIKELY(!hook)) {
    return true;
  }
  MOZ_ASSERT(!cx->isHelperThreadContext());
  RootedId id(cx, INT_TO_JSID(index));
  if (CallJSAddPropertyOp(cx, hook, obj, id, value)) {
    return true;
  }
  // Roll back: punch a hole where the element was just stored.
  obj->setDenseElementHole(index);
  return false;
}
/**
 * Determines whether a write to the given element on |arr| should fail
 * because |arr| has a non-writable length, and writing that element would
 * increase the length of the array.
 */
static bool WouldDefinePastNonwritableLength(ArrayObject* arr, uint32_t index) {
  if (arr->lengthIsWritable()) {
    // A writable length can always grow to accommodate the element.
    return false;
  }
  return index >= arr->length();
}
// Slow path of ReshapeForShadowedProp: walk |obj|'s prototype chain and, if a
// native proto already owns |id| (and is therefore about to be shadowed),
// reshape that proto so shape-teleporting caches are invalidated.
static bool ReshapeForShadowedPropSlow(JSContext* cx, HandleNativeObject obj,
                                       HandleId id) {
  MOZ_ASSERT(obj->isDelegate());

  // Lookups on integer ids cannot be cached through prototypes.
  if (JSID_IS_INT(id)) {
    return true;
  }

  for (RootedObject proto(cx, obj->staticPrototype()); proto;
       proto = proto->staticPrototype()) {
    // Lookups will not be cached through non-native protos.
    if (!proto->is<NativeObject>()) {
      break;
    }
    if (proto->as<NativeObject>().contains(cx, id)) {
      return NativeObject::reshapeForShadowedProp(cx, proto.as<NativeObject>());
    }
  }
  return true;
}
static MOZ_ALWAYS_INLINE bool ReshapeForShadowedProp(JSContext* cx,
                                                     HandleObject obj,
                                                     HandleId id) {
  // If |obj| is a prototype of another object, check if we're shadowing a
  // property on its proto chain. In this case we need to reshape that object
  // for shape teleporting to work correctly.
  //
  // See also the 'Shape Teleporting Optimization' comment in jit/CacheIR.cpp.
  //
  // Only prototype objects with native protos need the slow check; everything
  // else takes this inlined fast path.
  if (obj->isDelegate() && obj->is<NativeObject>()) {
    return ReshapeForShadowedPropSlow(cx, obj.as<NativeObject>(), id);
  }
  return true;
}
/* static */
// Give |obj| a fresh own shape so that shape-guard-based optimizations (see
// the 'Shape Teleporting Optimization' comment in jit/CacheIR.cpp) notice
// that a property on |obj| is now shadowed.
bool NativeObject::reshapeForShadowedProp(JSContext* cx,
                                          HandleNativeObject obj) {
  return generateOwnShape(cx, obj);
}
// Selects whether AddOrChangeProperty is adding a definitely-new property
// (Add) or possibly redefining an existing one (AddOrChange).
enum class IsAddOrChange { Add, AddOrChange };

// Define property |id| on |obj| per the complete descriptor |desc|,
// preferring dense-element storage for plain enumerable indexed data
// properties and falling back to shape-based (sparse) storage otherwise.
template <IsAddOrChange AddOrChange>
static MOZ_ALWAYS_INLINE bool AddOrChangeProperty(
    JSContext* cx, HandleNativeObject obj, HandleId id,
    Handle<PropertyDescriptor> desc) {
  desc.assertComplete();

  // Invalidate shape-teleporting caches if |id| shadows a proto property.
  if (!ReshapeForShadowedProp(cx, obj, id)) {
    return false;
  }

  // Use dense storage for new indexed properties where possible.
  if (JSID_IS_INT(id) && !desc.getter() && !desc.setter() &&
      desc.attributes() == JSPROP_ENUMERATE &&
      (!obj->isIndexed() || !obj->containsPure(id)) &&
      !obj->is<TypedArrayObject>()) {
    uint32_t index = JSID_TO_INT(id);
    DenseElementResult edResult = obj->ensureDenseElements(cx, index, 1);
    if (edResult == DenseElementResult::Failure) {
      return false;
    }
    if (edResult == DenseElementResult::Success) {
      obj->setDenseElement(index, desc.value());
      if (!CallAddPropertyHookDense(cx, obj, index, desc.value())) {
        return false;
      }
      return true;
    }
    // Neither Failure nor Success: fall through and define a sparse property.
  }

  // If we know this is a new property we can call addProperty instead of
  // the slower putProperty.
  if constexpr (AddOrChange == IsAddOrChange::Add) {
    if (Shape::isDataProperty(desc.attributes(), desc.getter(),
                              desc.setter())) {
      Shape* shape = NativeObject::addDataProperty(
          cx, obj, id, SHAPE_INVALID_SLOT, desc.attributes());
      if (!shape) {
        return false;
      }
      // New property: its slot is uninitialized, so use initSlot.
      obj->initSlot(shape->slot(), desc.value());
    } else {
      if (!NativeObject::addAccessorProperty(
              cx, obj, id, desc.getter(), desc.setter(), desc.attributes())) {
        return false;
      }
    }
  } else {
    if (Shape::isDataProperty(desc.attributes(), desc.getter(),
                              desc.setter())) {
      Shape* shape =
          NativeObject::putDataProperty(cx, obj, id, desc.attributes());
      if (!shape) {
        return false;
      }
      // Possibly pre-existing property: overwrite with setSlot.
      obj->setSlot(shape->slot(), desc.value());
    } else {
      if (!NativeObject::putAccessorProperty(
              cx, obj, id, desc.getter(), desc.setter(), desc.attributes())) {
        return false;
      }
    }
  }

  // Clear any existing dense index after adding a sparse indexed property,
  // and investigate converting the object to dense indexes.
  if (JSID_IS_INT(id)) {
    uint32_t index = JSID_TO_INT(id);
    obj->removeDenseElementForSparseIndex(index);
    DenseElementResult edResult =
        NativeObject::maybeDensifySparseElements(cx, obj);
    if (edResult == DenseElementResult::Failure) {
      return false;
    }
    if (edResult == DenseElementResult::Success) {
      // Densification only happens for plain data properties, so no setter
      // can be present.
      MOZ_ASSERT(!desc.setter());
      return CallAddPropertyHookDense(cx, obj, index, desc.value());
    }
  }
  return CallAddPropertyHook(cx, obj, id, desc.value());
}
// Version of AddOrChangeProperty optimized for adding a plain enumerable data
// property. This function doesn't handle integer ids, as those may have to be
// stored in dense elements.
static MOZ_ALWAYS_INLINE bool AddDataProperty(JSContext* cx,
                                              HandleNativeObject obj,
                                              HandleId id, HandleValue v) {
  MOZ_ASSERT(!JSID_IS_INT(id));

  // Invalidate shape-teleporting caches if |id| shadows a proto property.
  if (!ReshapeForShadowedProp(cx, obj, id)) {
    return false;
  }
  Shape* shape = NativeObject::addEnumerableDataProperty(cx, obj, id);
  if (!shape) {
    return false;
  }
  obj->setSlot(shape->slot(), v);
  // Give the class's addProperty hook (if any) a chance to veto.
  return CallAddPropertyHook(cx, obj, id, v);
}
// Attribute-bit predicates used by the [[DefineOwnProperty]] code below.

// A property is configurable unless the PERMANENT bit is set.
static bool IsConfigurable(unsigned attrs) {
  return !(attrs & JSPROP_PERMANENT);
}
// A property is enumerable iff the ENUMERATE bit is set.
static bool IsEnumerable(unsigned attrs) {
  return bool(attrs & JSPROP_ENUMERATE);
}
// A data property is writable unless the READONLY bit is set.
static bool IsWritable(unsigned attrs) {
  return !(attrs & JSPROP_READONLY);
}
// Accessor descriptors carry a getter and/or setter bit.
static bool IsAccessorDescriptor(unsigned attrs) {
  return bool(attrs & (JSPROP_GETTER | JSPROP_SETTER));
}
// A complete descriptor is a data descriptor exactly when it isn't an
// accessor descriptor; the IGNORE bits must already have been resolved.
static bool IsDataDescriptor(unsigned attrs) {
  MOZ_ASSERT((attrs & (JSPROP_IGNORE_VALUE | JSPROP_IGNORE_READONLY)) == 0);
  return !IsAccessorDescriptor(attrs);
}
// Forward declaration for GetExistingPropertyValue below; the definition
// appears later in this file.
template <AllowGC allowGC>
static MOZ_ALWAYS_INLINE bool GetExistingProperty(
    JSContext* cx, typename MaybeRooted<Value, allowGC>::HandleType receiver,
    typename MaybeRooted<NativeObject*, allowGC>::HandleType obj,
    typename MaybeRooted<Shape*, allowGC>::HandleType shape,
    typename MaybeRooted<Value, allowGC>::MutableHandleType vp);
// Read the current value of own property |prop| of |obj| into |vp|, handling
// all three property representations: dense elements, typed-array elements,
// and shape-based properties.
static bool GetExistingPropertyValue(JSContext* cx, HandleNativeObject obj,
                                     HandleId id, Handle<PropertyResult> prop,
                                     MutableHandleValue vp) {
  // Dense elements can be read directly.
  if (prop.isDenseElement()) {
    vp.set(obj->getDenseElement(prop.denseElementIndex()));
    return true;
  }
  // Typed-array element reads return a bool, so failure must be propagated.
  if (prop.isTypedArrayElement()) {
    size_t idx = prop.typedArrayElementIndex();
    return obj->as<TypedArrayObject>().getElement<CanGC>(cx, idx, vp);
  }

  MOZ_ASSERT(!cx->isHelperThreadContext());
  MOZ_ASSERT(prop.shape()->propid() == id);
  MOZ_ASSERT(obj->contains(cx, prop.shape()));

  // Shape-based property: go through GetExistingProperty with |obj| itself
  // as the receiver.
  RootedValue receiver(cx, ObjectValue(*obj));
  RootedShape shape(cx, prop.shape());
  return GetExistingProperty<CanGC>(cx, receiver, obj, shape, vp);
}
/*
 * If desc is redundant with an existing own property obj[id], then set
 * |*redundant = true| and return true.
 *
 * |shapeAttrs| are the attributes of the existing property; |prop| is its
 * lookup result. Returns false only when fetching the current value fails.
 */
static bool DefinePropertyIsRedundant(JSContext* cx, HandleNativeObject obj,
                                      HandleId id, Handle<PropertyResult> prop,
                                      unsigned shapeAttrs,
                                      Handle<PropertyDescriptor> desc,
                                      bool* redundant) {
  *redundant = false;

  // Any mismatch in configurable/enumerable means the define is not
  // redundant (|*redundant| stays false).
  if (desc.hasConfigurable() &&
      desc.configurable() != IsConfigurable(shapeAttrs)) {
    return true;
  }
  if (desc.hasEnumerable() && desc.enumerable() != IsEnumerable(shapeAttrs)) {
    return true;
  }
  if (desc.isDataDescriptor()) {
    // A data descriptor can't be redundant with an accessor property.
    if (IsAccessorDescriptor(shapeAttrs)) {
      return true;
    }
    if (desc.hasWritable() && desc.writable() != IsWritable(shapeAttrs)) {
      return true;
    }
    if (desc.hasValue()) {
      // Get the current value of the existing property.
      RootedValue currentValue(cx);
      if (prop.isNativeProperty() && prop.shape()->isDataProperty()) {
        // Inline GetExistingPropertyValue in order to omit a type
        // correctness assertion that's too strict for this particular
        // call site. For details, see bug 1125624 comments 13-16.
        currentValue.set(obj->getSlot(prop.shape()->slot()));
      } else {
        if (!GetExistingPropertyValue(cx, obj, id, prop, &currentValue)) {
          return false;
        }
      }

      // Don't call SameValue here to ensure we properly update distinct
      // NaN values.
      if (desc.value() != currentValue) {
        return true;
      }
    }

    // Non-shape properties (dense/typed-array elements) have no getter or
    // setter ops; compare against nullptr for those.
    GetterOp existingGetterOp =
        prop.isNativeProperty() ? prop.shape()->getter() : nullptr;
    if (desc.getter() != existingGetterOp) {
      return true;
    }

    SetterOp existingSetterOp =
        prop.isNativeProperty() ? prop.shape()->setter() : nullptr;
    if (desc.setter() != existingSetterOp) {
      return true;
    }
  } else {
    // Accessor descriptor: redundant only if the existing property has the
    // same getter/setter objects for every component the descriptor supplies.
    if (desc.hasGetterObject() &&
        (!(shapeAttrs & JSPROP_GETTER) ||
         desc.getterObject() != prop.shape()->getterObject())) {
      return true;
    }
    if (desc.hasSetterObject() &&
        (!(shapeAttrs & JSPROP_SETTER) ||
         desc.setterObject() != prop.shape()->setterObject())) {
      return true;
    }
  }

  *redundant = true;
  return true;
}
bool js::NativeDefineProperty(JSContext* cx, HandleNativeObject obj,
HandleId id, Handle<PropertyDescriptor> desc_,
ObjectOpResult& result) {
desc_.assertValid();
// Section numbers and step numbers below refer to ES2018, draft rev
// 540b827fccf6122a984be99ab9af7be20e3b5562.
//
// This function aims to implement 9.1.6 [[DefineOwnProperty]] as well as
// the [[DefineOwnProperty]] methods described in 9.4.2.1 (arrays), 9.4.4.2
// (arguments), and 9.4.5.3 (typed array views).
// Dispense with custom behavior of exotic native objects first.
if (obj->is<ArrayObject>()) {
// 9.4.2.1 step 2. Redefining an array's length is very special.
Rooted<ArrayObject*> arr(cx, &obj->as<ArrayObject>());
if (id == NameToId(cx->names().length)) {
// 9.1.6.3 ValidateAndApplyPropertyDescriptor, step 7.a.
if (desc_.isAccessorDescriptor()) {
return result.fail(JSMSG_CANT_REDEFINE_PROP);
}
MOZ_ASSERT(!cx->isHelperThreadContext());
return ArraySetLength(cx, arr, id, desc_.attributes(), desc_.value(),
result);
}
// 9.4.2.1 step 3. Don't extend a fixed-length array.
uint32_t index;
if (IdIsIndex(id, &index)) {
if (WouldDefinePastNonwritableLength(arr, index)) {
return result.fail(JSMSG_CANT_DEFINE_PAST_ARRAY_LENGTH);
}
}
} else if (obj->is<TypedArrayObject>()) {
// 9.4.5.3 step 3. Indexed properties of typed arrays are special.
Rooted<TypedArrayObject*> tobj(cx, &obj->as<TypedArrayObject>());
mozilla::Maybe<uint64_t> index;
if (!ToTypedArrayIndex(cx, id, &index)) {
return false;
}
if (index) {
MOZ_ASSERT(!cx->isHelperThreadContext());
return DefineTypedArrayElement(cx, tobj, index.value(), desc_, result);
}
} else if (obj->is<ArgumentsObject>()) {
Rooted<ArgumentsObject*> argsobj(cx, &obj->as<ArgumentsObject>());
if (id == NameToId(cx->names().length)) {
// Either we are resolving the .length property on this object,
// or redefining it. In the latter case only, we must reify the
// property. To distinguish the two cases, we note that when
// resolving, the JSPROP_RESOLVING mask is set; whereas the first
// time it is redefined, it isn't set.
if ((desc_.attributes() & JSPROP_RESOLVING) == 0) {
if (!ArgumentsObject::reifyLength(cx, argsobj)) {
return false;
}
}
} else if (JSID_IS_SYMBOL(id) &&
JSID_TO_SYMBOL(id) == cx->wellKnownSymbols().iterator) {
// Do same thing as .length for [@@iterator].
if ((desc_.attributes() & JSPROP_RESOLVING) == 0) {
if (!ArgumentsObject::reifyIterator(cx, argsobj)) {
return false;
}
}
} else if (JSID_IS_INT(id)) {
if ((desc_.attributes() & JSPROP_RESOLVING) == 0) {
argsobj->markElementOverridden();
}
}
}
// 9.1.6.1 OrdinaryDefineOwnProperty step 1.
Rooted<PropertyResult> prop(cx);
if (desc_.attributes() & JSPROP_RESOLVING) {
// We are being called from a resolve or enumerate hook to reify a
// lazily-resolved property. To avoid reentering the resolve hook and
// recursing forever, skip the resolve hook when doing this lookup.
if (!NativeLookupOwnPropertyNoResolve(cx, obj, id, prop.address())) {
return false;
}
} else {
if (!NativeLookupOwnProperty<CanGC>(cx, obj, id, &prop)) {
return false;
}
}
// From this point, the step numbers refer to
// 9.1.6.3, ValidateAndApplyPropertyDescriptor.
// Step 1 is a redundant assertion.
// Filling in desc: Here we make a copy of the desc_ argument. We will turn