Source code

Revision control

Copy as Markdown

Other Tools

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sw=2 et tw=80:
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "gc/Nursery-inl.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/IntegerPrintfMacros.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/Sprintf.h"
#include "mozilla/TimeStamp.h"
#include <algorithm>
#include <cmath>
#include <utility>
#include "builtin/MapObject.h"
#include "debugger/DebugAPI.h"
#include "gc/GCInternals.h"
#include "gc/GCLock.h"
#include "gc/GCParallelTask.h"
#include "gc/Memory.h"
#include "gc/PublicIterators.h"
#include "gc/Tenuring.h"
#include "jit/JitFrames.h"
#include "jit/JitZone.h"
#include "js/Printer.h"
#include "util/DifferentialTesting.h"
#include "util/GetPidProvider.h" // getpid()
#include "util/Poison.h"
#include "vm/JSONPrinter.h"
#include "vm/Realm.h"
#include "vm/Time.h"
#include "gc/Heap-inl.h"
#include "gc/Marking-inl.h"
#include "gc/StableCellHasher-inl.h"
#include "gc/StoreBuffer-inl.h"
#include "vm/GeckoProfiler-inl.h"
using namespace js;
using namespace js::gc;
using mozilla::DebugOnly;
using mozilla::PodCopy;
using mozilla::TimeDuration;
using mozilla::TimeStamp;
namespace js {

// Size of the per-chunk header, rounded up so the allocation area that
// follows it stays cell-aligned.
static constexpr size_t NurseryChunkHeaderSize =
    RoundUp(sizeof(ChunkBase), CellAlignBytes);

// The amount of space in a nursery chunk available to allocations.
static constexpr size_t NurseryChunkUsableSize =
    ChunkSize - NurseryChunkHeaderSize;

// One chunk of nursery memory: the shared chunk header followed by a
// cell-aligned data area that allocations bump through.
struct NurseryChunk : public ChunkBase {
  alignas(CellAlignBytes) uint8_t data[NurseryChunkUsableSize];

  // Re-initialize a TenuredChunk's memory as a NurseryChunk (placement new).
  static NurseryChunk* fromChunk(TenuredChunk* chunk, ChunkKind kind,
                                 uint8_t index);

  explicit NurseryChunk(JSRuntime* runtime, ChunkKind kind, uint8_t chunkIndex)
      : ChunkBase(runtime, &runtime->gc.storeBuffer(), kind, chunkIndex) {}

  // Poison bytes [start, end) of this chunk with |value| and apply the given
  // memory-checker annotation.
  void poisonRange(size_t start, size_t end, uint8_t value,
                   MemCheckKind checkKind);
  void poisonAfterEvict(size_t extent = ChunkSize);

  // Mark pages from startOffset to the end of the chunk as unused. The start
  // offset must be after the first page, which contains the chunk header and is
  // not marked as unused.
  void markPagesUnusedHard(size_t startOffset);

  // Mark pages from the second page of the chunk to endOffset as in use,
  // following a call to markPagesUnusedHard.
  [[nodiscard]] bool markPagesInUseHard(size_t endOffset);

  // First allocatable address (just past the header).
  uintptr_t start() const { return uintptr_t(&data); }
  // One past the last byte of the chunk.
  uintptr_t end() const { return uintptr_t(this) + ChunkSize; }
};

static_assert(sizeof(NurseryChunk) == ChunkSize,
              "Nursery chunk size must match Chunk size.");
static_assert(offsetof(NurseryChunk, data) == NurseryChunkHeaderSize);

// Background task that returns nursery memory to the system: whole chunks are
// recycled back to the GC's chunk pool and the tails of shrunk chunks have
// their pages marked unused, both off the main thread.
class NurseryDecommitTask : public GCParallelTask {
 public:
  explicit NurseryDecommitTask(gc::GCRuntime* gc);
  bool reserveSpaceForChunks(size_t nchunks);

  bool isEmpty(const AutoLockHelperThreadState& lock) const;

  void queueChunk(NurseryChunk* chunk, const AutoLockHelperThreadState& lock);
  void queueRange(size_t newCapacity, NurseryChunk* chunk,
                  const AutoLockHelperThreadState& lock);

 private:
  // A chunk tail, from startOffset to the end of the chunk, to decommit.
  struct Region {
    NurseryChunk* chunk;
    size_t startOffset;
  };

  using NurseryChunkVector = Vector<NurseryChunk*, 0, SystemAllocPolicy>;
  using RegionVector = Vector<Region, 2, SystemAllocPolicy>;

  void run(AutoLockHelperThreadState& lock) override;

  NurseryChunkVector& chunksToDecommit() { return chunksToDecommit_.ref(); }
  const NurseryChunkVector& chunksToDecommit() const {
    return chunksToDecommit_.ref();
  }
  RegionVector& regionsToDecommit() { return regionsToDecommit_.ref(); }
  const RegionVector& regionsToDecommit() const {
    return regionsToDecommit_.ref();
  }

  MainThreadOrGCTaskData<NurseryChunkVector> chunksToDecommit_;
  MainThreadOrGCTaskData<RegionVector> regionsToDecommit_;
};

}  // namespace js
// Fill [start, end) of this chunk with the poison byte |value| and annotate
// the range for memory checkers according to |checkKind|.
inline void js::NurseryChunk::poisonRange(size_t start, size_t end,
                                          uint8_t value,
                                          MemCheckKind checkKind) {
  MOZ_ASSERT(start >= NurseryChunkHeaderSize);
  MOZ_ASSERT((start % gc::CellAlignBytes) == 0);
  MOZ_ASSERT((end % gc::CellAlignBytes) == 0);
  MOZ_ASSERT(end >= start);
  MOZ_ASSERT(end <= ChunkSize);

  uint8_t* rangeStart = reinterpret_cast<uint8_t*>(this) + start;
  size_t rangeLength = end - start;

  // We can poison the same chunk more than once, so first make sure memory
  // sanitizers will let us poison it.
  MOZ_MAKE_MEM_UNDEFINED(rangeStart, rangeLength);
  Poison(rangeStart, value, rangeLength, checkKind);
}
// Poison the allocatable part of the chunk (up to |extent|) with the
// swept-nursery pattern and tell memory checkers it must not be touched.
inline void js::NurseryChunk::poisonAfterEvict(size_t extent) {
  poisonRange(NurseryChunkHeaderSize, extent, JS_SWEPT_NURSERY_PATTERN,
              MemCheckKind::MakeNoAccess);
}
// Tell the OS the pages from |startOffset| to the end of this chunk are no
// longer needed. The first page (holding the chunk header) is never included.
inline void js::NurseryChunk::markPagesUnusedHard(size_t startOffset) {
  MOZ_ASSERT(startOffset >= NurseryChunkHeaderSize);  // Don't touch the header.
  MOZ_ASSERT(startOffset >= SystemPageSize());
  MOZ_ASSERT(startOffset <= ChunkSize);

  void* firstByte = reinterpret_cast<void*>(uintptr_t(this) + startOffset);
  MarkPagesUnusedHard(firstByte, ChunkSize - startOffset);
}
// Recommit pages from the second page of the chunk up to |endOffset|,
// reversing a previous markPagesUnusedHard. Returns false on failure.
inline bool js::NurseryChunk::markPagesInUseHard(size_t endOffset) {
  MOZ_ASSERT(endOffset >= NurseryChunkHeaderSize);
  MOZ_ASSERT(endOffset >= SystemPageSize());
  MOZ_ASSERT(endOffset <= ChunkSize);

  // The first page holds the chunk header and is never decommitted, so the
  // range starts at the second page.
  size_t pageSize = SystemPageSize();
  void* firstByte = reinterpret_cast<void*>(uintptr_t(this) + pageSize);
  return MarkPagesInUseHard(firstByte, endOffset - pageSize);
}
// static
// Convert a TenuredChunk's memory into a NurseryChunk by constructing the
// nursery header in place over the existing chunk memory.
inline js::NurseryChunk* js::NurseryChunk::fromChunk(TenuredChunk* chunk,
                                                     ChunkKind kind,
                                                     uint8_t index) {
  return new (chunk) NurseryChunk(chunk->runtime, kind, index);
}
js::NurseryDecommitTask::NurseryDecommitTask(gc::GCRuntime* gc)
    : GCParallelTask(gc, gcstats::PhaseKind::NONE) {
  // This can occur outside GCs so doesn't have a stats phase.

  // queueRange() appends infallibly and asserts fewer than two regions are
  // queued, so reserve that space up front.
  MOZ_ALWAYS_TRUE(regionsToDecommit().reserve(2));
}
// True when no chunks or regions are queued for decommit. The helper thread
// lock must be held.
bool js::NurseryDecommitTask::isEmpty(
    const AutoLockHelperThreadState& lock) const {
  return chunksToDecommit().empty() && regionsToDecommit().empty();
}
// Pre-reserve vector capacity so queueChunk() can append infallibly. Must be
// called while the task is idle.
bool js::NurseryDecommitTask::reserveSpaceForChunks(size_t nchunks) {
  MOZ_ASSERT(isIdle());
  return chunksToDecommit().reserve(nchunks);
}
// Queue a whole chunk to be recycled back to the GC. Space must have been
// reserved with reserveSpaceForChunks(), so the append cannot fail.
void js::NurseryDecommitTask::queueChunk(
    NurseryChunk* chunk, const AutoLockHelperThreadState& lock) {
  MOZ_ASSERT(isIdle(lock));
  MOZ_ALWAYS_TRUE(chunksToDecommit().append(chunk));
}
// Queue the tail of |chunk| beyond |newCapacity| to have its pages marked
// unused. The constructor reserved space for two such regions.
void js::NurseryDecommitTask::queueRange(
    size_t newCapacity, NurseryChunk* chunk,
    const AutoLockHelperThreadState& lock) {
  MOZ_ASSERT(isIdle(lock));
  MOZ_ASSERT(regionsToDecommit_.ref().length() < 2);
  MOZ_ASSERT(newCapacity < ChunkSize);
  MOZ_ASSERT(newCapacity % SystemPageSize() == 0);
  regionsToDecommit().infallibleAppend(Region{chunk, newCapacity});
}
// Task entry point: recycle queued chunks and decommit queued regions. The
// helper thread lock is dropped while memory is touched so the main thread
// is not blocked from queueing more work.
void js::NurseryDecommitTask::run(AutoLockHelperThreadState& lock) {
  while (!chunksToDecommit().empty()) {
    NurseryChunk* nurseryChunk = chunksToDecommit().popCopy();
    AutoUnlockHelperThreadState unlock(lock);

    // Turn the nursery chunk back into a TenuredChunk and hand it to the
    // GC's chunk pool.
    nurseryChunk->~NurseryChunk();
    TenuredChunk* tenuredChunk = TenuredChunk::emplace(
        nurseryChunk, gc, /* allMemoryCommitted = */ false);
    AutoLockGC lock(gc);
    gc->recycleChunk(tenuredChunk, lock);
  }

  while (!regionsToDecommit().empty()) {
    Region region = regionsToDecommit().popCopy();
    AutoUnlockHelperThreadState unlock(lock);
    region.chunk->markPagesUnusedHard(region.startOffset);
  }
}
js::Nursery::Nursery(GCRuntime* gc)
    : toSpace(ChunkKind::NurseryToSpace),
      fromSpace(ChunkKind::NurseryFromSpace),
      gc(gc),
      capacity_(0),
      enableProfiling_(false),
      semispaceEnabled_(gc::TuningDefaults::SemispaceNurseryEnabled),
      canAllocateStrings_(true),
      canAllocateBigInts_(true),
      reportDeduplications_(false),
      minorGCTriggerReason_(JS::GCReason::NO_REASON),
      prevPosition_(0),
      hasRecentGrowthData(false),
      smoothedTargetSize(0.0) {
  // Try to keep fields used by allocation fast path together at the start of
  // the nursery.
  static_assert(offsetof(Nursery, toSpace.position_) < TypicalCacheLineSize);
  static_assert(offsetof(Nursery, toSpace.currentEnd_) < TypicalCacheLineSize);

  // Environment overrides for nursery string/bigint allocation: the feature is
  // enabled only when the first character of the value is '1'.
  const char* env = getenv("MOZ_NURSERY_STRINGS");
  if (env && *env) {
    canAllocateStrings_ = (*env == '1');
  }
  env = getenv("MOZ_NURSERY_BIGINTS");
  if (env && *env) {
    canAllocateBigInts_ = (*env == '1');
  }
}
// Print |message| to stderr and terminate the process. Used both to show the
// "help" text for the JS_GC_* environment variables and to reject malformed
// values. Marked [[noreturn]] since it always calls exit().
[[noreturn]] static void PrintAndExit(const char* message) {
  fprintf(stderr, "%s", message);
  exit(0);
}

// Look up environment variable |name|. Returns nullptr if it is unset. If it
// is set to "help", print |helpMessage| and exit instead of returning.
static const char* GetEnvVar(const char* name, const char* helpMessage) {
  const char* value = getenv(name);
  if (!value) {
    return nullptr;
  }

  if (strcmp(value, "help") == 0) {
    PrintAndExit(helpMessage);
  }

  return value;
}
// Boolean environment variable: unset or a zero numeric value means false,
// any non-zero numeric value means true. A value of "help" prints
// |helpMessage| and exits (via GetEnvVar).
static bool GetBoolEnvVar(const char* name, const char* helpMessage) {
  const char* value = GetEnvVar(name, helpMessage);
  if (!value) {
    return false;
  }
  return atoi(value) != 0;
}
// Parse the JS_GC_REPORT_PRETENURE-style environment variable |name| into
// |filter|. Prints |helpMessage| and exits if the value is "help" or cannot
// be parsed as a filter specification.
static void ReadReportPretenureEnv(const char* name, const char* helpMessage,
                                   AllocSiteFilter* filter) {
  const char* env = GetEnvVar(name, helpMessage);
  if (!env) {
    return;
  }

  if (!AllocSiteFilter::readFromString(env, filter)) {
    PrintAndExit(helpMessage);
  }
}
// One-time initialization: read the profiling/reporting environment
// variables, create the background decommit task, enable the store buffer
// and allocate the first chunk. Returns false on OOM.
bool js::Nursery::init(AutoLockGCBgAlloc& lock) {
  ReadProfileEnv("JS_GC_PROFILE_NURSERY",
                 "Report minor GCs taking at least N microseconds.\n",
                 &enableProfiling_, &profileWorkers_, &profileThreshold_);

  reportDeduplications_ = GetBoolEnvVar(
      "JS_GC_REPORT_STATS",
      "JS_GC_REPORT_STATS=1\n"
      "\tAfter a minor GC, report how many strings were deduplicated.\n");

#ifdef JS_GC_ZEAL
  reportPromotion_ = GetBoolEnvVar(
      "JS_GC_REPORT_PROMOTE",
      "JS_GC_REPORT_PROMOTE=1\n"
      "\tAfter a minor GC, report what kinds of things were promoted.\n");
#endif

  ReadReportPretenureEnv(
      "JS_GC_REPORT_PRETENURE",
      "JS_GC_REPORT_PRETENURE=FILTER\n"
      "\tAfter a minor GC, report information about pretenuring, including\n"
      "\tallocation sites which match the filter specification. This is comma\n"
      "\tseparated list of one or more elements which can include:\n"
      "\t\tinteger N: report sites with at least N allocations\n"
      "\t\t'normal': report normal sites used for pretenuring\n"
      "\t\t'unknown': report catch-all sites for allocations without a\n"
      "\t\t specific site associated with them\n"
      "\t\t'optimized': report catch-all sites for allocations from\n"
      "\t\t optimized JIT code\n"
      "\t\t'missing': report automatically generated missing sites\n"
      "\t\t'object': report sites associated with JS objects\n"
      "\t\t'string': report sites associated with JS strings\n"
      "\t\t'bigint': report sites associated with JS big ints\n"
      "\t\t'longlived': report sites in the LongLived state (ignored for\n"
      "\t\t catch-all sites)\n"
      "\t\t'shortlived': report sites in the ShortLived state (ignored for\n"
      "\t\t catch-all sites)\n"
      "\tFilters of the same kind are combined with OR and of different kinds\n"
      "\twith AND. Prefixes of the keywords above are accepted.\n",
      &pretenuringReportFilter_);

  decommitTask = MakeUnique<NurseryDecommitTask>(gc);
  if (!decommitTask) {
    return false;
  }

  if (!gc->storeBuffer().enable()) {
    return false;
  }

  return initFirstChunk(lock);
}
js::Nursery::~Nursery() { disable(); }
// (Re)enable a disabled nursery. If the first chunk cannot be allocated the
// nursery simply remains disabled.
void js::Nursery::enable() {
  if (isEnabled()) {
    return;
  }

  MOZ_ASSERT(isEmpty());
  MOZ_ASSERT(!gc->isVerifyPreBarriersEnabled());

  {
    AutoLockGCBgAlloc lock(gc);
    if (!initFirstChunk(lock)) {
      // If we fail to allocate memory, the nursery will not be enabled.
      return;
    }
  }

#ifdef JS_GC_ZEAL
  if (gc->hasZealMode(ZealMode::GenerationalGC)) {
    enterZealMode();
  }
#endif

  updateAllZoneAllocFlags();

  // This should always succeed after the first time it's called.
  MOZ_ALWAYS_TRUE(gc->storeBuffer().enable());
}
// Set the nursery to its minimum size and allocate its first chunk. On
// failure the nursery is left disabled with zero capacity.
bool js::Nursery::initFirstChunk(AutoLockGCBgAlloc& lock) {
  MOZ_ASSERT(!isEnabled());
  MOZ_ASSERT(toSpace.chunks_.length() == 0);
  MOZ_ASSERT(fromSpace.chunks_.length() == 0);

  setCapacity(minSpaceSize());

  // Reserve decommit-queue space for every chunk in both spaces so later
  // queueChunk() calls cannot fail.
  size_t nchunks = toSpace.maxChunkCount_ + fromSpace.maxChunkCount_;
  if (!decommitTask->reserveSpaceForChunks(nchunks) ||
      !allocateNextChunk(lock)) {
    setCapacity(0);
    MOZ_ASSERT(toSpace.isEmpty());
    MOZ_ASSERT(fromSpace.isEmpty());
    return false;
  }

  toSpace.moveToStartOfChunk(this, 0);
  toSpace.setStartToCurrentPosition();

  if (semispaceEnabled_) {
    fromSpace.moveToStartOfChunk(this, 0);
    fromSpace.setStartToCurrentPosition();
  }

  MOZ_ASSERT(toSpace.isEmpty());
  MOZ_ASSERT(fromSpace.isEmpty());

  poisonAndInitCurrentChunk();

  // Clear any information about previous collections.
  clearRecentGrowthData();
  tenureThreshold_ = 0;

#ifdef DEBUG
  toSpace.checkKind(ChunkKind::NurseryToSpace);
  fromSpace.checkKind(ChunkKind::NurseryFromSpace);
#endif

  return true;
}
// Number of chunks needed to provide a capacity of |nbytes| (always at least
// one, even for sub-chunk capacities).
size_t RequiredChunkCount(size_t nbytes) {
  if (nbytes <= ChunkSize) {
    return 1;
  }
  return nbytes / ChunkSize;
}
// Set the per-space capacity in bytes and derive each space's maximum chunk
// count. |newCapacity| must already be rounded by roundSize().
void js::Nursery::setCapacity(size_t newCapacity) {
  MOZ_ASSERT(newCapacity == roundSize(newCapacity));
  capacity_ = newCapacity;
  size_t count = RequiredChunkCount(newCapacity);
  toSpace.maxChunkCount_ = count;
  if (semispaceEnabled_) {
    fromSpace.maxChunkCount_ = count;
  }
}
// Disable the nursery: free all chunks, zero the capacity and reset both
// spaces. The nursery must already be empty.
void js::Nursery::disable() {
  MOZ_ASSERT(isEmpty());
  if (!isEnabled()) {
    return;
  }

  // Free all chunks.
  decommitTask->join();
  freeChunksFrom(toSpace, 0);
  freeChunksFrom(fromSpace, 0);
  decommitTask->runFromMainThread();

  setCapacity(0);

  // We must reset currentEnd_ so that there is no space for anything in the
  // nursery. JIT'd code uses this even if the nursery is disabled.
  toSpace = Space(ChunkKind::NurseryToSpace);
  fromSpace = Space(ChunkKind::NurseryFromSpace);
  MOZ_ASSERT(toSpace.isEmpty());
  MOZ_ASSERT(fromSpace.isEmpty());

  gc->storeBuffer().disable();

  if (gc->wasInitialized()) {
    // This assumes there is an atoms zone.
    updateAllZoneAllocFlags();
  }
}
// Toggles for nursery string / bigint allocation. Each requires an empty
// nursery and propagates the new flag to every zone.

void js::Nursery::enableStrings() {
  MOZ_ASSERT(isEmpty());
  canAllocateStrings_ = true;
  updateAllZoneAllocFlags();
}

void js::Nursery::disableStrings() {
  MOZ_ASSERT(isEmpty());
  canAllocateStrings_ = false;
  updateAllZoneAllocFlags();
}

void js::Nursery::enableBigInts() {
  MOZ_ASSERT(isEmpty());
  canAllocateBigInts_ = true;
  updateAllZoneAllocFlags();
}

void js::Nursery::disableBigInts() {
  MOZ_ASSERT(isEmpty());
  canAllocateBigInts_ = false;
  updateAllZoneAllocFlags();
}
// Recompute the nursery allocation flags for every zone.
void js::Nursery::updateAllZoneAllocFlags() {
  // The alloc flags are not relevant for the atoms zone, and flushing
  // jit-related information can be problematic for the atoms zone.
  for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
    updateAllocFlagsForZone(zone);
  }
}
// Compute which kinds of cells |zone| may allocate in the nursery. Objects
// only need the nursery to be enabled; strings and bigints additionally need
// global support and the absence of a per-zone opt-out.
void js::Nursery::getAllocFlagsForZone(JS::Zone* zone, bool* allocObjectsOut,
                                       bool* allocStringsOut,
                                       bool* allocBigIntsOut) {
  bool enabled = isEnabled();
  *allocObjectsOut = enabled;
  *allocStringsOut =
      enabled && canAllocateStrings() && !zone->nurseryStringsDisabled;
  *allocBigIntsOut =
      enabled && canAllocateBigInts() && !zone->nurseryBigIntsDisabled;
}
// Push the current nursery allocation flags into |zone| unconditionally.
void js::Nursery::setAllocFlagsForZone(JS::Zone* zone) {
  bool allocObjects;
  bool allocStrings;
  bool allocBigInts;
  getAllocFlagsForZone(zone, &allocObjects, &allocStrings, &allocBigInts);
  zone->setNurseryAllocFlags(allocObjects, allocStrings, allocBigInts);
}
// Update |zone|'s nursery allocation flags. If anything changed, JIT code
// (which bakes in the old flags) must be cancelled and discarded.
void js::Nursery::updateAllocFlagsForZone(JS::Zone* zone) {
  bool allocObjects;
  bool allocStrings;
  bool allocBigInts;
  getAllocFlagsForZone(zone, &allocObjects, &allocStrings, &allocBigInts);

  if (allocObjects != zone->allocNurseryObjects() ||
      allocStrings != zone->allocNurseryStrings() ||
      allocBigInts != zone->allocNurseryBigInts()) {
    CancelOffThreadIonCompile(zone);
    zone->setNurseryAllocFlags(allocObjects, allocStrings, allocBigInts);
    discardCodeAndSetJitFlagsForZone(zone);
  }
}
// Discard |zone|'s JIT code and stubs and update the JIT's cached view of
// the nursery-strings flag.
void js::Nursery::discardCodeAndSetJitFlagsForZone(JS::Zone* zone) {
  zone->forceDiscardJitCode(runtime()->gcContext());

  if (jit::JitZone* jitZone = zone->jitZone()) {
    jitZone->discardStubs();
    jitZone->setStringsCanBeInNursery(zone->allocNurseryStrings());
  }
}
// Switch semispace collection on or off. If the nursery is currently in use
// it is evicted, torn down, and re-created with the new configuration.
void js::Nursery::setSemispaceEnabled(bool enabled) {
  if (semispaceEnabled() == enabled) {
    return;
  }

  bool wasEnabled = isEnabled();
  if (wasEnabled) {
    if (!isEmpty()) {
      gc->minorGC(JS::GCReason::EVICT_NURSERY);
    }
    disable();
  }

  semispaceEnabled_ = enabled;

  if (wasEnabled) {
    enable();
  }
}
// True if nothing has been allocated since the last collection. A disabled
// nursery is trivially empty.
bool js::Nursery::isEmpty() const {
  MOZ_ASSERT(fromSpace.isEmpty());

  if (!isEnabled()) {
    return true;
  }

  if (!gc->hasZealMode(ZealMode::GenerationalGC)) {
    // Outside generational-GC zeal mode the start position is pinned at the
    // beginning of the first chunk.
    MOZ_ASSERT(startChunk() == 0);
    MOZ_ASSERT(startPosition() == chunk(0).start());
  }

  return toSpace.isEmpty();
}
bool js::Nursery::Space::isEmpty() const { return position_ == startPosition_; }
// With semispace collection the total nursery budget is split between the two
// spaces, so each space gets the rounded half of the tunable size.
static size_t AdjustSizeForSemispace(size_t size, bool semispaceEnabled) {
  return semispaceEnabled ? Nursery::roundSize(size / 2) : size;
}
// Maximum / minimum size of one nursery space, derived from the GC tunables
// and halved when semispace collection is in use.

size_t js::Nursery::maxSpaceSize() const {
  return AdjustSizeForSemispace(tunables().gcMaxNurseryBytes(),
                                semispaceEnabled_);
}

size_t js::Nursery::minSpaceSize() const {
  return AdjustSizeForSemispace(tunables().gcMinNurseryBytes(),
                                semispaceEnabled_);
}
#ifdef JS_GC_ZEAL

// Grow the nursery to its maximum size for generational-GC zeal mode. Any
// previously decommitted pages of a sub-chunk nursery are recommitted first.
void js::Nursery::enterZealMode() {
  if (!isEnabled()) {
    return;
  }

  MOZ_ASSERT(isEmpty());

  decommitTask->join();

  AutoEnterOOMUnsafeRegion oomUnsafe;

  if (isSubChunkMode()) {
    {
      if (!chunk(0).markPagesInUseHard(ChunkSize)) {
        oomUnsafe.crash("Out of memory trying to extend chunk for zeal mode");
      }
    }

    // It'd be simpler to poison the whole chunk, but we can't do that
    // because the nursery might be partially used.
    chunk(0).poisonRange(capacity_, ChunkSize, JS_FRESH_NURSERY_PATTERN,
                         MemCheckKind::MakeUndefined);
  }

  setCapacity(maxSpaceSize());

  size_t nchunks = toSpace.maxChunkCount_ + fromSpace.maxChunkCount_;
  if (!decommitTask->reserveSpaceForChunks(nchunks)) {
    oomUnsafe.crash("Nursery::enterZealMode");
  }

  setCurrentEnd();
}

// Return the nursery to its normal minimum size on leaving zeal mode.
void js::Nursery::leaveZealMode() {
  if (!isEnabled()) {
    return;
  }

  MOZ_ASSERT(isEmpty());

  // Reset the nursery size.
  setCapacity(minSpaceSize());

  toSpace.moveToStartOfChunk(this, 0);
  toSpace.setStartToCurrentPosition();

  if (semispaceEnabled_) {
    fromSpace.moveToStartOfChunk(this, 0);
    fromSpace.setStartToCurrentPosition();
  }

  poisonAndInitCurrentChunk();
}

#endif  // JS_GC_ZEAL
// Slow-path cell allocation: try the bump allocator, and on failure either
// give up (a minor GC was requested) or advance to the next chunk and retry.
// Returns nullptr only when the caller should trigger a collection.
void* js::Nursery::allocateCell(gc::AllocSite* site, size_t size,
                                JS::TraceKind kind) {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));

  void* ptr = tryAllocateCell(site, size, kind);
  if (MOZ_LIKELY(ptr)) {
    return ptr;
  }

  if (handleAllocationFailure() != JS::GCReason::NO_REASON) {
    return nullptr;
  }

  // handleAllocationFailure() moved us to a fresh chunk; this must succeed.
  ptr = tryAllocateCell(site, size, kind);
  MOZ_ASSERT(ptr);
  return ptr;
}
// Allocate raw nursery memory (used for buffers). Same retry protocol as
// allocateCell above.
inline void* js::Nursery::allocate(size_t size) {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));

  void* ptr = tryAllocate(size);
  if (MOZ_LIKELY(ptr)) {
    return ptr;
  }

  if (handleAllocationFailure() != JS::GCReason::NO_REASON) {
    return nullptr;
  }

  // handleAllocationFailure() moved us to a fresh chunk; this must succeed.
  ptr = tryAllocate(size);
  MOZ_ASSERT(ptr);
  return ptr;
}
// Called when a bump allocation fails. Returns NO_REASON when the caller may
// retry (the nursery advanced to a new chunk); otherwise the reason the
// caller should collect for.
MOZ_NEVER_INLINE JS::GCReason Nursery::handleAllocationFailure() {
  if (minorGCRequested()) {
    // If a minor GC was requested then fail the allocation. The collection is
    // then run in GCRuntime::tryNewNurseryCell.
    return minorGCTriggerReason_;
  }

  if (!moveToNextChunk()) {
    return JS::GCReason::OUT_OF_NURSERY;
  }

  return JS::GCReason::NO_REASON;
}
// Advance the current space to its next chunk, allocating a new chunk if
// necessary. Returns false when the nursery is out of chunks.
bool Nursery::moveToNextChunk() {
  unsigned chunkno = currentChunk() + 1;
  MOZ_ASSERT(chunkno <= maxChunkCount());
  MOZ_ASSERT(chunkno <= allocatedChunkCount());
  if (chunkno == maxChunkCount()) {
    return false;
  }

  if (chunkno == allocatedChunkCount()) {
    // Track how long chunk allocation takes; it is reported in the profile.
    TimeStamp start = TimeStamp::Now();
    {
      AutoLockGCBgAlloc lock(gc);
      if (!allocateNextChunk(lock)) {
        return false;
      }
    }
    timeInChunkAlloc_ += TimeStamp::Now() - start;
    MOZ_ASSERT(chunkno < allocatedChunkCount());
  }

  moveToStartOfChunk(chunkno);
  poisonAndInitCurrentChunk();
  return true;
}
// Allocate a buffer: in the nursery when it is small enough and there is
// room, otherwise with malloc. Returns {pointer, wasMalloced}; the pointer
// is null on failure.
std::tuple<void*, bool> js::Nursery::allocateBuffer(Zone* zone, size_t nbytes,
                                                    arena_id_t arenaId) {
  MOZ_ASSERT(nbytes > 0);
  MOZ_ASSERT(nbytes <= SIZE_MAX - gc::CellAlignBytes);  // RoundUp can't overflow.
  nbytes = RoundUp(nbytes, gc::CellAlignBytes);

  if (nbytes <= MaxNurseryBufferSize) {
    void* buffer = allocate(nbytes);
    if (buffer) {
      return {buffer, false};
    }
  }

  void* buffer = zone->pod_arena_malloc<uint8_t>(arenaId, nbytes);
  return {buffer, bool(buffer)};
}
// Nursery-only buffer allocation: returns nullptr rather than falling back
// to malloc for large sizes or failed allocations.
void* js::Nursery::tryAllocateNurseryBuffer(JS::Zone* zone, size_t nbytes,
                                            arena_id_t arenaId) {
  MOZ_ASSERT(nbytes > 0);
  MOZ_ASSERT(nbytes <= SIZE_MAX - gc::CellAlignBytes);
  nbytes = RoundUp(nbytes, gc::CellAlignBytes);

  if (nbytes <= MaxNurseryBufferSize) {
    return allocate(nbytes);
  }

  return nullptr;
}
// Allocate a buffer for cell |owner|. Tenured owners get malloc memory
// directly; nursery owners may get nursery memory, and any malloced buffer
// is registered so it can be freed if the owner dies young.
void* js::Nursery::allocateBuffer(Zone* zone, Cell* owner, size_t nbytes,
                                  arena_id_t arenaId) {
  MOZ_ASSERT(owner);
  MOZ_ASSERT(nbytes > 0);

  if (!IsInsideNursery(owner)) {
    return zone->pod_arena_malloc<uint8_t>(arenaId, nbytes);
  }

  auto [buffer, isMalloced] = allocateBuffer(zone, nbytes, arenaId);
  if (isMalloced && !registerMallocedBuffer(buffer, nbytes)) {
    // Registration failed (OOM): release the buffer and report failure.
    js_free(buffer);
    return nullptr;
  }

  return buffer;
}
// Allocate a small buffer in the same location class as |owner|: nursery
// memory for nursery owners, malloc memory for tenured owners.
void* js::Nursery::allocateBufferSameLocation(Cell* owner, size_t nbytes,
                                              arena_id_t arenaId) {
  MOZ_ASSERT(owner);
  MOZ_ASSERT(nbytes > 0);
  MOZ_ASSERT(nbytes <= MaxNurseryBufferSize);

  if (!IsInsideNursery(owner)) {
    return owner->asTenured().zone()->pod_arena_malloc<uint8_t>(arenaId,
                                                                nbytes);
  }

  return allocate(nbytes);
}
// As allocateBuffer(zone, nbytes, arena), but the returned memory is zeroed.
std::tuple<void*, bool> js::Nursery::allocateZeroedBuffer(Zone* zone,
                                                          size_t nbytes,
                                                          arena_id_t arena) {
  MOZ_ASSERT(nbytes > 0);

  if (nbytes <= MaxNurseryBufferSize) {
    void* buffer = allocate(nbytes);
    if (buffer) {
      memset(buffer, 0, nbytes);
      return {buffer, false};
    }
  }

  // calloc-style allocation already returns zeroed memory.
  void* buffer = zone->pod_arena_calloc<uint8_t>(arena, nbytes);
  return {buffer, bool(buffer)};
}
// Zeroed-buffer allocation for cell |owner|, registering malloced buffers
// for nursery owners as in allocateBuffer.
void* js::Nursery::allocateZeroedBuffer(Cell* owner, size_t nbytes,
                                        arena_id_t arena) {
  MOZ_ASSERT(owner);
  MOZ_ASSERT(nbytes > 0);

  if (!IsInsideNursery(owner)) {
    return owner->asTenured().zone()->pod_arena_calloc<uint8_t>(arena, nbytes);
  }

  auto [buffer, isMalloced] =
      allocateZeroedBuffer(owner->nurseryZone(), nbytes, arena);
  if (isMalloced && !registerMallocedBuffer(buffer, nbytes)) {
    js_free(buffer);
    return nullptr;
  }

  return buffer;
}
// Resize |cell|'s buffer from |oldBytes| to |newBytes|. Malloced buffers are
// realloc'd (keeping the registration bookkeeping up to date); nursery
// buffers are grown by allocating a new buffer and copying.
void* js::Nursery::reallocateBuffer(Zone* zone, Cell* cell, void* oldBuffer,
                                    size_t oldBytes, size_t newBytes,
                                    arena_id_t arena) {
  if (!IsInsideNursery(cell)) {
    MOZ_ASSERT(!isInside(oldBuffer));
    return zone->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
  }

  if (!isInside(oldBuffer)) {
    // A malloced buffer owned by a nursery cell: realloc and, on success,
    // re-key the registered-buffer table and fix the byte accounting.
    MOZ_ASSERT(toSpace.mallocedBufferBytes >= oldBytes);
    void* newBuffer =
        zone->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
    if (newBuffer) {
      if (oldBuffer != newBuffer) {
        MOZ_ALWAYS_TRUE(
            toSpace.mallocedBuffers.rekeyAs(oldBuffer, newBuffer, newBuffer));
      }
      toSpace.mallocedBufferBytes -= oldBytes;
      toSpace.mallocedBufferBytes += newBytes;
    }
    return newBuffer;
  }

  // The nursery cannot make use of the returned slots data.
  if (newBytes < oldBytes) {
    return oldBuffer;
  }

  auto newBuffer = allocateBuffer(zone, cell, newBytes, js::MallocArena);
  if (newBuffer) {
    PodCopy((uint8_t*)newBuffer, (uint8_t*)oldBuffer, oldBytes);
  }
  return newBuffer;
}
// Free a buffer handed out by allocateBuffer. Nursery-internal buffers are
// reclaimed wholesale at the next collection, so only malloced buffers are
// deregistered and freed here.
void js::Nursery::freeBuffer(void* buffer, size_t nbytes) {
  if (!isInside(buffer)) {
    removeMallocedBuffer(buffer, nbytes);
    js_free(buffer);
  }
}
#ifdef DEBUG
/* static */
// Debug check that |ptr| points into the nursery, allowing for a pointer
// exactly at a chunk boundary.
inline bool Nursery::checkForwardingPointerInsideNursery(void* ptr) {
  // If a zero-capacity elements header lands right at the end of a chunk then
  // elements data will appear to be in the next chunk. If we have a pointer to
  // the very start of a chunk, check the previous chunk.
  if ((uintptr_t(ptr) & ChunkMask) == 0) {
    return isInside(reinterpret_cast<uint8_t*>(ptr) - 1);
  }

  return isInside(ptr);
}
#endif
// Record that the nursery buffer at |oldData| has moved to |newData| using
// the forwardedBuffers table, for buffers whose new location cannot hold an
// inline forwarding pointer.
void Nursery::setIndirectForwardingPointer(void* oldData, void* newData) {
  MOZ_ASSERT(checkForwardingPointerInsideNursery(oldData));
  // |newData| may be either in the nursery or in the malloc heap.

  AutoEnterOOMUnsafeRegion oomUnsafe;
#ifdef DEBUG
  // A buffer may be forwarded more than once, but always to the same place.
  if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(oldData)) {
    MOZ_ASSERT(p->value() == newData);
  }
#endif
  if (!forwardedBuffers.put(oldData, newData)) {
    // Fix: the crash reason previously named "Nursery::setForwardingPointer",
    // a different function, which misdirected crash triage.
    oomUnsafe.crash("Nursery::setIndirectForwardingPointer");
  }
}
#ifdef DEBUG
// Debug probe: verify the word at |ptr| can be read and written by storing
// back the value already present. Always returns true; a bad pointer faults
// instead of returning.
static bool IsWriteableAddress(void* ptr) {
  auto* word = static_cast<volatile uint64_t*>(ptr);
  uint64_t current = *word;
  *word = current;
  return true;
}
#endif
// Patch the slots/elements pointer stored at |pSlotsElems| if it still
// points at a nursery buffer that was moved during collection.
void js::Nursery::forwardBufferPointer(uintptr_t* pSlotsElems) {
  // Read the current pointer value which may be one of:
  // - Non-nursery pointer
  // - Nursery-allocated buffer
  // - A BufferRelocationOverlay inside the nursery
  //
  // Note: The buffer has already been relocated. We are just patching stale
  // pointers now.
  auto* buffer = reinterpret_cast<void*>(*pSlotsElems);
  if (!isInside(buffer)) {
    return;
  }

  // The new location for this buffer is either stored inline with it or in
  // the forwardedBuffers table.
  if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(buffer)) {
    buffer = p->value();
    // It's not valid to assert IsWriteableAddress for indirect forwarding
    // pointers because the size of the allocation could be less than a word.
  } else {
    BufferRelocationOverlay* reloc =
        static_cast<BufferRelocationOverlay*>(buffer);
    buffer = *reloc;
    MOZ_ASSERT(IsWriteableAddress(buffer));
  }

  MOZ_ASSERT_IF(isInside(buffer), !inCollectedRegion(buffer));
  *pSlotsElems = reinterpret_cast<uintptr_t>(buffer);
}
// Fraction of the previous collection's used bytes that were tenured. Sets
// *validForTenuring to indicate whether the rate should be used for
// tenuring decisions.
inline double js::Nursery::calcPromotionRate(bool* validForTenuring) const {
  MOZ_ASSERT(validForTenuring);

  if (previousGC.nurseryUsedBytes == 0) {
    *validForTenuring = false;
    return 0.0;
  }

  double used = double(previousGC.nurseryUsedBytes);
  double capacity = double(previousGC.nurseryCapacity);
  double tenured = double(previousGC.tenuredBytes);

  // We should only use the promotion rate to make tenuring decisions if it's
  // likely to be valid. The criterion we use is that the nursery was at least
  // 90% full.
  *validForTenuring = used > capacity * 0.9;

  MOZ_ASSERT(tenured <= used);
  return tenured / used;
}
// Emit a JSON object describing the last minor GC. As a public API this must
// not crash even when the nursery is disabled or no collection has run.
void js::Nursery::renderProfileJSON(JSONPrinter& json) const {
  if (!isEnabled()) {
    json.beginObject();
    json.property("status", "nursery disabled");
    json.endObject();
    return;
  }

  if (previousGC.reason == JS::GCReason::NO_REASON) {
    // If the nursery was empty when the last minorGC was requested, then
    // no nursery collection will have been performed but JSON may still be
    // requested. (And as a public API, this function should not crash in
    // such a case.)
    json.beginObject();
    json.property("status", "nursery empty");
    json.endObject();
    return;
  }

  json.beginObject();
  json.property("status", "complete");

  json.property("reason", JS::ExplainGCReason(previousGC.reason));
  json.property("bytes_tenured", previousGC.tenuredBytes);
  json.property("cells_tenured", previousGC.tenuredCells);
  json.property("strings_tenured",
                stats().getStat(gcstats::STAT_STRINGS_TENURED));
  json.property("strings_deduplicated",
                stats().getStat(gcstats::STAT_STRINGS_DEDUPLICATED));
  json.property("bigints_tenured",
                stats().getStat(gcstats::STAT_BIGINTS_TENURED));
  json.property("bytes_used", previousGC.nurseryUsedBytes);
  json.property("cur_capacity", previousGC.nurseryCapacity);
  const size_t newCapacity = capacity();
  if (newCapacity != previousGC.nurseryCapacity) {
    json.property("new_capacity", newCapacity);
  }
  if (previousGC.nurseryCommitted != previousGC.nurseryCapacity) {
    json.property("lazy_capacity", previousGC.nurseryCommitted);
  }
  if (!timeInChunkAlloc_.IsZero()) {
    json.property("chunk_alloc_us", timeInChunkAlloc_, json.MICROSECONDS);
  }

  // These counters only contain consistent data if the profiler is enabled,
  // and then there's no guarantee.
  if (runtime()->geckoProfiler().enabled()) {
    json.property("cells_allocated_nursery",
                  pretenuringNursery.totalAllocCount());
    json.property("cells_allocated_tenured",
                  stats().allocsSinceMinorGCTenured());
  }

  json.beginObjectProperty("phase_times");

  // Build the phase-name array from the profile-time macro; the trailing ""
  // avoids a dangling comma from the macro expansion.
#define EXTRACT_NAME(name, text) #name,
  static const char* const names[] = {
      FOR_EACH_NURSERY_PROFILE_TIME(EXTRACT_NAME)
#undef EXTRACT_NAME
      ""};

  size_t i = 0;
  for (auto time : profileDurations_) {
    json.property(names[i++], time, json.MICROSECONDS);
  }

  json.endObject();  // timings value

  json.endObject();
}
// The following macros define nursery GC profile metadata fields that are
// printed before the timing information defined by
// FOR_EACH_NURSERY_PROFILE_TIME.
// Each entry is _(column name, printed width, printf format, value expr).

// Fields shared by per-slice lines and the totals line.
#define FOR_EACH_NURSERY_PROFILE_COMMON_METADATA(_) \
  _("PID", 7, "%7zu", pid)                          \
  _("Runtime", 14, "0x%12p", runtime)

// Fields specific to a single minor GC slice.
#define FOR_EACH_NURSERY_PROFILE_SLICE_METADATA(_)    \
  _("Timestamp", 10, "%10.6f", timestamp.ToSeconds()) \
  _("Reason", 20, "%-20.20s", reasonStr)              \
  _("PRate", 6, "%5.1f%%", promotionRatePercent)      \
  _("OldKB", 6, "%6zu", oldSizeKB)                    \
  _("NewKB", 6, "%6zu", newSizeKB)                    \
  _("Dedup", 6, "%6zu", dedupCount)

#define FOR_EACH_NURSERY_PROFILE_METADATA(_)  \
  FOR_EACH_NURSERY_PROFILE_COMMON_METADATA(_) \
  FOR_EACH_NURSERY_PROFILE_SLICE_METADATA(_)
// Print one profile line for the collection that just finished.
void js::Nursery::printCollectionProfile(JS::GCReason reason,
                                         double promotionRate) {
  stats().maybePrintProfileHeaders();

  Sprinter sprinter;
  if (!sprinter.init()) {
    return;
  }
  sprinter.put(gcstats::MinorGCProfilePrefix);

  // Locals referenced by name from the FOR_EACH_NURSERY_PROFILE_METADATA
  // field macros below.
  size_t pid = getpid();
  JSRuntime* runtime = gc->rt;
  TimeDuration timestamp = collectionStartTime() - stats().creationTime();
  const char* reasonStr = ExplainGCReason(reason);
  double promotionRatePercent = promotionRate * 100;
  size_t oldSizeKB = previousGC.nurseryCapacity / 1024;
  size_t newSizeKB = capacity() / 1024;
  size_t dedupCount = stats().getStat(gcstats::STAT_STRINGS_DEDUPLICATED);

#define PRINT_FIELD_VALUE(_1, _2, format, value) \
  sprinter.printf(" " format, value);
  FOR_EACH_NURSERY_PROFILE_METADATA(PRINT_FIELD_VALUE)
#undef PRINT_FIELD_VALUE

  printProfileDurations(profileDurations_, sprinter);

  JS::UniqueChars str = sprinter.release();
  if (!str) {
    return;
  }
  fputs(str.get(), stats().profileFile());
}
// Print the column-header line matching printCollectionProfile's output.
void js::Nursery::printProfileHeader() {
  Sprinter sprinter;
  if (!sprinter.init()) {
    return;
  }
  sprinter.put(gcstats::MinorGCProfilePrefix);

#define PRINT_FIELD_NAME(name, width, _1, _2) \
  sprinter.printf(" %-*s", width, name);
  FOR_EACH_NURSERY_PROFILE_METADATA(PRINT_FIELD_NAME)
#undef PRINT_FIELD_NAME

#define PRINT_PROFILE_NAME(_1, text) sprinter.printf(" %-6.6s", text);
  FOR_EACH_NURSERY_PROFILE_TIME(PRINT_PROFILE_NAME)
#undef PRINT_PROFILE_NAME

  sprinter.put("\n");

  JS::UniqueChars str = sprinter.release();
  if (!str) {
    return;
  }
  fputs(str.get(), stats().profileFile());
}
// static
// Append one column per profile phase, in whole microseconds, followed by a
// newline.
void js::Nursery::printProfileDurations(const ProfileDurations& times,
                                        Sprinter& sprinter) {
  for (auto time : times) {
    int64_t micros = int64_t(time.ToMicroseconds());
    sprinter.printf(" %6" PRIi64, micros);
  }

  sprinter.put("\n");
}
// Total printed width of the per-slice metadata columns, including the
// single-space separators between adjacent fields.
static constexpr size_t NurserySliceMetadataFormatWidth() {
  size_t fieldCount = 0;
  size_t totalWidth = 0;

#define UPDATE_COUNT_AND_WIDTH(_1, width, _2, _3) \
  fieldCount++;                                   \
  totalWidth += width;
  FOR_EACH_NURSERY_PROFILE_SLICE_METADATA(UPDATE_COUNT_AND_WIDTH)
#undef UPDATE_COUNT_AND_WIDTH

  // Add padding between fields.
  totalWidth += fieldCount - 1;

  return totalWidth;
}
// Print the cumulative totals line, padded to the same width as the
// per-slice lines so the duration columns line up.
void js::Nursery::printTotalProfileTimes() {
  if (!enableProfiling_) {
    return;
  }

  Sprinter sprinter;
  if (!sprinter.init()) {
    return;
  }
  sprinter.put(gcstats::MinorGCProfilePrefix);

  // Locals referenced by the COMMON_METADATA field macros below.
  size_t pid = getpid();
  JSRuntime* runtime = gc->rt;

  char collections[32];
  DebugOnly<int> r = SprintfLiteral(
      collections, "TOTALS: %7" PRIu64 " collections:", gc->minorGCCount());
  MOZ_ASSERT(r > 0 && r < int(sizeof(collections)));

#define PRINT_FIELD_VALUE(_1, _2, format, value) \
  sprinter.printf(" " format, value);
  FOR_EACH_NURSERY_PROFILE_COMMON_METADATA(PRINT_FIELD_VALUE)
#undef PRINT_FIELD_VALUE

  // Use whole width of per-slice metadata to print total slices so the profile
  // totals that follow line up.
  size_t width = NurserySliceMetadataFormatWidth();
  sprinter.printf(" %-*s", int(width), collections);

  printProfileDurations(totalDurations_, sprinter);

  JS::UniqueChars str = sprinter.release();
  if (!str) {
    return;
  }
  fputs(str.get(), stats().profileFile());
}
// Zero the per-collection phase durations before the next minor GC.
void js::Nursery::maybeClearProfileDurations() {
  for (auto& duration : profileDurations_) {
    duration = mozilla::TimeDuration::Zero();
  }
}
// Record the start time for profile phase |key|.
inline void js::Nursery::startProfile(ProfileKey key) {
  startTimes_[key] = TimeStamp::Now();
}

// Record the elapsed time for phase |key| and accumulate it into the totals.
inline void js::Nursery::endProfile(ProfileKey key) {
  profileDurations_[key] = TimeStamp::Now() - startTimes_[key];
  totalDurations_[key] += profileDurations_[key];
}

// Start time of the current (or most recent) collection.
inline TimeStamp js::Nursery::collectionStartTime() const {
  return startTimes_[ProfileKey::Total];
}

// End time of the previous collection.
TimeStamp js::Nursery::lastCollectionEndTime() const {
  return previousGC.endTime;
}
// Whether an eager (e.g. idle-time) nursery collection is worthwhile.
bool js::Nursery::wantEagerCollection() const {
  if (!isEnabled()) {
    return false;
  }

  // Nothing to collect and nothing to shrink.
  if (isEmpty() && capacity() == minSpaceSize()) {
    return false;
  }

  if (minorGCRequested()) {
    return true;
  }

  if (freeSpaceIsBelowEagerThreshold()) {
    return true;
  }

  // If the nursery is not being collected often then it may be taking up more
  // space than necessary.
  return isUnderused();
}
inline bool js::Nursery::freeSpaceIsBelowEagerThreshold() const {
// The threshold is specified in terms of free space so that it doesn't depend
// on the size of the nursery.
//
// There two thresholds, an absolute free bytes threshold and a free space
// fraction threshold. Two thresholds are used so that we don't collect too
// eagerly for small nurseries (or even all the time if nursery size is less
// than the free bytes threshold) or too eagerly for large nurseries (where a
// fractional threshold may leave a significant amount of nursery unused).
//
// Since the aim is making this less eager we require both thresholds to be
// met.
size_t freeBytes = freeSpace();
double freeFraction = double(freeBytes) / double(capacity());
size_t bytesThreshold = tunables().nurseryEagerCollectionThresholdBytes();
double fractionThreshold =
tunables().nurseryEagerCollectionThresholdPercent();
return freeBytes < bytesThreshold && freeFraction < fractionThreshold;
}
inline bool js::Nursery::isUnderused() const {
if (js::SupportDifferentialTesting() || !previousGC.endTime) {
return false;
}
if (capacity() == minSpaceSize()) {
return false;
}
// If the nursery is above its minimum size, collect it every so often if we
// have idle time. This allows the nursery to shrink when it's not being
// used. There are other heuristics we could use for this, but this is the
// simplest.
TimeDuration timeSinceLastCollection =
TimeStamp::NowLoRes() - previousGC.endTime;
return timeSinceLastCollection > tunables().nurseryEagerCollectionTimeout();
}
void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) {
  // Perform a minor (nursery) collection: promote live nursery cells to the
  // tenured heap via doCollection, resize the nursery, run pretenuring
  // analysis, and update profiling, telemetry and string statistics.
  JSRuntime* rt = runtime();
  MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);

  if (minorGCRequested()) {
    // A requested minor GC makes the nursery appear full by moving the
    // allocation position to the chunk end; restore the real position and
    // clear the trigger and its pending interrupt.
    MOZ_ASSERT(position() == chunk(currentChunk()).end());
    toSpace.position_ = prevPosition_;
    prevPosition_ = 0;
    minorGCTriggerReason_ = JS::GCReason::NO_REASON;
    rt->mainContextFromOwnThread()->clearPendingInterrupt(
        InterruptReason::MinorGC);
  }

  if (!isEnabled() || isEmpty()) {
    // Our barriers are not always exact, and there may be entries in the
    // storebuffer even when the nursery is disabled or empty. It's not safe
    // to keep these entries as they may refer to tenured cells which may be
    // freed after this point.
    gc->storeBuffer().clear();
    MOZ_ASSERT_IF(!semispaceEnabled_, !pretenuringNursery.hasAllocatedSites());
  }

  if (!isEnabled()) {
    return;
  }

  AutoGCSession session(gc, JS::HeapState::MinorCollecting);

  stats().beginNurseryCollection();
  gcprobes::MinorGCStart();

  gc->callNurseryCollectionCallbacks(
      JS::GCNurseryProgress::GC_NURSERY_COLLECTION_START, reason);

  maybeClearProfileDurations();
  startProfile(ProfileKey::Total);

  // Snapshot pre-collection state used by telemetry and sizing heuristics.
  previousGC.reason = JS::GCReason::NO_REASON;
  previousGC.nurseryUsedBytes = usedSpace();
  previousGC.nurseryCapacity = capacity();
  previousGC.nurseryCommitted = totalCommitted();
  previousGC.nurseryUsedChunkCount = currentChunk() + 1;
  previousGC.tenuredBytes = 0;
  previousGC.tenuredCells = 0;
  tenuredEverything = true;

  // If it isn't empty, it will call doCollection, and possibly after that
  // isEmpty() will become true, so use another variable to keep track of the
  // old empty state.
  bool wasEmpty = isEmpty();
  if (!wasEmpty) {
    CollectionResult result = doCollection(session, options, reason);
    // Don't include chunk headers when calculating nursery space, since this
    // space does not represent data that can be tenured
    MOZ_ASSERT(result.tenuredBytes <=
               (previousGC.nurseryUsedBytes -
                (NurseryChunkHeaderSize * previousGC.nurseryUsedChunkCount)));

    previousGC.reason = reason;
    previousGC.tenuredBytes = result.tenuredBytes;
    previousGC.tenuredCells = result.tenuredCells;
    previousGC.nurseryUsedChunkCount = currentChunk() + 1;
  }

  // Resize the nursery.
  maybeResizeNursery(options, reason);

  if (!semispaceEnabled()) {
    poisonAndInitCurrentChunk();
  }

  bool validPromotionRate;
  const double promotionRate = calcPromotionRate(&validPromotionRate);

  startProfile(ProfileKey::Pretenure);
  size_t sitesPretenured = 0;
  sitesPretenured =
      doPretenuring(rt, reason, validPromotionRate, promotionRate);
  endProfile(ProfileKey::Pretenure);

  previousGC.endTime =
      TimeStamp::Now();  // Must happen after maybeResizeNursery.
  endProfile(ProfileKey::Total);
  gc->incMinorGcNumber();

  TimeDuration totalTime = profileDurations_[ProfileKey::Total];
  sendTelemetry(reason, totalTime, wasEmpty, promotionRate, sitesPretenured);

  gc->callNurseryCollectionCallbacks(
      JS::GCNurseryProgress::GC_NURSERY_COLLECTION_END, reason);

  stats().endNurseryCollection();
  gcprobes::MinorGCEnd();

  timeInChunkAlloc_ = mozilla::TimeDuration::Zero();

  // Rebuild runtime-wide string stats from the per-zone stats, keeping the
  // old totals so the deduplication delta for this collection can be
  // computed and reported below.
  js::StringStats prevStats = gc->stringStats;
  js::StringStats& currStats = gc->stringStats;
  currStats = js::StringStats();
  for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
    currStats += zone->stringStats;
    zone->previousGCStringStats = zone->stringStats;
  }
  stats().setStat(
      gcstats::STAT_STRINGS_DEDUPLICATED,
      currStats.deduplicatedStrings - prevStats.deduplicatedStrings);
  if (ShouldPrintProfile(runtime(), enableProfiling_, profileWorkers_,
                         profileThreshold_, totalTime)) {
    printCollectionProfile(reason, promotionRate);
  }

  if (reportDeduplications_) {
    printDeduplicationData(prevStats, currStats);
  }
}
void js::Nursery::sendTelemetry(JS::GCReason reason, TimeDuration totalTime,
                                bool wasEmpty, double promotionRate,
                                size_t sitesPretenured) {
  // Record metrics about this minor collection with the runtime.
  JSRuntime* rt = runtime();
  rt->metrics().GC_MINOR_REASON(uint32_t(reason));

  // Additionally record the reason for "long" minor GCs: those taking more
  // than 1ms.
  if (totalTime.ToMilliseconds() > 1.0) {
    rt->metrics().GC_MINOR_REASON_LONG(uint32_t(reason));
  }

  rt->metrics().GC_MINOR_US(totalTime);
  rt->metrics().GC_NURSERY_BYTES_2(totalCommitted());

  // Pretenuring and promotion-rate figures are only meaningful when the
  // nursery actually contained something to collect.
  if (!wasEmpty) {
    rt->metrics().GC_PRETENURE_COUNT_2(sitesPretenured);
    rt->metrics().GC_NURSERY_PROMOTION_RATE(promotionRate * 100);
  }
}
void js::Nursery::printDeduplicationData(js::StringStats& prev,
                                         js::StringStats& curr) {
  // Report to stderr how many strings (and how much character/malloc data)
  // were deduplicated during this minor GC, i.e. the delta between the
  // previous and current cumulative counters. Prints nothing when no strings
  // were deduplicated.
  if (curr.deduplicatedStrings > prev.deduplicatedStrings) {
    // Fix: print the string delta with PRIu64 like the other two fields —
    // the deltas come from cumulative counters and the guard above ensures
    // the value is positive, so the previous PRIi64 was a signedness
    // mismatch with its sibling specifiers.
    fprintf(stderr,
            "pid %zu: deduplicated %" PRIu64 " strings, %" PRIu64
            " chars, %" PRIu64 " malloc bytes\n",
            size_t(getpid()),
            curr.deduplicatedStrings - prev.deduplicatedStrings,
            curr.deduplicatedChars - prev.deduplicatedChars,
            curr.deduplicatedBytes - prev.deduplicatedBytes);
  }
}
void js::Nursery::freeTrailerBlocks(JS::GCOptions options,
                                    JS::GCReason reason) {
  // Hand the from-space's trailer blocks back to the malloced-block cache.
  fromSpace.freeTrailerBlocks(mallocedBlockCache_);

  // Under memory pressure or a shrinking GC, drop the cache entirely rather
  // than preening it.
  const bool dropCache =
      options == JS::GCOptions::Shrink || gc::IsOOMReason(reason);
  if (dropCache) {
    mallocedBlockCache_.clear();
    return;
  }

  // Discard blocks from the cache at 0.05% per megabyte of nursery capacity,
  // that is, 0.8% of blocks for a 16-megabyte nursery. This allows the cache
  // to gradually discard unneeded blocks in long running applications.
  mallocedBlockCache_.preen(0.05 * double(capacity()) / (1024.0 * 1024.0));
}
void js::Nursery::Space::freeTrailerBlocks(
    MallocedBlockCache& mallocedBlockCache) {
  // This routine frees those blocks denoted by the set
  //
  //  trailersAdded_ (all of it)
  //    - trailersRemoved_ (entries with index below trailersRemovedUsed_)
  //
  // For each block, places it back on the nursery's small-malloced-block pool
  // by calling mallocedBlockCache.free.
  MOZ_ASSERT(trailersAdded_.length() == trailersRemoved_.length());
  MOZ_ASSERT(trailersRemovedUsed_ <= trailersRemoved_.length());

  // Sort the removed entries (by address) so they can be searched/walked in
  // order below.
  std::sort(trailersRemoved_.begin(),
            trailersRemoved_.begin() + trailersRemovedUsed_,
            [](const void* block1, const void* block2) {
              return uintptr_t(block1) < uintptr_t(block2);
            });

  // Use one of two schemes to enumerate the set subtraction.
  if (trailersRemovedUsed_ < 1000) {
    // If the number of removed items is relatively small, it isn't worth the
    // cost of sorting `trailersAdded_`. Instead, walk through the vector in
    // whatever order it is and use binary search to establish whether each
    // item is present in trailersRemoved_[0 .. trailersRemovedUsed_ - 1].
    // Cost: O(nAdded * log(trailersRemovedUsed_)).
    const size_t nAdded = trailersAdded_.length();
    for (size_t i = 0; i < nAdded; i++) {
      const PointerAndUint7 block = trailersAdded_[i];
      const void* blockPointer = block.pointer();
      if (!std::binary_search(trailersRemoved_.begin(),
                              trailersRemoved_.begin() + trailersRemovedUsed_,
                              blockPointer)) {
        mallocedBlockCache.free(block);
      }
    }
  } else {
    // The general case, which is algorithmically safer for large inputs.
    // Sort the added entries, and then walk through both them and the removed
    // entries in lockstep.
    std::sort(trailersAdded_.begin(), trailersAdded_.end(),
              [](const PointerAndUint7& block1, const PointerAndUint7& block2) {
                return uintptr_t(block1.pointer()) <
                       uintptr_t(block2.pointer());
              });
    // Enumerate the set subtraction. This is somewhat simplified by the fact
    // that all elements of the removed set must also be present in the added
    // set. (the "inclusion property").
    const size_t nAdded = trailersAdded_.length();
    const size_t nRemoved = trailersRemovedUsed_;
    size_t iAdded;
    size_t iRemoved = 0;
    for (iAdded = 0; iAdded < nAdded; iAdded++) {
      if (iRemoved == nRemoved) {
        // We've run out of items to skip, so move on to the next loop.
        break;
      }
      const PointerAndUint7 blockAdded = trailersAdded_[iAdded];
      const void* blockRemoved = trailersRemoved_[iRemoved];
      if (blockAdded.pointer() < blockRemoved) {
        // Present only in the added set: free it.
        mallocedBlockCache.free(blockAdded);
        continue;
      }
      // If this doesn't hold
      // (that is, if `blockAdded.pointer() > blockRemoved`),
      // then the abovementioned inclusion property doesn't hold.
      MOZ_RELEASE_ASSERT(blockAdded.pointer() == blockRemoved);
      iRemoved++;
    }
    MOZ_ASSERT(iRemoved == nRemoved);
    // We've used up the removed set, so now finish up the remainder of the
    // added set.
    for (/*keep going*/; iAdded < nAdded; iAdded++) {
      const PointerAndUint7 block = trailersAdded_[iAdded];
      mallocedBlockCache.free(block);
    }
  }
  // And empty out both sets, but preserve the underlying storage.
  trailersAdded_.clear();
  trailersRemoved_.clear();
  trailersRemovedUsed_ = 0;
  trailerBytes_ = 0;
}
size_t Nursery::sizeOfTrailerBlockSets(
    mozilla::MallocSizeOf mallocSizeOf) const {
  // Memory-reporting helper: measure the trailer bookkeeping vectors. Only
  // to-space may have entries; from-space's sets must already be empty.
  MOZ_ASSERT(fromSpace.trailersAdded_.empty());
  MOZ_ASSERT(fromSpace.trailersRemoved_.empty());
  return toSpace.trailersAdded_.sizeOfExcludingThis(mallocSizeOf) +
         toSpace.trailersRemoved_.sizeOfExcludingThis(mallocSizeOf);
}
js::Nursery::CollectionResult js::Nursery::doCollection(AutoGCSession& session,
                                                        JS::GCOptions options,
                                                        JS::GCReason reason) {
  // Core of a minor collection: trace roots, promote reachable nursery cells
  // (iterating to a fixed point), then sweep and reset nursery state.
  // Returns the number of bytes and cells promoted.
  JSRuntime* rt = runtime();
  AutoSetThreadIsPerformingGC performingGC(rt->gcContext());
  AutoStopVerifyingBarriers av(rt, false);
  AutoDisableProxyCheck disableStrictProxyChecking;
  mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion;

  // Swap nursery spaces.
  swapSpaces();
  MOZ_ASSERT(toSpace.isEmpty());
  MOZ_ASSERT(toSpace.mallocedBuffers.empty());
  if (semispaceEnabled_) {
    poisonAndInitCurrentChunk();
  }

  clearMapAndSetNurseryRanges();

  // Move objects pointed to by roots from the nursery to the major heap.
  tenuredEverything = shouldTenureEverything(reason);
  TenuringTracer mover(rt, this, tenuredEverything);
#ifdef JS_GC_ZEAL
  if (reportPromotion_) {
    mover.initPromotionReport();
  }
#endif

  // Trace everything considered as a root by a minor GC.
  traceRoots(session, mover);

  startProfile(ProfileKey::SweepCaches);
  gc->purgeRuntimeForMinorGC();
  endProfile(ProfileKey::SweepCaches);

  // Most of the work is done here. This loop iterates over objects that have
  // been moved to the major heap. If these objects have any outgoing pointers
  // to the nursery, then those nursery objects get moved as well, until no
  // objects are left to move. That is, we iterate to a fixed point.
  startProfile(ProfileKey::CollectToObjFP);
  mover.collectToObjectFixedPoint();
  endProfile(ProfileKey::CollectToObjFP);

  startProfile(ProfileKey::CollectToStrFP);
  mover.collectToStringFixedPoint();
  endProfile(ProfileKey::CollectToStrFP);

#ifdef JS_GC_ZEAL
  if (reportPromotion_ && options != JS::GCOptions::Shutdown) {
    JSContext* cx = runtime()->mainContextFromOwnThread();
    JS::AutoAssertNoGC nogc(cx);
    mover.printPromotionReport(cx, reason, nogc);
  }
#endif

  // Sweep to update any pointers to nursery objects that have now been
  // tenured.
  startProfile(ProfileKey::Sweep);
  sweep();
  endProfile(ProfileKey::Sweep);

  // Update any slot or element pointers whose destination has been tenured.
  startProfile(ProfileKey::UpdateJitActivations);
  js::jit::UpdateJitActivationsForMinorGC(rt);
  forwardedBuffers.clearAndCompact();
  endProfile(ProfileKey::UpdateJitActivations);

  startProfile(ProfileKey::ObjectsTenuredCallback);
  gc->callObjectsTenuredCallback();
  endProfile(ProfileKey::ObjectsTenuredCallback);

  // Sweep.
  startProfile(ProfileKey::FreeMallocedBuffers);
  gc->queueBuffersForFreeAfterMinorGC(fromSpace.mallocedBuffers);
  fromSpace.mallocedBufferBytes = 0;
  endProfile(ProfileKey::FreeMallocedBuffers);

  // Give trailer blocks associated with non-tenured Wasm{Struct,Array}Objects
  // back to our `mallocedBlockCache_`.
  startProfile(ProfileKey::FreeTrailerBlocks);
  freeTrailerBlocks(options, reason);
  endProfile(ProfileKey::FreeTrailerBlocks);

  startProfile(ProfileKey::ClearNursery);
  clear();
  endProfile(ProfileKey::ClearNursery);

  // Purge the StringToAtomCache. This has to happen at the end because the
  // cache is used when tenuring strings.
  startProfile(ProfileKey::PurgeStringToAtomCache);
  runtime()->caches().stringToAtomCache.purge();
  endProfile(ProfileKey::PurgeStringToAtomCache);

  // Make sure hashtables have been updated after the collection.
  startProfile(ProfileKey::CheckHashTables);
#ifdef JS_GC_ZEAL
  if (gc->hasZealMode(ZealMode::CheckHashTablesOnMinorGC)) {
    runtime()->caches().checkEvalCacheAfterMinorGC();
    gc->checkHashTablesAfterMovingGC();
  }
#endif
  endProfile(ProfileKey::CheckHashTables);

  if (semispaceEnabled_) {
    // On the next collection, tenure everything before |tenureThreshold_|.
    tenureThreshold_ = toSpace.offsetFromExclusiveAddress(position());
  } else {
    // Swap nursery spaces back because we only use one.
    swapSpaces();
    MOZ_ASSERT(toSpace.isEmpty());
  }

  MOZ_ASSERT(fromSpace.isEmpty());

  if (semispaceEnabled_) {
    poisonAndInitCurrentChunk();
  }

  return {mover.getPromotedSize(), mover.getPromotedCells()};
}
void js::Nursery::swapSpaces() {
  // Exchange the to-space and from-space, then fix up the chunk-kind tags so
  // each space's chunks are labelled with the space they now belong to.
  std::swap(toSpace, fromSpace);
  toSpace.setKind(ChunkKind::NurseryToSpace);
  fromSpace.setKind(ChunkKind::NurseryFromSpace);
}
void js::Nursery::traceRoots(AutoGCSession& session, TenuringTracer& mover) {
  // Trace all roots of a minor GC into |mover|: the store buffer (tenured ->
  // nursery edges), runtime roots, and debugger roots.
  {
    // Suppress the sampling profiler to prevent it observing moved functions.
    AutoSuppressProfilerSampling suppressProfiler(
        runtime()->mainContextFromOwnThread());

    // Trace the store buffer, which must happen first.
    // Create an empty store buffer on the stack and swap it with the main store
    // buffer, clearing it.
    StoreBuffer sb(runtime());
    {
      AutoEnterOOMUnsafeRegion oomUnsafe;
      if (!sb.enable()) {
        oomUnsafe.crash("Nursery::traceRoots");
      }
    }
    std::swap(sb, gc->storeBuffer());
    MOZ_ASSERT(gc->storeBuffer().isEnabled());
    MOZ_ASSERT(gc->storeBuffer().isEmpty());

    // Trace each category of store buffer entry from the (swapped-out) local
    // buffer |sb|, timing each under its own profile key.
    startProfile(ProfileKey::TraceWholeCells);
    sb.traceWholeCells(mover);
    endProfile(ProfileKey::TraceWholeCells);
    cellsToSweep = sb.releaseCellSweepSet();

    startProfile(ProfileKey::TraceValues);
    sb.traceValues(mover);
    endProfile(ProfileKey::TraceValues);

    startProfile(ProfileKey::TraceWasmAnyRefs);
    sb.traceWasmAnyRefs(mover);
    endProfile(ProfileKey::TraceWasmAnyRefs);

    startProfile(ProfileKey::TraceCells);
    sb.traceCells(mover);
    endProfile(ProfileKey::TraceCells);

    startProfile(ProfileKey::TraceSlots);
    sb.traceSlots(mover);
    endProfile(ProfileKey::TraceSlots);

    startProfile(ProfileKey::TraceGenericEntries);
    sb.traceGenericEntries(&mover);
    endProfile(ProfileKey::TraceGenericEntries);

    startProfile(ProfileKey::MarkRuntime);
    gc->traceRuntimeForMinorGC(&mover, session);
    endProfile(ProfileKey::MarkRuntime);
  }

  startProfile(ProfileKey::MarkDebugger);
  {
    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
    DebugAPI::traceAllForMovingGC(&mover);
  }
  endProfile(ProfileKey::MarkDebugger);
}
bool js::Nursery::shouldTenureEverything(JS::GCReason reason) {
  // Without semispace collection, every promoted cell is tenured anyway.
  if (!semispaceEnabled()) {
    return true;
  }

  // With semispace enabled, only tenure everything for collections that must
  // completely evacuate the nursery.
  switch (reason) {
    case JS::GCReason::EVICT_NURSERY:
    case JS::GCReason::DISABLE_GENERATIONAL_GC:
      return true;
    default:
      return false;
  }
}
size_t js::Nursery::doPretenuring(JSRuntime* rt, JS::GCReason reason,
                                  bool validPromotionRate,
                                  double promotionRate) {
  // Run site-based pretenuring analysis for this minor collection and, per
  // zone, disable nursery allocation of strings/bigints whose catch-all
  // allocation site has been found to be long lived.
  //
  // Returns the number of allocation sites that were pretenured.
  size_t sitesPretenured = pretenuringNursery.doPretenuring(
      gc, reason, validPromotionRate, promotionRate, pretenuringReportFilter_);

  size_t zonesWhereStringsDisabled = 0;
  size_t zonesWhereBigIntsDisabled = 0;

  // NOTE(review): these two counters are never updated below, so the stats
  // are always reported as zero — confirm whether tenured-count collection
  // was intended here.
  uint32_t numStringsTenured = 0;
  uint32_t numBigIntsTenured = 0;
  for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
    // A kind is disabled when the zone still nursery-allocates it but the
    // zone's catch-all allocation site for that kind is long lived.
    bool disableNurseryStrings =
        zone->allocNurseryStrings() &&
        zone->unknownAllocSite(JS::TraceKind::String)->state() ==
            AllocSite::State::LongLived;
    bool disableNurseryBigInts =
        zone->allocNurseryBigInts() &&
        zone->unknownAllocSite(JS::TraceKind::BigInt)->state() ==
            AllocSite::State::LongLived;
    if (disableNurseryStrings || disableNurseryBigInts) {
      if (disableNurseryStrings) {
        zone->nurseryStringsDisabled = true;
        zonesWhereStringsDisabled++;
      }
      if (disableNurseryBigInts) {
        zone->nurseryBigIntsDisabled = true;
        // Fix: this previously incremented zonesWhereStringsDisabled,
        // double-counting strings and leaving the big-int count permanently
        // zero, so the big-int report below could never print.
        zonesWhereBigIntsDisabled++;
      }
      updateAllocFlagsForZone(zone);
    }
  }

  stats().setStat(gcstats::STAT_STRINGS_TENURED, numStringsTenured);
  stats().setStat(gcstats::STAT_BIGINTS_TENURED, numBigIntsTenured);

  if (reportPretenuring() && zonesWhereStringsDisabled) {
    fprintf(stderr,
            "Pretenuring disabled nursery string allocation in %zu zones\n",
            zonesWhereStringsDisabled);
  }
  if (reportPretenuring() && zonesWhereBigIntsDisabled) {
    fprintf(stderr,
            "Pretenuring disabled nursery big int allocation in %zu zones\n",
            zonesWhereBigIntsDisabled);
  }

  return sitesPretenured;
}