/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* Implementation of GC sweeping.
*
* In the SpiderMonkey GC, 'sweeping' is used to mean two things:
* - updating data structures to remove pointers to dead GC things and updating
* pointers to moved GC things
* - finalizing dead GC things
*
* Furthermore, the GC carries out gray and weak marking after the start of the
* sweep phase. This is also implemented in this file.
*/
#include "mozilla/DebugOnly.h"
#include "mozilla/Maybe.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/TimeStamp.h"
#include "builtin/FinalizationRegistryObject.h"
#include "builtin/WeakRefObject.h"
#include "debugger/DebugAPI.h"
#include "gc/AllocKind.h"
#include "gc/BufferAllocator.h"
#include "gc/FinalizationObservers.h"
#include "gc/GCInternals.h"
#include "gc/GCLock.h"
#include "gc/GCProbes.h"
#include "gc/GCRuntime.h"
#include "gc/ParallelWork.h"
#include "gc/Statistics.h"
#include "gc/TraceKind.h"
#include "gc/WeakMap.h"
#include "gc/Zone.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "proxy/DeadObjectProxy.h"
#include "vm/BigIntType.h"
#include "vm/HelperThreads.h"
#include "vm/JSContext.h"
#include "vm/Time.h"
#include "vm/WrapperObject.h"
#include "gc/PrivateIterators-inl.h"
#include "vm/GeckoProfiler-inl.h"
#include "vm/JSObject-inl.h"
#include "vm/PropMap-inl.h"
#include "vm/Shape-inl.h"
#include "vm/StringType-inl.h"
using namespace js;
using namespace js::gc;
using mozilla::DebugOnly;
using mozilla::TimeStamp;
using JS::SliceBudget;
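// A FinalizePhase names the stats phase to record and the set of alloc kinds
// that are finalized together during that phase.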
struct js::gc::FinalizePhase {
gcstats::PhaseKind statsPhase;
AllocKinds kinds;
};
/*
* Finalization order for objects swept incrementally on the main thread.
*/
static constexpr FinalizePhase ForegroundObjectFinalizePhase = {
gcstats::PhaseKind::FINALIZE_OBJECT,
{AllocKind::OBJECT0, AllocKind::OBJECT2, AllocKind::OBJECT4,
AllocKind::OBJECT8, AllocKind::OBJECT12, AllocKind::OBJECT16}};
/*
* Finalization order for GC things swept incrementally on the main thread.
*/
static constexpr FinalizePhase ForegroundNonObjectFinalizePhase = {
gcstats::PhaseKind::FINALIZE_NON_OBJECT,
{AllocKind::SCRIPT, AllocKind::JITCODE}};
/*
* Finalization order for GC things swept on the background thread.
*/
static constexpr FinalizePhase BackgroundFinalizePhases[] = {
{gcstats::PhaseKind::FINALIZE_OBJECT,
{AllocKind::FUNCTION, AllocKind::FUNCTION_EXTENDED,
AllocKind::OBJECT0_BACKGROUND, AllocKind::OBJECT2_BACKGROUND,
AllocKind::ARRAYBUFFER4, AllocKind::OBJECT4_BACKGROUND,
AllocKind::ARRAYBUFFER8, AllocKind::OBJECT8_BACKGROUND,
AllocKind::ARRAYBUFFER12, AllocKind::OBJECT12_BACKGROUND,
AllocKind::ARRAYBUFFER16, AllocKind::OBJECT16_BACKGROUND}},
{gcstats::PhaseKind::FINALIZE_NON_OBJECT,
{AllocKind::BUFFER16, AllocKind::BUFFER32, AllocKind::BUFFER64,
AllocKind::BUFFER128, AllocKind::SCOPE, AllocKind::REGEXP_SHARED,
AllocKind::FAT_INLINE_STRING, AllocKind::STRING,
AllocKind::EXTERNAL_STRING, AllocKind::FAT_INLINE_ATOM, AllocKind::ATOM,
AllocKind::SYMBOL, AllocKind::BIGINT, AllocKind::SHAPE,
AllocKind::BASE_SHAPE, AllocKind::GETTER_SETTER,
AllocKind::COMPACT_PROP_MAP, AllocKind::NORMAL_PROP_MAP,
AllocKind::DICT_PROP_MAP}}};
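// Finalize the dead cells of type T in this arena, rebuilding the arena's
// free list as it goes. Returns the number of cells that remain marked.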
template <typename T>
inline size_t Arena::finalize(JS::GCContext* gcx, AllocKind thingKind,
size_t thingSize) {
/* Enforce requirements on size of T. */
MOZ_ASSERT(thingSize % CellAlignBytes == 0);
MOZ_ASSERT(thingSize >= MinCellSize);
MOZ_ASSERT(thingSize <= 255);
MOZ_ASSERT(allocated());
MOZ_ASSERT(thingKind == getAllocKind());
MOZ_ASSERT(thingSize == getThingSize());
MOZ_ASSERT(!onDelayedMarkingList_);
uint_fast16_t freeStart = firstThingOffset(thingKind);
// Update the free list as we go along. The cell iterator will always be ahead
// of this pointer when it is written through, so the write will not interfere
// with the iteration.
FreeSpan* newListTail = &firstFreeSpan;
size_t nmarked = 0;
size_t nfinalized = 0;
for (ArenaCellIterUnderFinalize cell(this); !cell.done(); cell.next()) {
T* t = cell.as<T>();
if (TenuredThingIsMarkedAny(t)) {
uint_fast16_t thing = uintptr_t(t) & ArenaMask;
if (thing != freeStart) {
// We just finished passing over one or more free things,
// so record a new FreeSpan.
newListTail->initBounds(freeStart, thing - thingSize, this);
newListTail = newListTail->nextSpanUnchecked(this);
}
freeStart = thing + thingSize;
nmarked++;
} else {
t->finalize(gcx);
AlwaysPoison(t, JS_SWEPT_TENURED_PATTERN, thingSize,
MemCheckKind::MakeUndefined);
gcprobes::TenuredFinalize(t);
nfinalized++;
}
}
if constexpr (std::is_same_v<T, JSObject> || std::is_same_v<T, JSString> ||
std::is_same_v<T, JS::BigInt>) {
if (isNewlyCreated_) {
zone()->pretenuring.updateCellCountsInNewlyCreatedArenas(
nmarked + nfinalized, nmarked);
}
}
isNewlyCreated_ = 0;
if (freeStart == ArenaSize) {
// If the last thing was marked, we will have already set the bounds of
// the final span, and we just need to terminate the list.
newListTail->initAsEmpty();
} else {
// Otherwise, end the list with a span that covers the final stretch of free
// things.
newListTail->initFinal(freeStart, ArenaSize - thingSize, this);
}
#ifdef DEBUG
size_t nfree = numFreeThings(thingSize);
MOZ_ASSERT(nfree + nmarked == thingsPerArena(thingKind));
#endif
return nmarked;
}
// Finalize arenas from the src list, inserting them into the dest list binned
// by the number of free cells in each arena. Completely empty arenas can later
// be extracted from dest by the caller. Returns false if the budget is
// exhausted before all arenas are processed.
template <typename T>
static inline bool FinalizeTypedArenas(JS::GCContext* gcx, ArenaList& src,
SortedArenaList& dest,
AllocKind thingKind,
SliceBudget& budget) {
MOZ_ASSERT(gcx->isFinalizing());
size_t thingSize = Arena::thingSize(thingKind);
size_t thingsPerArena = Arena::thingsPerArena(thingKind);
size_t markCount = 0;
auto updateMarkCount = mozilla::MakeScopeExit([&] {
GCRuntime* gc = &gcx->runtimeFromAnyThread()->gc;
gc->stats().addCount(gcstats::COUNT_CELLS_MARKED, markCount);
});
while (!src.isEmpty()) {
Arena* arena = src.popFront();
size_t nmarked = arena->finalize<T>(gcx, thingKind, thingSize);
size_t nfree = thingsPerArena - nmarked;
markCount += nmarked;
dest.insertAt(arena, nfree);
budget.step(thingsPerArena);
if (budget.isOverBudget()) {
return false;
}
}
return true;
}
/*
* Finalize the list of arenas.
*/
static bool FinalizeArenas(JS::GCContext* gcx, ArenaList& src,
SortedArenaList& dest, AllocKind thingKind,
SliceBudget& budget) {
switch (thingKind) {
#define EXPAND_CASE(allocKind, traceKind, type, sizedType, bgFinal, nursery, \
compact) \
case AllocKind::allocKind: \
return FinalizeTypedArenas<type>(gcx, src, dest, thingKind, budget);
FOR_EACH_ALLOCKIND(EXPAND_CASE)
#undef EXPAND_CASE
default:
MOZ_CRASH("Invalid alloc kind");
}
}
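// Mark each alloc kind in |phase| as undergoing background finalization if
// |zone| has collecting arenas of that kind.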
void GCRuntime::initBackgroundSweep(Zone* zone, JS::GCContext* gcx,
const FinalizePhase& phase) {
gcstats::AutoPhase ap(stats(), phase.statsPhase);
for (auto kind : phase.kinds) {
zone->arenas.initBackgroundSweep(kind);
}
}
void ArenaLists::initBackgroundSweep(AllocKind thingKind) {
MOZ_ASSERT(IsBackgroundFinalized(thingKind));
MOZ_ASSERT(concurrentUse(thingKind) == ConcurrentUse::None);
if (!collectingArenaList(thingKind).isEmpty()) {
concurrentUse(thingKind) = ConcurrentUse::BackgroundFinalize;
}
}
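// Finalize all of |zone|'s collecting arenas of the given kind on the current
// thread, making any arenas left completely empty available via |empty|.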
void GCRuntime::backgroundFinalize(JS::GCContext* gcx, Zone* zone,
AllocKind kind, Arena** empty) {
MOZ_ASSERT(empty);
ArenaLists* lists = &zone->arenas;
ArenaList& arenas = lists->collectingArenaList(kind);
if (arenas.isEmpty()) {
MOZ_ASSERT(lists->concurrentUse(kind) == ArenaLists::ConcurrentUse::None);
return;
}
SortedArenaList finalizedSorted(kind);
auto unlimited = SliceBudget::unlimited();
FinalizeArenas(gcx, arenas, finalizedSorted, kind, unlimited);
MOZ_ASSERT(arenas.isEmpty());
finalizedSorted.extractEmptyTo(empty);
// When marking begins, all arenas are moved from arenaLists to
// collectingArenaLists. When the mutator runs, new arenas are allocated in
// arenaLists. Now that finalization is complete, we want to merge these lists
// back together.
// We must take the GC lock to be able to safely modify the ArenaList;
// however, this does not by itself make the changes visible to all threads,
// as not all threads take the GC lock to read the ArenaLists.
// That safety is provided by the ReleaseAcquire memory ordering of the
// background finalize state, which we explicitly set as the final step.
{
AutoLockGC lock(rt);
MOZ_ASSERT(lists->concurrentUse(kind) ==
ArenaLists::ConcurrentUse::BackgroundFinalize);
lists->mergeFinalizedArenas(kind, finalizedSorted);
}
lists->concurrentUse(kind) = ArenaLists::ConcurrentUse::None;
}
// After finalizing arenas, merge the following to get the final state of an
// arena list:
// - arenas allocated during marking
// - arenas allocated during sweeping
// - finalized arenas
void ArenaLists::mergeFinalizedArenas(AllocKind kind,
SortedArenaList& finalizedArenas) {
#ifdef DEBUG
// Updating arena lists off-thread requires taking the GC lock because the
// main thread uses these when allocating.
if (IsBackgroundFinalized(kind)) {
runtimeFromAnyThread()->gc.assertCurrentThreadHasLockedGC();
}
MOZ_ASSERT(collectingArenaList(kind).isEmpty());
#endif
arenaList(kind).prepend(finalizedArenas.convertToArenaList());
}
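// Record the prop map arenas that the main thread will process during
// foreground sweeping.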
void ArenaLists::queueForegroundThingsForSweep() {
gcCompactPropMapArenasToUpdate =
collectingArenaList(AllocKind::COMPACT_PROP_MAP).first();
gcNormalPropMapArenasToUpdate =
collectingArenaList(AllocKind::NORMAL_PROP_MAP).first();
}
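// Finalize and release arenas for the given zones. This runs on the background
// sweep thread, or on the main thread when background threads are disabled.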
void GCRuntime::sweepBackgroundThings(ZoneList& zones) {
if (zones.isEmpty()) {
return;
}
JS::GCContext* gcx = TlsGCContext.get();
MOZ_ASSERT(gcx->isFinalizing());
// Sweep zones in order. The atoms zone must be finalized last as other
// zones may have direct pointers into it.
while (!zones.isEmpty()) {
Zone* zone = zones.removeFront();
MOZ_ASSERT(zone->isGCFinished());
TimeStamp startTime = TimeStamp::Now();
Arena* emptyArenas = zone->arenas.takeSweptEmptyArenas();
// We must finalize thing kinds in the order specified by
// BackgroundFinalizePhases.
for (const auto& phase : BackgroundFinalizePhases) {
for (auto kind : phase.kinds) {
backgroundFinalize(gcx, zone, kind, &emptyArenas);
}
}
// Release any arenas that are now empty.
//
// Empty arenas are only released after everything has been finalized so
// that it's still possible to get a thing's zone after the thing has been
// finalized. The HeapPtr destructor depends on this, and this allows
// HeapPtrs between things of different alloc kind regardless of
// finalization order.
//
// Batch releases so that the GC lock is periodically dropped and reacquired,
// avoiding blocking the main thread from allocating arenas. This is
// important for allocation-heavy workloads such as the splay benchmark.
//
// This block is equivalent to calling GCRuntime::releaseArena on each arena
// individually.
static constexpr size_t BatchSize = 32;
while (emptyArenas) {
Arena* arenasToRelease[BatchSize];
size_t count = 0;
size_t gcHeapBytesFreed = 0;
size_t mallocHeapBytesFreed = 0;
{
mozilla::Maybe<AutoLockGC> maybeLock;
if (zone->isAtomsZone()) {
// Required to synchronize access to AtomMarkingRuntime.
maybeLock.emplace(this);
}
// Take up to BatchSize arenas from emptyArenas list.
for (size_t i = 0; emptyArenas && i < BatchSize; i++) {
Arena* arena = emptyArenas;
emptyArenas = arena->next;
if (IsBufferAllocKind(arena->getAllocKind())) {
mallocHeapBytesFreed += ArenaSize - arena->getFirstThingOffset();
} else {
gcHeapBytesFreed += ArenaSize;
}
arena->release(this, maybeLock.ptrOr(nullptr));
arenasToRelease[i] = arena;
count++;
}
}
zone->mallocHeapSize.removeBytes(mallocHeapBytesFreed, true);
zone->gcHeapSize.removeBytes(gcHeapBytesFreed, true, heapSize);
AutoLockGC lock(this);
for (size_t i = 0; i < count; i++) {
Arena* arena = arenasToRelease[i];
arena->chunk()->releaseArena(this, arena, lock);
}
}
// Sweep the zone's buffer allocations. Since these have at most trivial
// finalizers, we could run this before the arena finalization above,
// potentially freeing more memory sooner. This could also happen in parallel
// with it.
zone->bufferAllocator.sweepForMajorCollection(shouldDecommit());
// Record time spent sweeping this zone.
TimeStamp endTime = TimeStamp::Now();
zone->perZoneGCTime += endTime - startTime;
}
}
void GCRuntime::assertBackgroundSweepingFinished() {
#ifdef DEBUG
{
AutoLockHelperThreadState lock;
MOZ_ASSERT(backgroundSweepZones.ref().isEmpty());
}
for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
for (auto kind : AllAllocKinds()) {
MOZ_ASSERT_IF(!(state() >= State::Prepare && state() <= State::Sweep),
zone->arenas.collectingArenaList(kind).isEmpty());
MOZ_ASSERT(zone->arenas.doneBackgroundFinalize(kind));
}
}
#endif
}
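// Queue zones for background sweeping and start the sweep task, or run the
// task synchronously when background threads are not in use.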
void GCRuntime::queueZonesAndStartBackgroundSweep(ZoneList&& zones) {
{
AutoLockHelperThreadState lock;
MOZ_ASSERT(!requestSliceAfterBackgroundTask);
backgroundSweepZones.ref().appendList(std::move(zones));
if (useBackgroundThreads) {
sweepTask.startOrRunIfIdle(lock);
}
}
if (!useBackgroundThreads) {
sweepTask.join();
sweepTask.runFromMainThread();
}
}
BackgroundSweepTask::BackgroundSweepTask(GCRuntime* gc)
: GCParallelTask(gc, gcstats::PhaseKind::SWEEP, GCUse::Finalizing) {}
void BackgroundSweepTask::run(AutoLockHelperThreadState& lock) {
gc->sweepFromBackgroundThread(lock);
}
void GCRuntime::sweepFromBackgroundThread(AutoLockHelperThreadState& lock) {
do {
ZoneList zones;
zones.appendList(std::move(backgroundSweepZones.ref()));
AutoUnlockHelperThreadState unlock(lock);
sweepBackgroundThings(zones);
// The main thread may call queueZonesAndStartBackgroundSweep() while this
// is running so we must check there is no more work after releasing the
// lock.
} while (!backgroundSweepZones.ref().isEmpty());
maybeRequestGCAfterBackgroundTask(lock);
}
void GCRuntime::waitBackgroundSweepEnd() {
sweepTask.join();
if (state() != State::Sweep) {
assertBackgroundSweepingFinished();
}
}
void GCRuntime::waitBackgroundDecommitEnd() { decommitTask.join(); }
void GCRuntime::startBackgroundFree() {
AutoLockHelperThreadState lock;
if (!hasBuffersForBackgroundFree()) {
return;
}
freeTask.startOrRunIfIdle(lock);
}
BackgroundFreeTask::BackgroundFreeTask(GCRuntime* gc)
: GCParallelTask(gc, gcstats::PhaseKind::NONE) {
// This can occur outside GCs so doesn't have a stats phase.
}
void BackgroundFreeTask::run(AutoLockHelperThreadState& lock) {
gc->freeFromBackgroundThread(lock);
}
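// Free all buffers queued for deferred freeing, looping in case the main
// thread queues more work while the helper thread lock is released.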
void GCRuntime::freeFromBackgroundThread(AutoLockHelperThreadState& lock) {
do {
LifoAlloc lifoBlocks(JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE,
js::BackgroundMallocArena);
lifoBlocks.transferFrom(&lifoBlocksToFree.ref());
Nursery::BufferSet buffers;
std::swap(buffers, buffersToFreeAfterMinorGC.ref());
Nursery::StringBufferVector stringBuffers;
std::swap(stringBuffers, stringBuffersToReleaseAfterMinorGC.ref());
SlimLinkedList<LargeBuffer> largeBuffers;
std::swap(largeBuffers, largeBuffersToFreeAfterMinorGC.ref());
AutoUnlockHelperThreadState unlock(lock);
lifoBlocks.freeAll();
JS::GCContext* gcx = TlsGCContext.get();
for (Nursery::BufferSet::Range r = buffers.all(); !r.empty();
r.popFront()) {
// Malloc memory associated with nursery objects is not tracked as these
// are assumed to be short lived.
gcx->freeUntracked(r.front());
}
for (auto* buffer : stringBuffers) {
buffer->Release();
}
BufferAllocator::FreeLargeAllocs(largeBuffers);
} while (hasBuffersForBackgroundFree());
}
void GCRuntime::waitBackgroundFreeEnd() { freeTask.join(); }
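// Mark weak references: iterate marking of weakmap entries and ephemeron edges
// until no new cells are marked, yielding if the slice budget is exhausted and
// incremental weakmap marking is enabled.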
template <class ZoneIterT>
IncrementalProgress GCRuntime::markWeakReferences(
SliceBudget& incrementalBudget) {
MOZ_ASSERT(!marker().isWeakMarking());
gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::MARK_WEAK);
auto unlimited = SliceBudget::unlimited();
SliceBudget& budget =
marker().incrementalWeakMapMarkingEnabled ? incrementalBudget : unlimited;
// Ensure we don't return to the mutator while we're still in weak marking
// mode.
auto leaveOnExit =
mozilla::MakeScopeExit([&] { marker().leaveWeakMarkingMode(); });
if (marker().enterWeakMarkingMode()) {
// If there was an 'enter-weak-marking-mode' token in the queue, then it
// and everything after it will still be in the queue so we can process
// them now.
while (processTestMarkQueue() == QueueYielded) {
};
// Do not rely on the information about not-yet-marked weak keys that have
// been collected by barriers. Clear out the gcEphemeronEdges entries and
// rebuild the full table. Note that this is a cross-zone operation; delegate
// zone entries will be populated by map zone traversals, so everything
// needs to be cleared first, then populated.
if (!marker().incrementalWeakMapMarkingEnabled) {
for (ZoneIterT zone(this); !zone.done(); zone.next()) {
zone->gcEphemeronEdges().clearAndCompact();
}
}
for (ZoneIterT zone(this); !zone.done(); zone.next()) {
if (zone->enterWeakMarkingMode(&marker(), budget) == NotFinished) {
return NotFinished;
}
}
}
bool markedAny = true;
while (markedAny) {
if (!marker().markUntilBudgetExhausted(budget)) {
MOZ_ASSERT(marker().incrementalWeakMapMarkingEnabled);
return NotFinished;
}
markedAny = false;
if (!marker().isWeakMarking()) {
for (ZoneIterT zone(this); !zone.done(); zone.next()) {
markedAny |= WeakMapBase::markZoneIteratively(zone, &marker());
}
}
markedAny |= jit::JitRuntime::MarkJitcodeGlobalTableIteratively(&marker());
}
assertNoMarkingWork();
return Finished;
}
IncrementalProgress GCRuntime::markWeakReferencesInCurrentGroup(
SliceBudget& budget) {
return markWeakReferences<SweepGroupZonesIter>(budget);
}
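// Mark gray roots: trace the embedding's gray roots with the gray mark color,
// then handle incoming cross-compartment edges.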
IncrementalProgress GCRuntime::markGrayRoots(SliceBudget& budget,
gcstats::PhaseKind phase) {
MOZ_ASSERT(marker().markColor() == MarkColor::Black);
gcstats::AutoPhase ap(stats(), phase);
{
AutoSetMarkColor setColorGray(marker(), MarkColor::Gray);
AutoUpdateLiveCompartments updateLive(this);
marker().setRootMarkingMode(true);
auto guard = mozilla::MakeScopeExit(
[this]() { marker().setRootMarkingMode(false); });
IncrementalProgress result =
traceEmbeddingGrayRoots(marker().tracer(), budget);
if (result == NotFinished) {
return NotFinished;
}
Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
marker().tracer(), Compartment::GrayEdges);
}
// Also mark any incoming cross compartment edges that were originally gray
// but have been marked black by a barrier.
Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
marker().tracer(), Compartment::BlackEdges);
return Finished;
}
IncrementalProgress GCRuntime::markAllWeakReferences() {
SliceBudget budget = SliceBudget::unlimited();
return markWeakReferences<GCZonesIter>(budget);
}
void GCRuntime::markAllGrayReferences(gcstats::PhaseKind phase) {
#ifdef DEBUG
// Check zones are in the correct state to be marked.
for (GCZonesIter zone(this); !zone.done(); zone.next()) {
MOZ_ASSERT(zone->isGCMarkingBlackAndGray());
}
#endif
SliceBudget budget = SliceBudget::unlimited();
markGrayRoots(budget, phase);
drainMarkStack();
}
void GCRuntime::dropStringWrappers() {
/*
* String "wrappers" are dropped on GC because their presence would require
* us to sweep the wrappers in all compartments every time we sweep a
* compartment group.
*/
for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
zone->dropStringWrappersOnGC();
}
}
/*
* Group zones that must be swept at the same time.
*
* From the point of view of the mutator, groups of zones transition atomically
* from marking to sweeping. If compartment A has an edge to an unmarked object
* in compartment B, then we must not start sweeping A in a later slice than we
* start sweeping B. That's because a write barrier in A could lead to the
* unmarked object in B becoming marked. However, if we had already swept that
* object, we would be in trouble.
*
* If we consider these dependencies as a graph, then all the compartments in
* any strongly-connected component of this graph must start sweeping in the
* same slice.
*
* Tarjan's algorithm is used to calculate the components.
*/
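// Add sweep group edges from this compartment's zone to the zones of objects
// it wraps, unless the wrapped object is already marked black.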
bool Compartment::findSweepGroupEdges() {
Zone* source = zone();
for (WrappedObjectCompartmentEnum e(this); !e.empty(); e.popFront()) {
Compartment* targetComp = e.front();
Zone* target = targetComp->zone();
if (!target->isGCMarking() || source->hasSweepGroupEdgeTo(target)) {
continue;
}
for (ObjectWrapperEnum e(this, targetComp); !e.empty(); e.popFront()) {
JSObject* key = e.front().mutableKey();
MOZ_ASSERT(key->zone() == target);
// Add an edge to the wrapped object's zone to ensure that the wrapper
// zone is not still being marked when we start sweeping the wrapped zone.
// As an optimization, if the wrapped object is already marked black there
// is no danger of later marking and we can skip this.
if (key->isMarkedBlack()) {
continue;
}
if (!source->addSweepGroupEdgeTo(target)) {
return false;
}
// We don't need to consider any more wrappers for this target
// compartment since we already added an edge.
break;
}
}
return true;
}
bool Zone::findSweepGroupEdges(Zone* atomsZone) {
MOZ_ASSERT_IF(this != atomsZone, !isAtomsZone());
#ifdef DEBUG
if (FinalizationObservers* observers = finalizationObservers()) {
observers->checkTables();
}
#endif
// Any zone may have a pointer to an atom in the atoms zone, and these aren't
// in the cross compartment map.
if (atomsZone->wasGCStarted() && !addSweepGroupEdgeTo(atomsZone)) {
return false;
}
for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
if (!comp->findSweepGroupEdges()) {
return false;
}
}
return WeakMapBase::findSweepGroupEdgesForZone(this);
}
bool GCRuntime::addEdgesForMarkQueue() {
#ifdef DEBUG
// For testing only.
//
// Add edges between all objects mentioned in the test mark queue, since
// otherwise they will get marked in a different order than their sweep
// groups. Note that this is only done at the beginning of an incremental
// collection, so it is possible for objects to be added later that do not
// follow the sweep group ordering. These objects will wait until their sweep
// group comes up, or will be skipped if their sweep group is already past.
JS::Zone* prevZone = nullptr;
for (Value val : testMarkQueue) {
if (!val.isObject()) {
continue;
}
JSObject* obj = &val.toObject();
JS::Zone* zone = obj->zone();
if (!zone->isGCMarking()) {
continue;
}
if (prevZone && prevZone != zone) {
if (!prevZone->addSweepGroupEdgeTo(zone)) {
return false;
}
}
prevZone = zone;
}
#endif
return true;
}
bool GCRuntime::findSweepGroupEdges() {
for (GCZonesIter zone(this); !zone.done(); zone.next()) {
if (!zone->findSweepGroupEdges(atomsZone())) {
return false;
}
}
if (!addEdgesForMarkQueue()) {
return false;
}
return DebugAPI::findSweepGroupEdges(rt);
}
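// Partition the collecting zones into sweep groups: each strongly connected
// component of the sweep group edge graph must start sweeping in the same
// slice.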
void GCRuntime::groupZonesForSweeping(JS::GCReason reason) {
#ifdef DEBUG
for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
MOZ_ASSERT(zone->gcSweepGroupEdges().empty());
}
#endif
JSContext* cx = rt->mainContextFromOwnThread();
ZoneComponentFinder finder(cx);
if (!isIncremental || !findSweepGroupEdges()) {
finder.useOneComponent();
}
// Use one component for zeal modes that yield at specific points.
if (useZeal && zealModeControlsYieldPoint()) {
finder.useOneComponent();
}
for (GCZonesIter zone(this); !zone.done(); zone.next()) {
MOZ_ASSERT(zone->isGCMarking());
finder.addNode(zone);
}
sweepGroups = finder.getResultsList();
currentSweepGroup = sweepGroups;
sweepGroupIndex = 1;
for (GCZonesIter zone(this); !zone.done(); zone.next()) {
zone->clearSweepGroupEdges();
}
#ifdef DEBUG
unsigned idx = sweepGroupIndex;
for (Zone* head = currentSweepGroup; head; head = head->nextGroup()) {
for (Zone* zone = head; zone; zone = zone->nextNodeInGroup()) {
MOZ_ASSERT(zone->isGCMarking());
zone->gcSweepGroupIndex = idx;
}
idx++;
}
MOZ_ASSERT_IF(!isIncremental, !currentSweepGroup->nextGroup());
for (ZonesIter zone(this, WithAtoms); !zone.done(); zone.next()) {
MOZ_ASSERT(zone->gcSweepGroupEdges().empty());
}
#endif
}
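// Advance to the next sweep group. Non-incremental collections merge the
// remaining groups into one; if sweeping is being aborted, collection of the
// remaining zones is abandoned.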
void GCRuntime::moveToNextSweepGroup() {
currentSweepGroup = currentSweepGroup->nextGroup();
++sweepGroupIndex;
if (!currentSweepGroup) {
abortSweepAfterCurrentGroup = false;
return;
}
MOZ_ASSERT_IF(abortSweepAfterCurrentGroup, !isIncremental);
if (!isIncremental) {
ZoneComponentFinder::mergeGroups(currentSweepGroup);
}
for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
MOZ_ASSERT(zone->gcState() == zone->initialMarkingState());
MOZ_ASSERT(!zone->isQueuedForBackgroundSweep());
}
if (abortSweepAfterCurrentGroup) {
markTask.join();
// Abort collection of subsequent sweep groups.
for (SweepGroupZonesIter zone(this); !zone.done(); zone.next()) {
MOZ_ASSERT(!zone->gcNextGraphComponent);
zone->changeGCState(zone->initialMarkingState(), Zone::Finished);
zone->arenas.unmarkPreMarkedFreeCells();
zone->arenas.mergeArenasFromCollectingLists();
zone->clearGCSliceThresholds();
}
for (SweepGroupCompartmentsIter comp(rt); !comp.done(); comp.next()) {
resetGrayList(comp);
}
abortSweepAfterCurrentGroup = false;
currentSweepGroup = nullptr;
}
}
/*
* Gray marking:
*
* At the end of collection, anything reachable from a gray root that has not
* otherwise been marked black must be marked gray.
*
* This means that when marking things gray we must not allow marking to leave
* the current compartment group, as that could result in things being marked
* gray when they might subsequently be marked black. To achieve this, when we
* find a cross compartment pointer we don't mark the referent but add it to a
* singly-linked list of incoming gray pointers that is stored with each
* compartment.
*
* The list head is stored in Compartment::gcIncomingGrayPointers and contains
* cross compartment wrapper objects. The next pointer is stored in the second
* extra slot of the cross compartment wrapper.
*
* The list is created during gray marking when one of the
* MarkCrossCompartmentXXX functions is called for a pointer that leaves the
* current compartment group. This calls DelayCrossCompartmentGrayMarking to
* push the referring object onto the list.
*
* The list is traversed and then unlinked in
* GCRuntime::markIncomingGrayCrossCompartmentPointers.
*/
static bool IsGrayListObject(JSObject* obj) {
MOZ_ASSERT(obj);
return obj->is<CrossCompartmentWrapperObject>() && !IsDeadProxyObject(obj);
}
/* static */
unsigned ProxyObject::grayLinkReservedSlot(JSObject* obj) {
MOZ_ASSERT(IsGrayListObject(obj));
return CrossCompartmentWrapperObject::GrayLinkReservedSlot;
}
#ifdef DEBUG
static void AssertNotOnGrayList(JSObject* obj) {
MOZ_ASSERT_IF(
IsGrayListObject(obj),
GetProxyReservedSlot(obj, ProxyObject::grayLinkReservedSlot(obj))
.isUndefined());
}
#endif
static void AssertNoWrappersInGrayList(JSRuntime* rt) {
#ifdef DEBUG
for (CompartmentsIter c(rt); !c.done(); c.next()) {
MOZ_ASSERT(!c->gcIncomingGrayPointers);
for (Compartment::ObjectWrapperEnum e(c); !e.empty(); e.popFront()) {
AssertNotOnGrayList(e.front().value().unbarrieredGet());
}
}
#endif
}
static JSObject* CrossCompartmentPointerReferent(JSObject* obj) {
MOZ_ASSERT(IsGrayListObject(obj));
return &obj->as<ProxyObject>().private_().toObject();
}
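// Walk the list of incoming gray cross-compartment pointers threaded through
// the wrappers' gray link slot, unlinking entries when |unlink| is true.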
static JSObject* NextIncomingCrossCompartmentPointer(JSObject* prev,
bool unlink) {
unsigned slot = ProxyObject::grayLinkReservedSlot(prev);
JSObject* next = GetProxyReservedSlot(prev, slot).toObjectOrNull();
MOZ_ASSERT_IF(next, IsGrayListObject(next));