Source code

Revision control

Other Tools

1
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sw=2 et tw=80:
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */
7
8
#include "gc/Nursery-inl.h"
9
10
#include "mozilla/DebugOnly.h"
11
#include "mozilla/IntegerPrintfMacros.h"
12
#include "mozilla/Move.h"
13
#include "mozilla/Unused.h"
14
15
#include <algorithm>
16
17
#include "builtin/MapObject.h"
18
#include "debugger/DebugAPI.h"
19
#include "gc/FreeOp.h"
20
#include "gc/GCInternals.h"
21
#include "gc/Memory.h"
22
#include "gc/PublicIterators.h"
23
#include "jit/JitFrames.h"
24
#include "jit/JitRealm.h"
25
#include "util/Poison.h"
26
#include "vm/ArrayObject.h"
27
#if defined(DEBUG)
28
# include "vm/EnvironmentObject.h"
29
#endif
30
#include "vm/JSONPrinter.h"
31
#include "vm/Realm.h"
32
#include "vm/Time.h"
33
#include "vm/TypedArrayObject.h"
34
#include "vm/TypeInference.h"
35
36
#include "gc/Marking-inl.h"
37
#include "gc/Zone-inl.h"
38
#include "vm/NativeObject-inl.h"
39
40
using namespace js;
41
using namespace gc;
42
43
using mozilla::DebugOnly;
44
using mozilla::PodCopy;
45
using mozilla::TimeDuration;
46
using mozilla::TimeStamp;
47
48
#ifdef JS_GC_ZEAL
49
constexpr uintptr_t CanaryMagicValue = 0xDEADB15D;
50
51
struct js::Nursery::Canary {
52
uintptr_t magicValue;
53
Canary* next;
54
};
55
#endif
56
57
namespace js {
// One chunk of nursery memory: a usable data region followed by the standard
// gc::ChunkTrailer, so a NurseryChunk overlays a gc::Chunk exactly (see the
// static_assert below).
struct NurseryChunk {
  char data[Nursery::NurseryChunkUsableSize];
  gc::ChunkTrailer trailer;

  // Reinterpret a GC chunk as a nursery chunk without initialization.
  static NurseryChunk* fromChunk(gc::Chunk* chunk);

  // Poison the first |size| bytes with the fresh-nursery pattern and
  // (re)construct the trailer.
  void poisonAndInit(JSRuntime* rt, size_t size = ChunkSize);
  void poisonRange(size_t from, size_t size, uint8_t value,
                   MemCheckKind checkKind);
  void poisonAfterEvict(size_t extent = ChunkSize);

  // The end of the range is always ChunkSize - ArenaSize.
  void markPagesUnusedHard(size_t from);
  // The start of the range is always the beginning of the chunk.
  MOZ_MUST_USE bool markPagesInUseHard(size_t to);

  // First usable address of the chunk.
  uintptr_t start() const { return uintptr_t(&data); }
  // One-past-the-end of the usable region (start of the trailer).
  uintptr_t end() const { return uintptr_t(&trailer); }

  // Convert back to a GC chunk, re-initializing the chunk header.
  gc::Chunk* toChunk(GCRuntime* gc);
};
static_assert(sizeof(js::NurseryChunk) == gc::ChunkSize,
              "Nursery chunk size must match gc::Chunk size.");

}  // namespace js
80
81
// Poison the first |size| bytes with the fresh-nursery pattern, then rebuild
// the trailer so the chunk is attached to this runtime's store buffer.
inline void js::NurseryChunk::poisonAndInit(JSRuntime* rt, size_t size) {
  poisonRange(0, size, JS_FRESH_NURSERY_PATTERN, MemCheckKind::MakeUndefined);
  // When size == ChunkSize the range above covers the trailer; make it
  // addressable again for the placement-new below.
  MOZ_MAKE_MEM_UNDEFINED(&trailer, sizeof(trailer));
  new (&trailer) gc::ChunkTrailer(rt, &rt->gc.storeBuffer());
}
86
87
// Fill |size| bytes starting at byte offset |from| with |value| and set the
// memory-sanitizer state of the range to |checkKind|.
inline void js::NurseryChunk::poisonRange(size_t from, size_t size,
                                          uint8_t value,
                                          MemCheckKind checkKind) {
  MOZ_ASSERT(from <= js::Nursery::NurseryChunkUsableSize);
  MOZ_ASSERT(from + size <= ChunkSize);

  uint8_t* start = reinterpret_cast<uint8_t*>(this) + from;

  // We can poison the same chunk more than once, so first make sure memory
  // sanitizers will let us poison it.
  MOZ_MAKE_MEM_UNDEFINED(start, size);
  Poison(start, value, size, checkKind);
}
100
101
// After evicting live cells, poison the first |extent| bytes with the
// swept-nursery pattern and mark them no-access for sanitizers.
inline void js::NurseryChunk::poisonAfterEvict(size_t extent) {
  MOZ_ASSERT(extent <= ChunkSize);
  poisonRange(0, extent, JS_SWEPT_NURSERY_PATTERN, MemCheckKind::MakeNoAccess);
}
105
106
// Decommit the pages from byte offset |from| up to the trailer arena
// (ChunkSize - ArenaSize); the trailer itself is kept committed.
inline void js::NurseryChunk::markPagesUnusedHard(size_t from) {
  MOZ_ASSERT(from < ChunkSize - ArenaSize);
  MarkPagesUnusedHard(reinterpret_cast<void*>(start() + from),
                      ChunkSize - ArenaSize - from);
}
111
112
// Recommit the first |to| bytes of the chunk. Returns false on failure
// (e.g. out of memory / address space).
inline bool js::NurseryChunk::markPagesInUseHard(size_t to) {
  MOZ_ASSERT(to <= ChunkSize - ArenaSize);
  return MarkPagesInUseHard(reinterpret_cast<void*>(start()), to);
}
116
117
// static
// Pure reinterpretation: a NurseryChunk has the same size and layout as a
// gc::Chunk, so no initialization is performed here.
inline js::NurseryChunk* js::NurseryChunk::fromChunk(Chunk* chunk) {
  return reinterpret_cast<NurseryChunk*>(chunk);
}
121
122
// Convert this nursery chunk back into a GC chunk, re-initializing the chunk
// header so it can be returned to the GC's chunk pool.
inline Chunk* js::NurseryChunk::toChunk(GCRuntime* gc) {
  auto chunk = reinterpret_cast<Chunk*>(this);
  chunk->init(gc);
  return chunk;
}
127
128
// Queue a whole chunk for decommit on the background thread. Must be called
// with the helper-thread lock held (witnessed by |lock|).
void js::NurseryDecommitTask::queueChunk(
    NurseryChunk* nchunk, const AutoLockHelperThreadState& lock) {
  // Using the chunk pointers to build the queue is infallible.
  Chunk* chunk = nchunk->toChunk(gc);
  chunk->info.prev = nullptr;
  chunk->info.next = queue;  // Push onto the head of the singly-used list.
  queue = chunk;
}
136
137
// Record a request to decommit the tail of |newChunk| beyond |newCapacity|.
// Only one partial-chunk request is held at a time; a new request replaces
// (or clears) the previous one for the same chunk.
void js::NurseryDecommitTask::queueRange(
    size_t newCapacity, NurseryChunk& newChunk,
    const AutoLockHelperThreadState& lock) {
  MOZ_ASSERT(!partialChunk || partialChunk == &newChunk);

  // Only save this to decommit later if there's at least one page to
  // decommit.
  if (RoundUp(newCapacity, SystemPageSize()) >=
      RoundDown(Nursery::NurseryChunkUsableSize, SystemPageSize())) {
    // Clear the existing decommit request because it may be a larger request
    // for the same chunk.
    partialChunk = nullptr;
    return;
  }
  partialChunk = &newChunk;
  partialCapacity = newCapacity;
}
154
155
// Pop the next whole chunk queued for decommit, or return nullptr when the
// queue is empty. The helper-thread lock must be held.
Chunk* js::NurseryDecommitTask::popChunk(
    const AutoLockHelperThreadState& lock) {
  Chunk* head = queue;
  if (!head) {
    return nullptr;
  }

  // Unlink the head and clear its list pointer before handing it out.
  queue = head->info.next;
  head->info.next = nullptr;
  MOZ_ASSERT(head->info.prev == nullptr);
  return head;
}
167
168
// Background-thread entry point: drain the whole-chunk queue and any pending
// partial-chunk decommit request, then mark the task as finishing.
void js::NurseryDecommitTask::run() {
  Chunk* chunk;

  {
    AutoLockHelperThreadState lock;

    while ((chunk = popChunk(lock)) || partialChunk) {
      if (chunk) {
        // Drop the lock while doing the (slow) decommit so the main thread
        // can keep queueing work.
        AutoUnlockHelperThreadState unlock(lock);
        decommitChunk(chunk);
        continue;
      }

      if (partialChunk) {
        // decommitRange manages its own lock hand-off internally.
        decommitRange(lock);
        continue;
      }
    }

    setFinishing(lock);
  }
}
190
191
// Decommit all arenas of |chunk| and return it to the GC's chunk pool.
// Called on the background thread with no locks held.
void js::NurseryDecommitTask::decommitChunk(Chunk* chunk) {
  chunk->decommitAllArenas();
  {
    AutoLockGC lock(gc);
    gc->recycleChunk(chunk, lock);
  }
}
198
199
// Decommit the unused tail of the pending partial chunk. Takes the
// helper-thread lock held and temporarily releases it around the page
// operation.
void js::NurseryDecommitTask::decommitRange(AutoLockHelperThreadState& lock) {
  // Clear this field here before releasing the lock. While the lock is
  // released the main thread may make new decommit requests or update the range
  // of the current requested chunk, but it won't attempt to use any
  // might-be-decommitted-soon memory.
  NurseryChunk* thisPartialChunk = partialChunk;
  size_t thisPartialCapacity = partialCapacity;
  partialChunk = nullptr;
  {
    AutoUnlockHelperThreadState unlock(lock);
    thisPartialChunk->markPagesUnusedHard(thisPartialCapacity);
  }
}
212
213
// Construct a disabled, zero-capacity nursery. Real setup happens in init().
// The MOZ_NURSERY_STRINGS environment variable can force nursery string
// allocation on ('1') or off (any other non-empty value).
js::Nursery::Nursery(GCRuntime* gc)
    : gc(gc),
      position_(0),
      currentStartChunk_(0),
      currentStartPosition_(0),
      currentEnd_(0),
      currentStringEnd_(0),
      currentChunk_(0),
      capacity_(0),
      timeInChunkAlloc_(0),
      profileThreshold_(0),
      enableProfiling_(false),
      canAllocateStrings_(true),
      reportTenurings_(0),
      minorGCTriggerReason_(JS::GCReason::NO_REASON),
      decommitTask(gc)
#ifdef JS_GC_ZEAL
      ,
      lastCanary_(nullptr)
#endif
{
  const char* env = getenv("MOZ_NURSERY_STRINGS");
  if (env && *env) {
    canAllocateStrings_ = (*env == '1');
  }
}
239
240
// One-time setup: allocate the first chunk, set up allocation pointers, read
// profiling/reporting environment variables, and enable the store buffer.
// Returns false on allocation failure (the nursery stays disabled).
bool js::Nursery::init(AutoLockGCBgAlloc& lock) {
  // The nursery is permanently disabled when recording or replaying. Nursery
  // collections may occur at non-deterministic points in execution.
  if (mozilla::recordreplay::IsRecordingOrReplaying()) {
    return true;
  }

  capacity_ = roundSize(tunables().gcMinNurseryBytes());
  if (!allocateNextChunk(0, lock)) {
    capacity_ = 0;
    return false;
  }
  // After this point the Nursery has been enabled.

  setCurrentChunk(0);
  setStartPosition();
  poisonAndInitCurrentChunk();

  // JS_GC_PROFILE_NURSERY=N: report minor GCs taking at least N microseconds.
  char* env = getenv("JS_GC_PROFILE_NURSERY");
  if (env) {
    if (0 == strcmp(env, "help")) {
      fprintf(stderr,
              "JS_GC_PROFILE_NURSERY=N\n"
              "\tReport minor GC's taking at least N microseconds.\n");
      exit(0);
    }
    enableProfiling_ = true;
    profileThreshold_ = TimeDuration::FromMicroseconds(atoi(env));
  }

  // JS_GC_REPORT_TENURING=N: report groups with >= N instances tenured.
  env = getenv("JS_GC_REPORT_TENURING");
  if (env) {
    if (0 == strcmp(env, "help")) {
      fprintf(stderr,
              "JS_GC_REPORT_TENURING=N\n"
              "\tAfter a minor GC, report any ObjectGroups with at least N "
              "instances tenured.\n");
      exit(0);
    }
    reportTenurings_ = atoi(env);
  }

  if (!gc->storeBuffer().enable()) {
    return false;
  }

  MOZ_ASSERT(isEnabled());
  return true;
}
289
290
js::Nursery::~Nursery() {
  // Release all chunks and reset allocation state before destruction.
  disable();
}
291
292
// (Re-)enable a disabled nursery: allocate the first chunk, reset allocation
// pointers, and re-enable the store buffer. No-op if already enabled or when
// recording/replaying. Silently leaves the nursery disabled on OOM.
void js::Nursery::enable() {
  MOZ_ASSERT(isEmpty());
  MOZ_ASSERT(!gc->isVerifyPreBarriersEnabled());
  if (isEnabled() || mozilla::recordreplay::IsRecordingOrReplaying()) {
    return;
  }

  {
    AutoLockGCBgAlloc lock(gc);
    capacity_ = roundSize(tunables().gcMinNurseryBytes());
    if (!allocateNextChunk(0, lock)) {
      capacity_ = 0;
      return;
    }
  }

  setCurrentChunk(0);
  setStartPosition();
  poisonAndInitCurrentChunk();
#ifdef JS_GC_ZEAL
  if (gc->hasZealMode(ZealMode::GenerationalGC)) {
    enterZealMode();
  }
#endif

  MOZ_ALWAYS_TRUE(gc->storeBuffer().enable());
}
319
320
// Disable the nursery: free all chunks, zero the allocation pointers (JIT'd
// code reads currentEnd_ even when disabled), and disable the store buffer.
void js::Nursery::disable() {
  MOZ_ASSERT(isEmpty());
  if (!isEnabled()) {
    return;
  }

  // Freeing the chunks must not race with decommitting part of one of our
  // chunks. So join the decommitTask here and also below.
  decommitTask.join();
  freeChunksFrom(0);
  capacity_ = 0;

  // We must reset currentEnd_ so that there is no space for anything in the
  // nursery. JIT'd code uses this even if the nursery is disabled.
  currentEnd_ = 0;
  currentStringEnd_ = 0;
  position_ = 0;
  gc->storeBuffer().disable();

  // Second join: freeChunksFrom() may have queued more decommit work above.
  decommitTask.join();
}
341
342
// Allow nursery allocation of strings; exposes the current chunk end as the
// string allocation limit used by jitted code.
void js::Nursery::enableStrings() {
  MOZ_ASSERT(isEmpty());
  canAllocateStrings_ = true;
  currentStringEnd_ = currentEnd_;
}
347
348
// Forbid nursery allocation of strings; a zero limit makes all jitted string
// allocation paths fall back to the tenured heap.
void js::Nursery::disableStrings() {
  MOZ_ASSERT(isEmpty());
  canAllocateStrings_ = false;
  currentStringEnd_ = 0;
}
353
354
// True when nothing has been allocated since the last collection (or when
// the nursery is disabled entirely).
bool js::Nursery::isEmpty() const {
  if (!isEnabled()) {
    return true;
  }

  // Outside generational-GC zeal mode the start position is always the
  // beginning of chunk 0.
  if (!gc->hasZealMode(ZealMode::GenerationalGC)) {
    MOZ_ASSERT(currentStartChunk_ == 0);
    MOZ_ASSERT(currentStartPosition_ == chunk(0).start());
  }
  return position() == currentStartPosition_;
}
365
366
#ifdef JS_GC_ZEAL
// Expand the nursery to its maximum size for generational-GC zeal mode. In
// sub-chunk mode the unused tail of chunk 0 must first be recommitted and
// freshly poisoned.
void js::Nursery::enterZealMode() {
  if (isEnabled()) {
    MOZ_ASSERT(isEmpty());
    if (isSubChunkMode()) {
      // The poisoning call below must not race with background decommit,
      // which could be attempting to decommit the currently-unused part of this
      // chunk.
      decommitTask.join();
      {
        AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!chunk(0).markPagesInUseHard(ChunkSize - ArenaSize)) {
          oomUnsafe.crash("Out of memory trying to extend chunk for zeal mode");
        }
      }

      // It'd be simpler to poison the whole chunk, but we can't do that
      // because the nursery might be partially used.
      chunk(0).poisonRange(capacity_, NurseryChunkUsableSize - capacity_,
                           JS_FRESH_NURSERY_PATTERN,
                           MemCheckKind::MakeUndefined);
    }
    capacity_ = RoundUp(tunables().gcMaxNurseryBytes(), ChunkSize);
    setCurrentEnd();
  }
}
392
393
// Undo enterZealMode: reset allocation to the start of chunk 0 and re-poison
// it. capacity_ is presumably shrunk elsewhere — TODO confirm against callers.
void js::Nursery::leaveZealMode() {
  if (isEnabled()) {
    MOZ_ASSERT(isEmpty());
    setCurrentChunk(0);
    setStartPosition();
    poisonAndInitCurrentChunk();
  }
}
#endif  // JS_GC_ZEAL
402
403
// Allocate a JSObject of |size| bytes in the nursery, plus |nDynamicSlots|
// out-of-line slots when requested. Returns nullptr on failure; a partially
// allocated object may be left behind in that case (see comment below).
JSObject* js::Nursery::allocateObject(JSContext* cx, size_t size,
                                      size_t nDynamicSlots,
                                      const JSClass* clasp) {
  // Ensure there's enough space to replace the contents with a
  // RelocationOverlay.
  MOZ_ASSERT(size >= sizeof(RelocationOverlay));

  // Sanity check the finalizer.
  MOZ_ASSERT_IF(clasp->hasFinalize(),
                CanNurseryAllocateFinalizedClass(clasp) || clasp->isProxy());

  // Make the object allocation.
  JSObject* obj = static_cast<JSObject*>(allocate(size));
  if (!obj) {
    return nullptr;
  }

  // If we want external slots, add them.
  HeapSlot* slots = nullptr;
  if (nDynamicSlots) {
    MOZ_ASSERT(clasp->isNative());
    slots = static_cast<HeapSlot*>(
        allocateBuffer(cx->zone(), nDynamicSlots * sizeof(HeapSlot)));
    if (!slots) {
      // It is safe to leave the allocated object uninitialized, since we
      // do not visit unallocated things in the nursery.
      return nullptr;
    }
  }

  // Store slots pointer directly in new object. If no dynamic slots were
  // requested, caller must initialize slots_ field itself as needed. We
  // don't know if the caller was a native object or not.
  if (nDynamicSlots) {
    static_cast<NativeObject*>(obj)->initSlots(slots);
  }

  gcTracer.traceNurseryAlloc(obj, size);
  return obj;
}
443
444
// Allocate a nursery string cell of |size| bytes. The cell is preceded by a
// StringLayout header carrying the zone pointer; the returned Cell* points
// past that header.
Cell* js::Nursery::allocateString(Zone* zone, size_t size, AllocKind kind) {
  // Ensure there's enough space to replace the contents with a
  // RelocationOverlay.
  MOZ_ASSERT(size >= sizeof(RelocationOverlay));

  // The -1 accounts for the header's trailing cell storage overlapping the
  // cell itself — assumes StringLayout ends with a 1-byte cell member; TODO
  // confirm against the StringLayout declaration.
  size_t allocSize = RoundUp(sizeof(StringLayout) - 1 + size, CellAlignBytes);
  auto header = static_cast<StringLayout*>(allocate(allocSize));
  if (!header) {
    return nullptr;
  }
  header->zone = zone;

  auto cell = reinterpret_cast<Cell*>(&header->cell);
  gcTracer.traceNurseryAlloc(cell, kind);
  return cell;
}
460
461
// Core bump allocator. Returns |size| bytes (CellAlign-aligned) from the
// current chunk, advancing to — and lazily allocating — the next chunk when
// the current one is full. Returns nullptr when the nursery is exhausted.
void* js::Nursery::allocate(size_t size) {
  MOZ_ASSERT(isEnabled());
  MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
  MOZ_ASSERT_IF(currentChunk_ == currentStartChunk_,
                position() >= currentStartPosition_);
  MOZ_ASSERT(position() % CellAlignBytes == 0);
  MOZ_ASSERT(size % CellAlignBytes == 0);

#ifdef JS_GC_ZEAL
  // In CheckNursery zeal mode every allocation carries a trailing canary.
  static const size_t CanarySize =
      (sizeof(Nursery::Canary) + CellAlignBytes - 1) & ~CellAlignMask;
  if (gc->hasZealMode(ZealMode::CheckNursery)) {
    size += CanarySize;
  }
#endif

  // Slow path: the request doesn't fit in the current chunk.
  if (currentEnd() < position() + size) {
    unsigned chunkno = currentChunk_ + 1;
    MOZ_ASSERT(chunkno <= maxChunkCount());
    MOZ_ASSERT(chunkno <= allocatedChunkCount());
    if (chunkno == maxChunkCount()) {
      return nullptr;
    }
    if (MOZ_UNLIKELY(chunkno == allocatedChunkCount())) {
      // Need a brand new chunk; time spent here is reported separately in
      // the profile via timeInChunkAlloc_.
      mozilla::TimeStamp start = ReallyNow();
      {
        AutoLockGCBgAlloc lock(gc);
        if (!allocateNextChunk(chunkno, lock)) {
          return nullptr;
        }
      }
      timeInChunkAlloc_ += ReallyNow() - start;
      MOZ_ASSERT(chunkno < allocatedChunkCount());
    }
    setCurrentChunk(chunkno);
    poisonAndInitCurrentChunk();
  }

  void* thing = (void*)position();
  position_ = position() + size;
  // We count this regardless of the profiler's state, assuming that it costs
  // just as much to count it, as to check the profiler's state and decide not
  // to count it.
  stats().noteNurseryAlloc();

  DebugOnlyPoison(thing, JS_ALLOCATED_NURSERY_PATTERN, size,
                  MemCheckKind::MakeUndefined);

#ifdef JS_GC_ZEAL
  // Write the canary at the end of the allocation and link it into the list
  // walked at the start of the next minor GC.
  if (gc->hasZealMode(ZealMode::CheckNursery)) {
    auto canary = reinterpret_cast<Canary*>(position() - CanarySize);
    canary->magicValue = CanaryMagicValue;
    canary->next = nullptr;
    if (lastCanary_) {
      MOZ_ASSERT(!lastCanary_->next);
      lastCanary_->next = canary;
    }
    lastCanary_ = canary;
  }
#endif

  return thing;
}
525
526
// Allocate a raw buffer owned by a nursery cell: small requests come from the
// nursery itself, larger (or failed) requests fall back to malloc and are
// tracked in mallocedBuffers so they can be freed at the next minor GC.
void* js::Nursery::allocateBuffer(Zone* zone, size_t nbytes) {
  MOZ_ASSERT(nbytes > 0);

  if (nbytes <= MaxNurseryBufferSize) {
    void* buffer = allocate(nbytes);
    if (buffer) {
      return buffer;
    }
  }

  void* buffer = zone->pod_malloc<uint8_t>(nbytes);
  if (buffer && !registerMallocedBuffer(buffer)) {
    js_free(buffer);
    return nullptr;
  }
  return buffer;
}
543
544
// Allocate a raw buffer owned by |obj|. Nursery-resident owners go through
// the zone-based path (which may place the buffer in the nursery); tenured
// owners get plain malloc'd memory from their zone.
void* js::Nursery::allocateBuffer(JSObject* obj, size_t nbytes) {
  MOZ_ASSERT(obj);
  MOZ_ASSERT(nbytes > 0);

  if (IsInsideNursery(obj)) {
    return allocateBuffer(obj->zone(), nbytes);
  }
  return obj->zone()->pod_malloc<uint8_t>(nbytes);
}
553
554
// Like allocateBuffer but never falls back to malloc for nursery-resident
// owners (hence the MaxNurseryBufferSize cap): the buffer is always allocated
// in the same location class as the owning object.
void* js::Nursery::allocateBufferSameLocation(JSObject* obj, size_t nbytes) {
  MOZ_ASSERT(obj);
  MOZ_ASSERT(nbytes > 0);
  MOZ_ASSERT(nbytes <= MaxNurseryBufferSize);

  if (!IsInsideNursery(obj)) {
    return obj->zone()->pod_malloc<uint8_t>(nbytes);
  }

  return allocate(nbytes);
}
565
566
// As allocateBuffer(Zone*, size_t), but the returned memory is zeroed:
// nursery allocations are memset, malloc fallbacks use calloc.
void* js::Nursery::allocateZeroedBuffer(
    Zone* zone, size_t nbytes, arena_id_t arena /*= js::MallocArena*/) {
  MOZ_ASSERT(nbytes > 0);

  if (nbytes <= MaxNurseryBufferSize) {
    void* buffer = allocate(nbytes);
    if (buffer) {
      memset(buffer, 0, nbytes);
      return buffer;
    }
  }

  void* buffer = zone->pod_arena_calloc<uint8_t>(arena, nbytes);
  if (buffer && !registerMallocedBuffer(buffer)) {
    js_free(buffer);
    return nullptr;
  }
  return buffer;
}
585
586
void* js::Nursery::allocateZeroedBuffer(
587
JSObject* obj, size_t nbytes, arena_id_t arena /*= js::MallocArena*/) {
588
MOZ_ASSERT(obj);
589
MOZ_ASSERT(nbytes > 0);
590
591
if (!IsInsideNursery(obj)) {
592
return obj->zone()->pod_arena_calloc<uint8_t>(arena, nbytes);
593
}
594
return allocateZeroedBuffer(obj->zone(), nbytes, arena);
595
}
596
597
// Grow or shrink a buffer owned by |obj|, preserving its contents. Handles
// all four owner/buffer location combinations; the old buffer is only
// logically abandoned (never freed) when it lives inside the nursery.
void* js::Nursery::reallocateBuffer(JSObject* obj, void* oldBuffer,
                                    size_t oldBytes, size_t newBytes) {
  if (!IsInsideNursery(obj)) {
    return obj->zone()->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes,
                                             newBytes);
  }

  if (!isInside(oldBuffer)) {
    // Nursery object with a malloced buffer: realloc and keep the
    // malloced-buffers registry pointing at the new address.
    void* newBuffer = obj->zone()->pod_realloc<uint8_t>((uint8_t*)oldBuffer,
                                                        oldBytes, newBytes);
    if (newBuffer && oldBuffer != newBuffer) {
      MOZ_ALWAYS_TRUE(mallocedBuffers.rekeyAs(oldBuffer, newBuffer, newBuffer));
    }
    return newBuffer;
  }

  // The nursery cannot make use of the returned slots data.
  if (newBytes < oldBytes) {
    return oldBuffer;
  }

  void* newBuffer = allocateBuffer(obj->zone(), newBytes);
  if (newBuffer) {
    PodCopy((uint8_t*)newBuffer, (uint8_t*)oldBuffer, oldBytes);
  }
  return newBuffer;
}
624
625
// Free a buffer previously handed out by allocateBuffer. Buffers inside the
// nursery are not freed individually — their memory is reclaimed wholesale at
// the next minor GC — so only malloced buffers are released here.
void js::Nursery::freeBuffer(void* buffer) {
  if (!isInside(buffer)) {
    removeMallocedBuffer(buffer);
    js_free(buffer);
  }
}
631
632
// Record, in the side table, that the nursery buffer at |oldData| has moved
// to |newData|. Used when the forwarding pointer cannot be stored in the old
// buffer itself. Crashes on OOM (callers cannot recover mid-collection).
void Nursery::setIndirectForwardingPointer(void* oldData, void* newData) {
  MOZ_ASSERT(isInside(oldData));

  // Bug 1196210: If a zero-capacity header lands in the last 2 words of a
  // jemalloc chunk abutting the start of a nursery chunk, the (invalid)
  // newData pointer will appear to be "inside" the nursery.
  MOZ_ASSERT(!isInside(newData) || (uintptr_t(newData) & ChunkMask) == 0);

  AutoEnterOOMUnsafeRegion oomUnsafe;
#ifdef DEBUG
  // Re-registering the same mapping is allowed, but it must not change.
  if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(oldData)) {
    MOZ_ASSERT(p->value() == newData);
  }
#endif
  if (!forwardedBuffers.put(oldData, newData)) {
    oomUnsafe.crash("Nursery::setForwardingPointer");
  }
}
650
651
#ifdef DEBUG
652
// Debug-only probe: verify that |ptr| is writable by performing an idempotent
// volatile read-modify-write. Crashes (rather than returning false) if the
// address is not writable.
static bool IsWriteableAddress(void* ptr) {
  auto* word = reinterpret_cast<volatile uint64_t*>(ptr);
  *word = *word;  // Self-assignment touches the memory without changing it.
  return true;
}
657
#endif
658
659
// Update a slots/elements pointer that may still point into the nursery after
// the owning object was tenured. The new location is found either in the
// forwardedBuffers side table or stored inline at the start of the old buffer.
void js::Nursery::forwardBufferPointer(HeapSlot** pSlotsElems) {
  HeapSlot* old = *pSlotsElems;

  if (!isInside(old)) {
    return;
  }

  // The new location for this buffer is either stored inline with it or in
  // the forwardedBuffers table.
  do {
    if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(old)) {
      *pSlotsElems = reinterpret_cast<HeapSlot*>(p->value());
      break;
    }

    // Inline forwarding pointer: the first word of the old buffer holds the
    // new address.
    *pSlotsElems = *reinterpret_cast<HeapSlot**>(old);
  } while (false);

  MOZ_ASSERT(!isInside(*pSlotsElems));
  MOZ_ASSERT(IsWriteableAddress(*pSlotsElems));
}
680
681
// Tracer that moves live nursery cells into the tenured heap. The obj/string
// head+tail pairs implement queues of promoted cells awaiting fixup
// (tail points at the last 'next' slot for O(1) append).
js::TenuringTracer::TenuringTracer(JSRuntime* rt, Nursery* nursery)
    : JSTracer(rt, JSTracer::TracerKindTag::Tenuring, TraceWeakMapKeysValues),
      nursery_(*nursery),
      tenuredSize(0),
      tenuredCells(0),
      objHead(nullptr),
      objTail(&objHead),
      stringHead(nullptr),
      stringTail(&stringHead) {}
690
691
inline float js::Nursery::calcPromotionRate(bool* validForTenuring) const {
692
float used = float(previousGC.nurseryUsedBytes);
693
float capacity = float(previousGC.nurseryCapacity);
694
float tenured = float(previousGC.tenuredBytes);
695
float rate;
696
697
if (previousGC.nurseryUsedBytes > 0) {
698
if (validForTenuring) {
699
// We can only use promotion rates if they're likely to be valid,
700
// they're only valid if the nursery was at least 90% full.
701
*validForTenuring = used > capacity * 0.9f;
702
}
703
rate = tenured / used;
704
} else {
705
if (validForTenuring) {
706
*validForTenuring = false;
707
}
708
rate = 0.0f;
709
}
710
711
return rate;
712
}
713
714
// Emit a JSON object describing the last minor GC: status, reason, byte and
// cell counts, capacity changes, and per-phase timings. Safe to call even if
// the nursery is disabled or no collection has run yet.
void js::Nursery::renderProfileJSON(JSONPrinter& json) const {
  if (!isEnabled()) {
    json.beginObject();
    json.property("status", "nursery disabled");
    json.endObject();
    return;
  }

  if (previousGC.reason == JS::GCReason::NO_REASON) {
    // If the nursery was empty when the last minorGC was requested, then
    // no nursery collection will have been performed but JSON may still be
    // requested. (And as a public API, this function should not crash in
    // such a case.)
    json.beginObject();
    json.property("status", "nursery empty");
    json.endObject();
    return;
  }

  json.beginObject();

  json.property("status", "complete");

  json.property("reason", JS::ExplainGCReason(previousGC.reason));
  json.property("bytes_tenured", previousGC.tenuredBytes);
  json.property("cells_tenured", previousGC.tenuredCells);
  json.property("strings_tenured",
                stats().getStat(gcstats::STAT_STRINGS_TENURED));
  json.property("bytes_used", previousGC.nurseryUsedBytes);
  json.property("cur_capacity", previousGC.nurseryCapacity);
  // Only report capacity changes to keep the output compact.
  const size_t newCapacity = capacity();
  if (newCapacity != previousGC.nurseryCapacity) {
    json.property("new_capacity", newCapacity);
  }
  if (previousGC.nurseryCommitted != previousGC.nurseryCapacity) {
    json.property("lazy_capacity", previousGC.nurseryCommitted);
  }
  if (!timeInChunkAlloc_.IsZero()) {
    json.property("chunk_alloc_us", timeInChunkAlloc_, json.MICROSECONDS);
  }

  // These counters only contain consistent data if the profiler is enabled,
  // and then there's no guarantee.
  if (runtime()->geckoProfiler().enabled()) {
    json.property("cells_allocated_nursery",
                  stats().allocsSinceMinorGCNursery());
    json.property("cells_allocated_tenured",
                  stats().allocsSinceMinorGCTenured());
  }

  if (stats().getStat(gcstats::STAT_OBJECT_GROUPS_PRETENURED)) {
    json.property("groups_pretenured",
                  stats().getStat(gcstats::STAT_OBJECT_GROUPS_PRETENURED));
  }
  if (stats().getStat(gcstats::STAT_NURSERY_STRING_REALMS_DISABLED)) {
    json.property(
        "nursery_string_realms_disabled",
        stats().getStat(gcstats::STAT_NURSERY_STRING_REALMS_DISABLED));
  }

  json.beginObjectProperty("phase_times");

// Build the phase-name table from the profile-time list macro.
#define EXTRACT_NAME(name, text) #name,
  static const char* const names[] = {
      FOR_EACH_NURSERY_PROFILE_TIME(EXTRACT_NAME)
#undef EXTRACT_NAME
      ""};

  size_t i = 0;
  for (auto time : profileDurations_) {
    json.property(names[i++], time, json.MICROSECONDS);
  }

  json.endObject();  // timings value

  json.endObject();
}
791
792
// static
// Print the column header line for the per-collection profile output
// produced by printProfileDurations.
void js::Nursery::printProfileHeader() {
  fprintf(stderr, "MinorGC: Reason PRate Size ");
#define PRINT_HEADER(name, text) fprintf(stderr, " %6s", text);
  FOR_EACH_NURSERY_PROFILE_TIME(PRINT_HEADER)
#undef PRINT_HEADER
  fprintf(stderr, "\n");
}
800
801
// static
802
void js::Nursery::printProfileDurations(const ProfileDurations& times) {
803
for (auto time : times) {
804
fprintf(stderr, " %6" PRIi64, static_cast<int64_t>(time.ToMicroseconds()));
805
}
806
fprintf(stderr, "\n");
807
}
808
809
// Print the accumulated per-phase totals across all minor GCs. Only emits
// output when profiling was enabled (JS_GC_PROFILE_NURSERY).
void js::Nursery::printTotalProfileTimes() {
  if (enableProfiling_) {
    fprintf(stderr, "MinorGC TOTALS: %7" PRIu64 " collections: ",
            gc->minorGCCount());
    printProfileDurations(totalDurations_);
  }
}
816
817
// Reset all per-collection phase durations to zero before a new collection.
void js::Nursery::maybeClearProfileDurations() {
  for (auto& duration : profileDurations_) {
    duration = mozilla::TimeDuration();
  }
}
822
823
// Record the start time of profile phase |key|.
inline void js::Nursery::startProfile(ProfileKey key) {
  startTimes_[key] = ReallyNow();
}
826
827
// Close profile phase |key|: store its duration for this collection and add
// it to the cross-collection running total.
inline void js::Nursery::endProfile(ProfileKey key) {
  profileDurations_[key] = ReallyNow() - startTimes_[key];
  totalDurations_[key] += profileDurations_[key];
}
831
832
// Decide whether an idle-time minor GC is worthwhile: either one was
// explicitly requested, or free space has dropped below both the absolute
// and the fractional thresholds (see the derivation below).
bool js::Nursery::shouldCollect() const {
  if (isEmpty()) {
    return false;
  }

  if (minorGCRequested()) {
    return true;
  }

  bool belowBytesThreshold =
      freeSpace() < tunables().nurseryFreeThresholdForIdleCollection();
  bool belowFractionThreshold =
      float(freeSpace()) / float(capacity()) <
      tunables().nurseryFreeThresholdForIdleCollectionFraction();

  // We want to use belowBytesThreshold when the nursery is sufficiently large,
  // and belowFractionThreshold when it's small.
  //
  // When the nursery is small then belowBytesThreshold is a lower threshold
  // (triggered earlier) than belowFractionThreshold. So if the fraction
  // threshold is true, the bytes one will be true also. The opposite is true
  // when the nursery is large.
  //
  // Therefore, by the time we cross the threshold we care about, we've already
  // crossed the other one, and we can boolean AND to use either condition
  // without encoding any "is the nursery big/small" test/threshold. The point
  // at which they cross is when the nursery is: BytesThreshold /
  // FractionThreshold large.
  //
  // With defaults that's:
  //
  //   1MB = 256KB / 0.25
  //
  return belowBytesThreshold && belowFractionThreshold;
}
867
868
// typeReason is the gcReason for specified type, for example,
869
// FULL_CELL_PTR_OBJ_BUFFER is the gcReason for JSObject.
870
static inline bool IsFullStoreBufferReason(JS::GCReason reason,
871
JS::GCReason typeReason) {
872
return reason == typeReason ||
873
reason == JS::GCReason::FULL_WHOLE_CELL_BUFFER ||
874
reason == JS::GCReason::FULL_GENERIC_BUFFER ||
875
reason == JS::GCReason::FULL_VALUE_BUFFER ||
876
reason == JS::GCReason::FULL_SLOT_BUFFER ||
877
reason == JS::GCReason::FULL_SHAPE_BUFFER;
878
}
879
880
// Top-level minor GC driver: validates zeal canaries, runs the collection
// proper (doCollection), resizes and re-poisons the nursery, applies
// pretenuring, records telemetry, and emits profiling output.
void js::Nursery::collect(JS::GCReason reason) {
  JSRuntime* rt = runtime();
  MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);

  mozilla::recordreplay::AutoDisallowThreadEvents disallow;

  if (!isEnabled() || isEmpty()) {
    // Our barriers are not always exact, and there may be entries in the
    // storebuffer even when the nursery is disabled or empty. It's not safe
    // to keep these entries as they may refer to tenured cells which may be
    // freed after this point.
    gc->storeBuffer().clear();
  }

  if (!isEnabled()) {
    return;
  }

#ifdef JS_GC_ZEAL
  // Walk the canary list to catch nursery memory corruption before moving
  // anything.
  if (gc->hasZealMode(ZealMode::CheckNursery)) {
    for (auto canary = lastCanary_; canary; canary = canary->next) {
      MOZ_ASSERT(canary->magicValue == CanaryMagicValue);
    }
  }
  lastCanary_ = nullptr;
#endif

  stats().beginNurseryCollection(reason);
  gcTracer.traceMinorGCStart();

  maybeClearProfileDurations();
  startProfile(ProfileKey::Total);

  // The analysis marks TenureCount as not problematic for GC hazards because
  // it is only used here, and ObjectGroup pointers are never
  // nursery-allocated.
  MOZ_ASSERT(!IsNurseryAllocable(AllocKind::OBJECT_GROUP));

  TenureCountCache tenureCounts;
  previousGC.reason = JS::GCReason::NO_REASON;
  if (!isEmpty()) {
    doCollection(reason, tenureCounts);
  } else {
    // Nothing to collect; record an empty cycle for the profile/JSON output.
    previousGC.nurseryUsedBytes = 0;
    previousGC.nurseryCapacity = capacity();
    previousGC.nurseryCommitted = committed();
    previousGC.tenuredBytes = 0;
    previousGC.tenuredCells = 0;
  }

  // Resize the nursery.
  maybeResizeNursery(reason);

  // Poison/initialise the first chunk.
  if (isEnabled() && previousGC.nurseryUsedBytes) {
    // In most cases Nursery::clear() has not poisoned this chunk or marked it
    // as NoAccess; so we only need to poison the region used during the last
    // cycle. Also, if the heap was recently expanded we don't want to
    // re-poison the new memory. In both cases we only need to poison until
    // previousGC.nurseryUsedBytes.
    //
    // In cases where this is not true, like generational zeal mode or subchunk
    // mode, poisonAndInitCurrentChunk() will ignore its parameter. It will
    // also clamp the parameter.
    poisonAndInitCurrentChunk(previousGC.nurseryUsedBytes);
  }

  const float promotionRate = doPretenuring(rt, reason, tenureCounts);

  // We ignore gcMaxBytes when allocating for minor collection. However, if we
  // overflowed, we disable the nursery. The next time we allocate, we'll fail
  // because bytes >= gcMaxBytes.
  if (gc->heapSize.bytes() >= tunables().gcMaxBytes()) {
    disable();
  }

  endProfile(ProfileKey::Total);
  gc->incMinorGcNumber();

  TimeDuration totalTime = profileDurations_[ProfileKey::Total];
  rt->addTelemetry(JS_TELEMETRY_GC_MINOR_US, totalTime.ToMicroseconds());
  rt->addTelemetry(JS_TELEMETRY_GC_MINOR_REASON, uint32_t(reason));
  if (totalTime.ToMilliseconds() > 1.0) {
    rt->addTelemetry(JS_TELEMETRY_GC_MINOR_REASON_LONG, uint32_t(reason));
  }
  rt->addTelemetry(JS_TELEMETRY_GC_NURSERY_BYTES, committed());

  stats().endNurseryCollection(reason);
  gcTracer.traceMinorGCEnd();
  timeInChunkAlloc_ = mozilla::TimeDuration();

  // Optional stderr profile line (JS_GC_PROFILE_NURSERY), plus per-group
  // tenuring report when JS_GC_REPORT_TENURING is set.
  if (enableProfiling_ && totalTime >= profileThreshold_) {
    stats().maybePrintProfileHeaders();

    fprintf(stderr, "MinorGC: %20s %5.1f%% %5zu ",
            JS::ExplainGCReason(reason), promotionRate * 100,
            capacity() / 1024);
    printProfileDurations(profileDurations_);

    if (reportTenurings_) {
      for (auto& entry : tenureCounts.entries) {
        if (entry.count >= reportTenurings_) {
          fprintf(stderr, " %d x ", entry.count);
          AutoSweepObjectGroup sweep(entry.group);
          entry.group->print(sweep);
        }
      }
    }
  }
}
990
991
void js::Nursery::doCollection(JS::GCReason reason,
992
TenureCountCache& tenureCounts) {
993
JSRuntime* rt = runtime();
994
AutoGCSession session(gc, JS::HeapState::MinorCollecting);
995
AutoSetThreadIsPerformingGC performingGC;
996
AutoStopVerifyingBarriers av(rt, false);
997
AutoDisableProxyCheck disableStrictProxyChecking;
998
mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion;
999
1000
const size_t initialNurseryCapacity = capacity();
1001
const size_t initialNurseryUsedBytes = usedSpace();
1002
1003
// Move objects pointed to by roots from the nursery to the major heap.
1004
TenuringTracer mover(rt, this);
1005
1006
// Mark the store buffer. This must happen first.
1007
StoreBuffer& sb = gc->storeBuffer();
1008
1009
// The MIR graph only contains nursery pointers if cancelIonCompilations()
1010
// is set on the store buffer, in which case we cancel all compilations
1011
// of such graphs.
1012
startProfile(ProfileKey::CancelIonCompilations);
1013
if (sb.cancelIonCompilations()) {
1014
js::CancelOffThreadIonCompilesUsingNurseryPointers(rt);
1015
}
1016
endProfile(ProfileKey::CancelIonCompilations);
1017
1018
startProfile(ProfileKey::TraceValues);
1019
sb.traceValues(mover);
1020
endProfile(ProfileKey::TraceValues);
1021
1022
startProfile(ProfileKey::TraceCells);
1023
sb.traceCells(mover);
1024
endProfile(ProfileKey::TraceCells);
1025
1026
startProfile(ProfileKey::TraceSlots);
1027
sb.traceSlots(mover);
1028
endProfile(ProfileKey::TraceSlots);
1029
1030
startProfile(ProfileKey::TraceWholeCells);
1031
sb.traceWholeCells(mover);
1032
endProfile(ProfileKey::TraceWholeCells);
1033
1034
startProfile(ProfileKey::TraceGenericEntries);
1035
sb.traceGenericEntries(&mover);
1036
endProfile(ProfileKey::TraceGenericEntries);
1037
1038
startProfile(ProfileKey::MarkRuntime);
1039
gc->traceRuntimeForMinorGC(&mover, session);
1040
endProfile(ProfileKey::MarkRuntime);
1041
1042
startProfile(ProfileKey::MarkDebugger);
1043
{
1044
gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
1045
DebugAPI::traceAllForMovingGC(&mover);
1046
}
1047
endProfile(ProfileKey::MarkDebugger);
1048
1049
startProfile(ProfileKey::SweepCaches);
1050
gc->purgeRuntimeForMinorGC();
1051
endProfile(ProfileKey::SweepCaches);
1052
1053
// Most of the work is done here. This loop iterates over objects that have
1054
// been moved to the major heap. If these objects have any outgoing pointers
1055
// to the nursery, then those nursery objects get moved as well, until no
1056
// objects are left to move. That is, we iterate to a fixed point.
1057
startProfile(ProfileKey::CollectToFP);
1058
collectToFixedPoint(mover, tenureCounts);
1059
endProfile(ProfileKey::CollectToFP);
1060
1061
// Sweep to update any pointers to nursery objects that have now been
1062
// tenured.
1063
startProfile(ProfileKey::Sweep);
1064
sweep(&mover);
1065
endProfile(ProfileKey::Sweep);
1066
1067
// Update any slot or element pointers whose destination has been tenured.
1068
startProfile(ProfileKey::UpdateJitActivations);
1069
js::jit::UpdateJitActivationsForMinorGC(rt);
1070
forwardedBuffers.clearAndCompact();
1071
endProfile(ProfileKey::UpdateJitActivations);
1072
1073
startProfile(ProfileKey::ObjectsTenuredCallback);
1074
gc->callObjectsTenuredCallback();
1075
endProfile(ProfileKey::ObjectsTenuredCallback);
1076
1077
// Sweep.
1078
startProfile(ProfileKey::FreeMallocedBuffers);
1079
gc->queueBuffersForFreeAfterMinorGC(mallocedBuffers);
1080
endProfile(ProfileKey::FreeMallocedBuffers);
1081
1082
startProfile(ProfileKey::ClearNursery);
1083
clear();
1084
endProfile(ProfileKey::ClearNursery);
1085
1086
startProfile(ProfileKey::ClearStoreBuffer);
1087
gc->storeBuffer().clear();
1088
endProfile(ProfileKey::ClearStoreBuffer);
1089
1090
// Make sure hashtables have been updated after the collection.
1091
startProfile(ProfileKey::CheckHashTables);
1092
#ifdef JS_GC_ZEAL
1093
if (gc->hasZealMode(ZealMode::CheckHashTablesOnMinorGC)) {
1094
gc->checkHashTablesAfterMovingGC();
1095
}
1096
#endif
1097
endProfile(ProfileKey::CheckHashTables);
1098
1099
previousGC.reason = reason;
1100
previousGC.nurseryCapacity = initialNurseryCapacity;
1101
previousGC.nurseryCommitted = spaceToEnd(allocatedChunkCount());
1102
previousGC.nurseryUsedBytes = initialNurseryUsedBytes;
1103
previousGC.tenuredBytes = mover.tenuredSize;
1104
previousGC.tenuredCells = mover.tenuredCells;
1105
}
1106
1107
float js::Nursery::doPretenuring(JSRuntime* rt, JS::GCReason reason,
1108
TenureCountCache& tenureCounts) {
1109
// If we are promoting the nursery, or exhausted the store buffer with
1110
// pointers to nursery things, which will force a collection well before
1111
// the nursery is full, look for object groups that are getting promoted
1112
// excessively and try to pretenure them.
1113
startProfile(ProfileKey::Pretenure);
1114
bool validPromotionRate;
1115
const float promotionRate = calcPromotionRate(&validPromotionRate);
1116
uint32_t pretenureCount = 0;
1117
bool attempt = tunables().attemptPretenuring();
1118
1119
bool pretenureObj, pretenureStr;
1120
if (attempt) {
1121
// Should we do pretenuring regardless of gcreason?
1122
bool shouldPretenure = validPromotionRate &&
1123
promotionRate > tunables().pretenureThreshold() &&
1124
previousGC.nurseryUsedBytes >= 4 * 1024 * 1024;
1125
pretenureObj =
1126
shouldPretenure ||
1127
IsFullStoreBufferReason(reason, JS::GCReason::FULL_CELL_PTR_OBJ_BUFFER);
1128
pretenureStr =
1129
shouldPretenure ||
1130
IsFullStoreBufferReason(reason, JS::GCReason::FULL_CELL_PTR_STR_BUFFER);
1131
} else {
1132
pretenureObj = false;
1133
pretenureStr = false;
1134
}
1135
1136
if (pretenureObj) {
1137
JSContext* cx = rt->mainContextFromOwnThread();
1138
uint32_t threshold = tunables().pretenureGroupThreshold();
1139
for (auto& entry : tenureCounts.entries) {
1140
if (entry.count < threshold) {
1141
continue;
1142
}
1143
1144
ObjectGroup* group = entry.group;
1145
AutoRealm ar(cx, group);
1146
AutoSweepObjectGroup sweep(group);
1147
if (group->canPreTenure(sweep)) {
1148
group->setShouldPreTenure(sweep, cx);
1149
pretenureCount++;
1150
}
1151
}
1152
}
1153
stats().setStat(gcstats::STAT_OBJECT_GROUPS_PRETENURED, pretenureCount);
1154
1155
mozilla::Maybe<AutoGCSession> session;
1156
uint32_t numStringsTenured = 0;
1157
uint32_t numNurseryStringRealmsDisabled = 0;
1158
for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
1159
if (pretenureStr && zone->allocNurseryStrings &&
1160
zone->tenuredStrings >= 30 * 1000) {
1161
if (!session.isSome()) {
1162
session.emplace(gc, JS::HeapState::MinorCollecting);
1163
}
1164
CancelOffThreadIonCompile(zone);
1165
bool preserving = zone->isPreservingCode();
1166
zone->setPreservingCode(false);
1167
zone->discardJitCode(rt->defaultFreeOp());
1168
zone->setPreservingCode(preserving);
1169
for (RealmsInZoneIter r(zone); !r.done(); r.next()) {
1170
if (jit::JitRealm* jitRealm = r->jitRealm()) {
1171
jitRealm->discardStubs();
1172
jitRealm->setStringsCanBeInNursery(false);
1173
numNurseryStringRealmsDisabled++;
1174
}
1175
}
1176
zone->allocNurseryStrings = false;
1177
}
1178
numStringsTenured += zone->tenuredStrings;
1179
zone->tenuredStrings = 0;
1180
}
1181
session.reset(); // End the minor GC session, if running one.
1182
stats().setStat(gcstats::STAT_NURSERY_STRING_REALMS_DISABLED,
1183
numNurseryStringRealmsDisabled);
1184
stats().setStat(gcstats::STAT_STRINGS_TENURED, numStringsTenured);
1185
endProfile(ProfileKey::Pretenure);
1186
1187
rt->addTelemetry(JS_TELEMETRY_GC_PRETENURE_COUNT, pretenureCount);
1188
rt->addTelemetry(JS_TELEMETRY_GC_NURSERY_PROMOTION_RATE, promotionRate * 100);
1189
1190
return promotionRate;
1191
}
1192
1193
bool js::Nursery::registerMallocedBuffer(void* buffer) {
1194
MOZ_ASSERT(buffer);
1195
return mallocedBuffers.putNew(buffer);
1196
}
1197
1198
// Post-collection sweeping: update unique IDs, compartment and zone tables,
// dictionary-mode object lists, and Map/Set objects with nursery memory.
void js::Nursery::sweep(JSTracer* trc) {
  // Sweep unique IDs first before we sweep any tables that may be keyed based
  // on them.
  for (Cell* cell : cellsWithUid_) {
    JSObject* obj = static_cast<JSObject*>(cell);
    if (!IsForwarded(obj)) {
      // The object died; drop its unique ID.
      obj->zone()->removeUniqueId(obj);
    } else {
      // The object was tenured; transfer the ID to its new location.
      JSObject* dst = Forwarded(obj);
      dst->zone()->transferUniqueId(dst, obj);
    }
  }
  cellsWithUid_.clear();

  for (CompartmentsIter c(runtime()); !c.done(); c.next()) {
    c->sweepAfterMinorGC(trc);
  }

  for (ZonesIter zone(trc->runtime(), SkipAtoms); !zone.done(); zone.next()) {
    zone->sweepAfterMinorGC(trc);
  }

  sweepDictionaryModeObjects();
  sweepMapAndSetObjects();
}
1223
1224
void js::Nursery::clear() {
1225
// Poison the nursery contents so touching a freed object will crash.
1226
unsigned firstClearChunk;
1227
if (gc->hasZealMode(ZealMode::GenerationalGC)) {
1228
// Poison all the chunks used in this cycle. The new start chunk is
1229
// reposioned in Nursery::collect() but there's no point optimising that in
1230
// this case.
1231
firstClearChunk = currentStartChunk_;
1232
} else {
1233
// In normal mode we start at the second chunk, the first one will be used
1234
// in the next cycle and poisoned in Nusery::collect();
1235
MOZ_ASSERT(currentStartChunk_ == 0);
1236
firstClearChunk = 1;
1237
}
1238
for (unsigned i = firstClearChunk; i < currentChunk_; ++i) {
1239
chunk(i).poisonAfterEvict();
1240
}
1241
// Clear only the used part of the chunk because that's the part we touched,
1242
// but only if it's not going to be re-used immediately (>= firstClearChunk).
1243
if (currentChunk_ >= firstClearChunk) {
1244
chunk(currentChunk_)
1245
.poisonAfterEvict(position() - chunk(currentChunk_).start());
1246
}
1247
1248
// Reset the start chunk & position if we're not in this zeal mode, or we're
1249
// in it and close to the end of the nursery.
1250
MOZ_ASSERT(maxChunkCount() > 0);
1251
if (!gc->hasZealMode(ZealMode::GenerationalGC) ||
1252
(gc->hasZealMode(ZealMode::GenerationalGC) &&
1253
currentChunk_ + 1 == maxChunkCount())) {
1254
setCurrentChunk(0);
1255
}
1256
1257
// Set current start position for isEmpty checks.
1258
setStartPosition();
1259
}
1260
1261
// Return the number of bytes from the current start position to the end of
// the |chunkCount|-th chunk, i.e. the space the nursery spans over that many
// chunks this cycle.
size_t js::Nursery::spaceToEnd(unsigned chunkCount) const {
  if (chunkCount == 0) {
    return 0;
  }

  unsigned lastChunk = chunkCount - 1;

  MOZ_ASSERT(lastChunk >= currentStartChunk_);
  MOZ_ASSERT(currentStartPosition_ - chunk(currentStartChunk_).start() <=
             NurseryChunkUsableSize);

  size_t bytes;

  if (chunkCount != 1) {
    // In the general case we have to add:
    //  + the bytes used in the first
    //    chunk which may be less than the total size of a chunk since in some
    //    zeal modes we start the first chunk at some later position
    //    (currentStartPosition_).
    //  + the size of all the other chunks.
    bytes = (chunk(currentStartChunk_).end() - currentStartPosition_) +
            ((lastChunk - currentStartChunk_) * ChunkSize);
  } else {
    // In sub-chunk mode, but it also works whenever chunkCount == 1, we need
    // to use currentEnd_ since it may not refer to a full chunk.
    bytes = currentEnd_ - currentStartPosition_;
  }

  MOZ_ASSERT(bytes <= maxChunkCount() * ChunkSize);

  return bytes;
}
1293
1294
// Switch allocation to the given (already-allocated) chunk, resetting the
// bump-allocation position to its start.
MOZ_ALWAYS_INLINE void js::Nursery::setCurrentChunk(unsigned chunkno) {
  MOZ_ASSERT(chunkno < allocatedChunkCount());

  currentChunk_ = chunkno;
  position_ = chunk(chunkno).start();
  setCurrentEnd();
}
1301
1302
// Poison and (re)initialize the current chunk ready for allocation. In
// sub-chunk mode only the usable extent is touched; in zeal mode or full-chunk
// mode the whole chunk is poisoned.
void js::Nursery::poisonAndInitCurrentChunk(size_t extent) {
  if (gc->hasZealMode(ZealMode::GenerationalGC) || !isSubChunkMode()) {
    chunk(currentChunk_).poisonAndInit(runtime());
  } else {
    extent = std::min(capacity_, extent);
    MOZ_ASSERT(extent <= NurseryChunkUsableSize);
    chunk(currentChunk_).poisonAndInit(runtime(), extent);
  }
}
1311
1312
// Recompute currentEnd_ (and the string allocation limit) from the current
// chunk and capacity. In sub-chunk mode the end lies inside chunk 0.
MOZ_ALWAYS_INLINE void js::Nursery::setCurrentEnd() {
  MOZ_ASSERT_IF(isSubChunkMode(),
                currentChunk_ == 0 && currentEnd_ <= chunk(0).end());
  currentEnd_ = chunk(currentChunk_).start() +
                std::min({capacity_, NurseryChunkUsableSize});
  if (canAllocateStrings_) {
    currentStringEnd_ = currentEnd_;
  }
}
1321
1322
bool js::Nursery::allocateNextChunk(const unsigned chunkno,
1323
AutoLockGCBgAlloc& lock) {
1324
const unsigned priorCount = allocatedChunkCount();
1325
const unsigned newCount = priorCount + 1;
1326
1327
MOZ_ASSERT((chunkno == currentChunk_ + 1) ||
1328
(chunkno == 0 && allocatedChunkCount() == 0));
1329
MOZ_ASSERT(chunkno == allocatedChunkCount());
1330
MOZ_ASSERT(chunkno < HowMany(capacity(), ChunkSize));
1331
1332
if (!chunks_.resize(newCount)) {
1333
return false;
1334
}
1335
1336
Chunk* newChunk;
1337
newChunk = gc->getOrAllocChunk(lock);
1338
if (!newChunk) {
1339
chunks_.shrinkTo(priorCount);
1340
return false;
1341
}
1342
1343
chunks_[chunkno] = NurseryChunk::fromChunk(newChunk);
1344
return true;
1345
}
1346
1347
// Record the current chunk and position as the start of the live region, used
// by isEmpty() and space accounting.
MOZ_ALWAYS_INLINE void js::Nursery::setStartPosition() {
  currentStartChunk_ = currentChunk_;
  currentStartPosition_ = position();
}
1351
1352
// Resize the nursery after a collection based on the promotion rate, seeking
// a promotion rate between the grow and shrink thresholds.
void js::Nursery::maybeResizeNursery(JS::GCReason reason) {
  if (maybeResizeExact(reason)) {
    return;
  }

  // This incorrect promotion rate results in better nursery sizing
  // decisions, however we should do better tuning based on the real
  // promotion rate in the future.
  const float promotionRate =
      float(previousGC.tenuredBytes) / float(previousGC.nurseryCapacity);

  // Object lifetimes aren't going to behave linearly, but a better
  // relationship that works for all programs and can be predicted in
  // advance doesn't exist.
  static const float GrowThreshold = 0.03f;
  static const float ShrinkThreshold = 0.01f;
  static const float PromotionGoal = (GrowThreshold + ShrinkThreshold) / 2.0f;
  const float factor = promotionRate / PromotionGoal;
  MOZ_ASSERT(factor >= 0.0f);

#ifdef DEBUG
  // This is |... <= SIZE_MAX|, just without the implicit value-changing
  // conversion that expression would involve and modern clang would warn
  // about.
  static const float SizeMaxPlusOne =
      2.0f * float(1ULL << (sizeof(void*) * CHAR_BIT - 1));
  MOZ_ASSERT((float(capacity()) * factor) < SizeMaxPlusOne);
#endif

  size_t newCapacity = size_t(float(capacity()) * factor);

  const size_t minNurseryBytes = roundSize(tunables().gcMinNurseryBytes());
  MOZ_ASSERT(minNurseryBytes >= ArenaSize);
  const size_t maxNurseryBytes = roundSize(tunables().gcMaxNurseryBytes());
  MOZ_ASSERT(maxNurseryBytes >= ArenaSize);

  // If one of these conditions is true then we always shrink or grow the
  // nursery. This way the thresholds still have an effect even if the goal
  // seeking says the current size is ideal.
  size_t lowLimit = std::max(minNurseryBytes, capacity() / 2);
  size_t highLimit =
      std::min(maxNurseryBytes, (CheckedInt<size_t>(capacity()) * 2).value());
  newCapacity = roundSize(mozilla::Clamp(newCapacity, lowLimit, highLimit));

  if (capacity() < maxNurseryBytes && promotionRate > GrowThreshold &&
      newCapacity > capacity()) {
    growAllocableSpace(newCapacity);
  } else if (capacity() >= minNurseryBytes + SubChunkStep &&
             promotionRate < ShrinkThreshold && newCapacity < capacity()) {
    shrinkAllocableSpace(newCapacity);
  }
}
1403
1404
// Handle the nursery-resize cases that don't depend on the promotion rate:
// low memory, zeal mode, and changed min/max size parameters. Returns true if
// the resize was fully handled here.
bool js::Nursery::maybeResizeExact(JS::GCReason reason) {
  // Shrink the nursery to its minimum size if we ran out of memory or
  // received a memory pressure event.
  if (gc::IsOOMReason(reason) || gc->systemHasLowMemory()) {
    minimizeAllocableSpace();
    return true;
  }

#ifdef JS_GC_ZEAL
  // This zeal mode disables nursery resizing.
  if (gc->hasZealMode(ZealMode::GenerationalGC)) {
    return true;
  }
#endif

  MOZ_ASSERT(tunables().gcMaxNurseryBytes() >= ArenaSize);
  const size_t newMaxNurseryBytes = roundSize(tunables().gcMaxNurseryBytes());
  MOZ_ASSERT(newMaxNurseryBytes >= ArenaSize);

  if (capacity_ > newMaxNurseryBytes) {
    // The configured maximum nursery size is changing.
    // We need to shrink the nursery.
    shrinkAllocableSpace(newMaxNurseryBytes);
    return true;
  }

  const size_t newMinNurseryBytes = roundSize(tunables().gcMinNurseryBytes());
  MOZ_ASSERT(newMinNurseryBytes >= ArenaSize);

  if (newMinNurseryBytes > capacity()) {
    // The configured minimum nursery size is changing, so grow the nursery.
    MOZ_ASSERT(newMinNurseryBytes <= roundSize(tunables().gcMaxNurseryBytes()));
    growAllocableSpace(newMinNurseryBytes);
    return true;
  }

  return false;
}
1442
1443
// Round a requested nursery size to a valid capacity: a multiple of ChunkSize
// for chunk-or-larger sizes, otherwise a multiple of SubChunkStep capped at
// the usable portion of a single chunk.
size_t js::Nursery::roundSize(size_t size) {
  if (size >= ChunkSize) {
    size = Round(size, ChunkSize);
  } else {
    size = std::min(Round(size, SubChunkStep),
                    RoundDown(NurseryChunkUsableSize, SubChunkStep));
  }
  MOZ_ASSERT(size >= ArenaSize);
  return size;
}
1453
1454
// Grow the nursery's capacity to |newCapacity|. In sub-chunk mode this may
// need to re-commit and re-poison part of chunk 0; if the OS refuses the
// commit the grow is silently abandoned.
void js::Nursery::growAllocableSpace(size_t newCapacity) {
  MOZ_ASSERT_IF(!isSubChunkMode(), newCapacity > currentChunk_ * ChunkSize);
  MOZ_ASSERT(newCapacity <= roundSize(tunables().gcMaxNurseryBytes()));
  MOZ_ASSERT(newCapacity > capacity());

  if (isSubChunkMode()) {
    // Avoid growing into an area that's about to be decommitted.
    decommitTask.join();

    MOZ_ASSERT(currentChunk_ == 0);

    // The remainder of the chunk may have been decommitted.
    if (!chunk(0).markPagesInUseHard(
            std::min(newCapacity, ChunkSize - ArenaSize))) {
      // The OS won't give us the memory we need, we can't grow.
      return;
    }

    // The capacity has changed and since we were in sub-chunk mode we need to
    // update the poison values / asan information for the now-valid region of
    // this chunk.
    size_t poisonSize =
        std::min({newCapacity, NurseryChunkUsableSize}) - capacity();
    // Don't poison the trailer.
    MOZ_ASSERT(capacity() + poisonSize <= NurseryChunkUsableSize);
    chunk(0).poisonRange(capacity(), poisonSize, JS_FRESH_NURSERY_PATTERN,
                         MemCheckKind::MakeUndefined);
  }

  capacity_ = newCapacity;

  setCurrentEnd();
}
1487
1488
void js::Nursery::freeChunksFrom(const unsigned firstFreeChunk) {
1489
MOZ_ASSERT(firstFreeChunk < chunks_.length());
1490
1491
// The loop below may need to skip the first chunk, so we may use this so we
1492
// can modify it.
1493
unsigned firstChunkToDecommit = firstFreeChunk;
1494
1495
if ((firstChunkToDecommit == 0) && isSubChunkMode()) {
1496
// Part of the first chunk may be hard-decommitted, un-decommit it so that
1497
// the GC's normal chunk-handling doesn't segfault.
1498
MOZ_ASSERT(currentChunk_ == 0);
1499
if (!chunk(0).markPagesInUseHard(ChunkSize - ArenaSize)) {
1500
// Free the chunk if we can't allocate its pages.
1501
UnmapPages(static_cast<void*>(&chunk(0)), ChunkSize);
1502
firstChunkToDecommit = 1;
1503
}
1504
}
1505
1506
{
1507
AutoLockHelperThreadState lock;
1508
for (size_t i = firstChunkToDecommit; i < chunks_.length(); i++) {
1509
decommitTask.queueChunk(chunks_[i], lock);
1510
}
1511
decommitTask.startOrRunIfIdle(lock);
1512
}
1513
1514
chunks_.shrinkTo(firstFreeChunk);
1515
}
1516
1517
// Shrink the nursery's capacity to |newCapacity|, freeing now-unused chunks
// and, in sub-chunk mode, poisoning and queueing the tail of chunk 0 for
// decommit. No-op in GenerationalGC zeal mode or if the size is unchanged.
void js::Nursery::shrinkAllocableSpace(size_t newCapacity) {
#ifdef JS_GC_ZEAL
  if (gc->hasZealMode(ZealMode::GenerationalGC)) {
    return;
  }
#endif

  // Don't shrink the nursery to zero (use Nursery::disable() instead)
  // This can't happen due to the rounding-down performed above because of the
  // clamping in maybeResizeNursery().
  MOZ_ASSERT(newCapacity != 0);
  // Don't attempt to shrink it to the same size.
  if (newCapacity == capacity_) {
    return;
  }
  MOZ_ASSERT(newCapacity < capacity_);

  unsigned newCount = HowMany(newCapacity, ChunkSize);
  if (newCount < allocatedChunkCount()) {
    freeChunksFrom(newCount);
  }

  size_t oldCapacity = capacity_;
  capacity_ = newCapacity;

  setCurrentEnd();

  if (isSubChunkMode()) {
    MOZ_ASSERT(currentChunk_ == 0);
    chunk(0).poisonRange(
        newCapacity,
        std::min({oldCapacity, NurseryChunkUsableSize}) - newCapacity,
        JS_SWEPT_NURSERY_PATTERN, MemCheckKind::MakeNoAccess);

    AutoLockHelperThreadState lock;
    decommitTask.queueRange(capacity_, chunk(0), lock);
    decommitTask.startOrRunIfIdle(lock);
  }
}
1556
1557
void js::Nursery::minimizeAllocableSpace() {
1558
if (capacity_ < roundSize(tunables().gcMinNurseryBytes())) {
1559
// The nursery is already smaller than the minimum size. This can happen
1560
// because changing parameters (like an increase in minimum size) can only
1561
// occur after a minor GC. See Bug 1585159.
1562
//
1563
// We could either do the /correct/ thing and increase the size to the
1564
// configured minimum size. Or do nothing, keeping the nursery smaller. We
1565
// do nothing because this can be executed as a last-ditch GC and we don't
1566
// want to add memory pressure then.
1567
return;
1568
}
1569
shrinkAllocableSpace(roundSize(tunables().gcMinNurseryBytes()));
1570
}
1571
1572
// Queue a nursery-allocated dictionary-mode object so its dictionary list
// pointer is fixed up during sweeping. Returns false on OOM.
bool js::Nursery::queueDictionaryModeObjectToSweep(NativeObject* obj) {
  MOZ_ASSERT(IsInsideNursery(obj));
  return dictionaryModeObjects_.append(obj);
}
1576
1577
// Return the end of the current allocation region, with sanity assertions.
uintptr_t js::Nursery::currentEnd() const {
  // These are separate asserts because it can be useful to see which one
  // failed.
  MOZ_ASSERT_IF(isSubChunkMode(), currentChunk_ == 0);
  MOZ_ASSERT_IF(isSubChunkMode(), currentEnd_ <= chunk(currentChunk_).end());
  MOZ_ASSERT_IF(!isSubChunkMode(), currentEnd_ == chunk(currentChunk_).end());
  MOZ_ASSERT(currentEnd_ != chunk(currentChunk_).start());
  return currentEnd_;
}
1586
1587
// The GC's statistics object, shared with the major collector.
gcstats::Statistics& js::Nursery::stats() const { return gc->stats(); }
1589
// The GC's scheduling tunables (size limits, thresholds, etc.).
MOZ_ALWAYS_INLINE const js::gc::GCSchedulingTunables& js::Nursery::tunables()
    const {
  return gc->tunables;
}
1593
1594
bool js::Nursery::isSubChunkMode() const {
1595
return capacity() <= NurseryChunkUsableSize;
1596
}
1597
1598
void js::Nursery::sweepDictionaryModeObjects() {
1599
for (auto obj : dictionaryModeObjects_) {
1600
if (!IsForwarded(obj)) {
1601
obj->sweepDictionaryListPointer();
1602
} else {
1603
Forwarded(obj)->updateDictionaryListPointerAfterMinorGC(obj);
1604
}
1605
}
1606
dictionaryModeObjects_.clear();
1607
}
1608
1609
void js::Nursery::sweepMapAndSetObjects() {
1610
auto fop = runtime()->defaultFreeOp();
1611
1612
for (auto mapobj : mapsWithNurseryMemory_) {
1613
MapObject::sweepAfterMinorGC(fop, mapobj);
1614
}
1615
mapsWithNurseryMemory_.clearAndFree();
1616
1617
for (auto setobj : setsWithNurseryMemory_) {
1618
SetObject::sweepAfterMinorGC(fop, setobj);
1619
}
1620
setsWithNurseryMemory_.clearAndFree();
1621
}
1622
1623
// Public API: turn on nursery allocation of strings. Requires an empty
// nursery and discards all JIT code, which bakes in the previous setting.
JS_PUBLIC_API void JS::EnableNurseryStrings(JSContext* cx) {
  AutoEmptyNursery empty(cx);
  ReleaseAllJITCode(cx->defaultFreeOp());
  cx->runtime()->gc.nursery().enableStrings();
}
1628
1629
// Public API: turn off nursery allocation of strings. Requires an empty
// nursery and discards all JIT code, which bakes in the previous setting.
JS_PUBLIC_API void JS::DisableNurseryStrings(JSContext* cx) {
  AutoEmptyNursery empty(cx);
  ReleaseAllJITCode(cx->defaultFreeOp());
  cx->runtime()->gc.nursery().disableStrings();
}