/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "gc/Allocator.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/TimeStamp.h"

#include "gc/GCInternals.h"
#include "gc/GCLock.h"
#include "gc/GCTrace.h"
#include "gc/Nursery.h"
#include "jit/JitRealm.h"
#include "threading/CpuCount.h"
#include "util/Poison.h"
#include "vm/JSContext.h"
#include "vm/Runtime.h"
#include "vm/StringType.h"

#include "gc/ArenaList-inl.h"
#include "gc/Heap-inl.h"
#include "gc/PrivateIterators-inl.h"
#include "vm/JSObject-inl.h"

using mozilla::TimeDuration;
using mozilla::TimeStamp;

using namespace js;
using namespace gc;

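// Allocate a JSObject of the given AllocKind, along with any requested
// dynamic slots. The object is placed in the nursery when the nursery is
// enabled and the caller did not ask for the tenured heap; otherwise, or if
// nursery allocation fails and a GC is allowed, it is tenured.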
template <AllowGC allowGC /* = CanGC */>
JSObject* js::AllocateObject(JSContext* cx, AllocKind kind,
                             size_t nDynamicSlots, InitialHeap heap,
                             const JSClass* clasp) {
  MOZ_ASSERT(IsObjectAllocKind(kind));
  size_t thingSize = Arena::thingSize(kind);

  MOZ_ASSERT(thingSize == Arena::thingSize(kind));
  MOZ_ASSERT(thingSize >= sizeof(JSObject_Slots0));
  static_assert(
      sizeof(JSObject_Slots0) >= MinCellSize,
      "All allocations must be at least the allocator-imposed minimum size.");

  MOZ_ASSERT_IF(nDynamicSlots != 0, clasp->isNative());

  // We cannot trigger GC or make runtime assertions when nursery allocation
  // is suppressed, either explicitly or because we are off-thread.
  if (cx->isNurseryAllocSuppressed()) {
    JSObject* obj = GCRuntime::tryNewTenuredObject<NoGC>(cx, kind, thingSize,
                                                         nDynamicSlots);
    if (MOZ_UNLIKELY(allowGC && !obj)) {
      ReportOutOfMemory(cx);
    }
    return obj;
  }

  JSRuntime* rt = cx->runtime();
  if (!rt->gc.checkAllocatorState<allowGC>(cx, kind)) {
    return nullptr;
  }

  if (cx->nursery().isEnabled() && heap != TenuredHeap) {
    JSObject* obj = rt->gc.tryNewNurseryObject<allowGC>(cx, thingSize,
                                                        nDynamicSlots, clasp);
    if (obj) {
      return obj;
    }

    // Our most common non-jit allocation path is NoGC; thus, if we fail the
    // alloc and cannot GC, we *must* return nullptr here so that the caller
    // will do a CanGC allocation to clear the nursery. Failing to do so will
    // cause all allocations on this path to land in Tenured, and we will not
    // get the benefit of the nursery.
    if (!allowGC) {
      return nullptr;
    }
  }

  return GCRuntime::tryNewTenuredObject<allowGC>(cx, kind, thingSize,
                                                 nDynamicSlots);
}
template JSObject* js::AllocateObject<NoGC>(JSContext* cx, gc::AllocKind kind,
                                            size_t nDynamicSlots,
                                            gc::InitialHeap heap,
                                            const JSClass* clasp);
template JSObject* js::AllocateObject<CanGC>(JSContext* cx, gc::AllocKind kind,
                                             size_t nDynamicSlots,
                                             gc::InitialHeap heap,
                                             const JSClass* clasp);

// Attempt to allocate a new JSObject out of the nursery. If there is not
// enough room in the nursery or there is an OOM, this method will return
// nullptr.
template <AllowGC allowGC>
JSObject* GCRuntime::tryNewNurseryObject(JSContext* cx, size_t thingSize,
                                         size_t nDynamicSlots,
                                         const JSClass* clasp) {
  MOZ_RELEASE_ASSERT(!cx->isHelperThreadContext());

  MOZ_ASSERT(cx->isNurseryAllocAllowed());
  MOZ_ASSERT(!cx->isNurseryAllocSuppressed());
  MOZ_ASSERT(!cx->zone()->isAtomsZone());

  JSObject* obj =
      cx->nursery().allocateObject(cx, thingSize, nDynamicSlots, clasp);
  if (obj) {
    return obj;
  }

  if (allowGC && !cx->suppressGC) {
    cx->runtime()->gc.minorGC(JS::GCReason::OUT_OF_NURSERY);

    // Exceeding gcMaxBytes while tenuring can disable the Nursery.
    if (cx->nursery().isEnabled()) {
      return cx->nursery().allocateObject(cx, thingSize, nDynamicSlots, clasp);
    }
  }
  return nullptr;
}

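// Allocate a tenured JSObject and, if requested, its dynamic slots. The slots
// are malloc'd up front so that a failed cell allocation can free them again
// without leaking.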
template <AllowGC allowGC>
JSObject* GCRuntime::tryNewTenuredObject(JSContext* cx, AllocKind kind,
                                         size_t thingSize,
                                         size_t nDynamicSlots) {
  HeapSlot* slots = nullptr;
  if (nDynamicSlots) {
    slots = cx->maybe_pod_malloc<HeapSlot>(nDynamicSlots);
    if (MOZ_UNLIKELY(!slots)) {
      if (allowGC) {
        ReportOutOfMemory(cx);
      }
      return nullptr;
    }
    Debug_SetSlotRangeToCrashOnTouch(slots, nDynamicSlots);
  }

  JSObject* obj = tryNewTenuredThing<JSObject, allowGC>(cx, kind, thingSize);

  if (obj) {
    if (nDynamicSlots) {
      static_cast<NativeObject*>(obj)->initSlots(slots);
      AddCellMemory(obj, nDynamicSlots * sizeof(HeapSlot),
                    MemoryUse::ObjectSlots);
    }
  } else {
    js_free(slots);
  }

  return obj;
}

// Attempt to allocate a new string out of the nursery. If there is not enough
// room in the nursery or there is an OOM, this method will return nullptr.
template <AllowGC allowGC>
JSString* GCRuntime::tryNewNurseryString(JSContext* cx, size_t thingSize,
                                         AllocKind kind) {
  MOZ_ASSERT(IsNurseryAllocable(kind));
  MOZ_ASSERT(cx->isNurseryAllocAllowed());
  MOZ_ASSERT(!cx->isHelperThreadContext());
  MOZ_ASSERT(!cx->isNurseryAllocSuppressed());
  MOZ_ASSERT(!cx->zone()->isAtomsZone());

  Cell* cell = cx->nursery().allocateString(cx->zone(), thingSize, kind);
  if (cell) {
    return static_cast<JSString*>(cell);
  }

  if (allowGC && !cx->suppressGC) {
    cx->runtime()->gc.minorGC(JS::GCReason::OUT_OF_NURSERY);

    // Exceeding gcMaxBytes while tenuring can disable the Nursery, and
    // other heuristics can disable nursery strings for this zone.
    if (cx->nursery().isEnabled() && cx->zone()->allocNurseryStrings) {
      return static_cast<JSString*>(
          cx->nursery().allocateString(cx->zone(), thingSize, kind));
    }
  }
  return nullptr;
}

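// Allocate a new string of the given StringAllocT type. Strings follow the
// same pattern as objects: the nursery is used when it is enabled for this
// zone and the tenured heap was not explicitly requested.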
template <typename StringAllocT, AllowGC allowGC /* = CanGC */>
StringAllocT* js::AllocateStringImpl(JSContext* cx, InitialHeap heap) {
  static_assert(mozilla::IsConvertible<StringAllocT*, JSString*>::value,
                "must be JSString derived");

  AllocKind kind = MapTypeToFinalizeKind<StringAllocT>::kind;
  size_t size = sizeof(StringAllocT);
  MOZ_ASSERT(size == Arena::thingSize(kind));
  MOZ_ASSERT(size == sizeof(JSString) || size == sizeof(JSFatInlineString));

  // Off-thread alloc cannot trigger GC or make runtime assertions.
  if (cx->isNurseryAllocSuppressed()) {
    StringAllocT* str =
        GCRuntime::tryNewTenuredThing<StringAllocT, NoGC>(cx, kind, size);
    if (MOZ_UNLIKELY(allowGC && !str)) {
      ReportOutOfMemory(cx);
    }
    return str;
  }

  JSRuntime* rt = cx->runtime();
  if (!rt->gc.checkAllocatorState<allowGC>(cx, kind)) {
    return nullptr;
  }

  if (cx->nursery().isEnabled() && heap != TenuredHeap &&
      cx->nursery().canAllocateStrings() && cx->zone()->allocNurseryStrings) {
    auto str = static_cast<StringAllocT*>(
        rt->gc.tryNewNurseryString<allowGC>(cx, size, kind));
    if (str) {
      return str;
    }

    // Our most common non-jit allocation path is NoGC; thus, if we fail the
    // alloc and cannot GC, we *must* return nullptr here so that the caller
    // will do a CanGC allocation to clear the nursery. Failing to do so will
    // cause all allocations on this path to land in Tenured, and we will not
    // get the benefit of the nursery.
    if (!allowGC) {
      return nullptr;
    }
  }

  return GCRuntime::tryNewTenuredThing<StringAllocT, allowGC>(cx, kind, size);
}

// Attempt to allocate a new BigInt out of the nursery. If there is not enough
// room in the nursery or there is an OOM, this method will return nullptr.
template <AllowGC allowGC>
JS::BigInt* GCRuntime::tryNewNurseryBigInt(JSContext* cx, size_t thingSize,
                                           AllocKind kind) {
  MOZ_ASSERT(IsNurseryAllocable(kind));
  MOZ_ASSERT(cx->isNurseryAllocAllowed());
  MOZ_ASSERT(!cx->isHelperThreadContext());
  MOZ_ASSERT(!cx->isNurseryAllocSuppressed());
  MOZ_ASSERT(!cx->zone()->isAtomsZone());

  Cell* cell = cx->nursery().allocateBigInt(cx->zone(), thingSize, kind);
  if (cell) {
    return static_cast<JS::BigInt*>(cell);
  }

  if (allowGC && !cx->suppressGC) {
    cx->runtime()->gc.minorGC(JS::GCReason::OUT_OF_NURSERY);

    // Exceeding gcMaxBytes while tenuring can disable the Nursery, and
    // other heuristics can disable nursery BigInts for this zone.
    if (cx->nursery().isEnabled() && cx->zone()->allocNurseryBigInts) {
      return static_cast<JS::BigInt*>(
          cx->nursery().allocateBigInt(cx->zone(), thingSize, kind));
    }
  }
  return nullptr;
}

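// Allocate a new BigInt. This mirrors AllocateStringImpl above: nursery first
// when BigInt nursery allocation is enabled for this zone, tenured heap
// otherwise.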
template <AllowGC allowGC /* = CanGC */>
JS::BigInt* js::AllocateBigInt(JSContext* cx, InitialHeap heap) {
  AllocKind kind = MapTypeToFinalizeKind<JS::BigInt>::kind;
  size_t size = sizeof(JS::BigInt);
  MOZ_ASSERT(size == Arena::thingSize(kind));

  // Off-thread alloc cannot trigger GC or make runtime assertions.
  if (cx->isNurseryAllocSuppressed()) {
    JS::BigInt* bi =
        GCRuntime::tryNewTenuredThing<JS::BigInt, NoGC>(cx, kind, size);
    if (MOZ_UNLIKELY(allowGC && !bi)) {
      ReportOutOfMemory(cx);
    }
    return bi;
  }

  JSRuntime* rt = cx->runtime();
  if (!rt->gc.checkAllocatorState<allowGC>(cx, kind)) {
    return nullptr;
  }

  if (cx->nursery().isEnabled() && heap != TenuredHeap &&
      cx->nursery().canAllocateBigInts() && cx->zone()->allocNurseryBigInts) {
    auto bi = static_cast<JS::BigInt*>(
        rt->gc.tryNewNurseryBigInt<allowGC>(cx, size, kind));
    if (bi) {
      return bi;
    }

    // Our most common non-jit allocation path is NoGC; thus, if we fail the
    // alloc and cannot GC, we *must* return nullptr here so that the caller
    // will do a CanGC allocation to clear the nursery. Failing to do so will
    // cause all allocations on this path to land in Tenured, and we will not
    // get the benefit of the nursery.
    if (!allowGC) {
      return nullptr;
    }
  }

  return GCRuntime::tryNewTenuredThing<JS::BigInt, allowGC>(cx, kind, size);
}
template JS::BigInt* js::AllocateBigInt<NoGC>(JSContext* cx,
                                              gc::InitialHeap heap);
template JS::BigInt* js::AllocateBigInt<CanGC>(JSContext* cx,
                                               gc::InitialHeap heap);

#define DECL_ALLOCATOR_INSTANCES(allocKind, traceKind, type, sizedType, \
                                 bgfinal, nursery, compact)             \
  template type* js::AllocateStringImpl<type, NoGC>(JSContext * cx,     \
                                                    InitialHeap heap);  \
  template type* js::AllocateStringImpl<type, CanGC>(JSContext * cx,    \
                                                     InitialHeap heap);
FOR_EACH_NURSERY_STRING_ALLOCKIND(DECL_ALLOCATOR_INSTANCES)
#undef DECL_ALLOCATOR_INSTANCES

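// Allocate a tenured cell of a kind that is neither a JSObject nor nursery
// allocable; see FOR_EACH_NONOBJECT_NONNURSERY_ALLOCKIND below for the set of
// kinds instantiated here.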
template <typename T, AllowGC allowGC /* = CanGC */>
T* js::Allocate(JSContext* cx) {
  static_assert(!mozilla::IsConvertible<T*, JSObject*>::value,
                "must not be JSObject derived");
  static_assert(
      sizeof(T) >= MinCellSize,
      "All allocations must be at least the allocator-imposed minimum size.");

  AllocKind kind = MapTypeToFinalizeKind<T>::kind;
  size_t thingSize = sizeof(T);
  MOZ_ASSERT(thingSize == Arena::thingSize(kind));

  if (!cx->isHelperThreadContext()) {
    if (!cx->runtime()->gc.checkAllocatorState<allowGC>(cx, kind)) {
      return nullptr;
    }
  }

  return GCRuntime::tryNewTenuredThing<T, allowGC>(cx, kind, thingSize);
}

#define DECL_ALLOCATOR_INSTANCES(allocKind, traceKind, type, sizedType, \
                                 bgFinal, nursery, compact)             \
  template type* js::Allocate<type, NoGC>(JSContext * cx);              \
  template type* js::Allocate<type, CanGC>(JSContext * cx);
FOR_EACH_NONOBJECT_NONNURSERY_ALLOCKIND(DECL_ALLOCATOR_INSTANCES)
#undef DECL_ALLOCATOR_INSTANCES

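// The core tenured allocation path: bump-allocate from the current free list,
// refill the free list from an arena if it is empty, and as a last resort run
// a last-ditch GC and retry once before reporting OOM.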
template <typename T, AllowGC allowGC>
/* static */
T* GCRuntime::tryNewTenuredThing(JSContext* cx, AllocKind kind,
                                 size_t thingSize) {
  // Bump allocate in the arena's current free-list span.
  T* t = reinterpret_cast<T*>(cx->freeLists().allocate(kind));
  if (MOZ_UNLIKELY(!t)) {
    // Get the next available free list and allocate out of it. This may
    // acquire a new arena, which will lock the chunk list. If there are no
    // chunks available it may also allocate new memory directly.
    t = reinterpret_cast<T*>(refillFreeListFromAnyThread(cx, kind));

    if (MOZ_UNLIKELY(!t)) {
      if (allowGC) {
        cx->runtime()->gc.attemptLastDitchGC(cx);
        t = tryNewTenuredThing<T, NoGC>(cx, kind, thingSize);
      }
      if (!t) {
        if (allowGC) {
          ReportOutOfMemory(cx);
        }
        return nullptr;
      }
    }
  }

  checkIncrementalZoneState(cx, t);
  gcTracer.traceTenuredAlloc(t, kind);
  // We count this regardless of the profiler's state, assuming that it costs
  // just as much to count it, as to check the profiler's state and decide not
  // to count it.
  cx->noteTenuredAlloc();
  return t;
}

void GCRuntime::attemptLastDitchGC(JSContext* cx) {
  // Either there was no memory available for a new chunk or the heap hit its
  // size limit. Try to perform an all-compartments, non-incremental, shrinking
  // GC and wait for it to finish.

  if (cx->isHelperThreadContext()) {
    return;
  }

  if (!lastLastDitchTime.IsNull() &&
      TimeStamp::Now() - lastLastDitchTime <= tunables.minLastDitchGCPeriod()) {
    return;
  }

  JS::PrepareForFullGC(cx);
  gc(GC_SHRINK, JS::GCReason::LAST_DITCH);
  waitBackgroundAllocEnd();
  waitBackgroundFreeEnd();

  lastLastDitchTime = mozilla::TimeStamp::Now();
}

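// Perform the checks that must happen before any allocation: give the GC a
// chance to run, assert that this kind may be allocated in the current zone,
// and honour simulated OOM failures when testing requests them.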
template <AllowGC allowGC>
bool GCRuntime::checkAllocatorState(JSContext* cx, AllocKind kind) {
  if (allowGC) {
    if (!gcIfNeededAtAllocation(cx)) {
      return false;
    }
  }

#if defined(JS_GC_ZEAL) || defined(DEBUG)
  MOZ_ASSERT_IF(cx->zone()->isAtomsZone(),
                kind == AllocKind::ATOM || kind == AllocKind::FAT_INLINE_ATOM ||
                    kind == AllocKind::SYMBOL || kind == AllocKind::JITCODE ||
                    kind == AllocKind::SCOPE);
  MOZ_ASSERT_IF(!cx->zone()->isAtomsZone(),
                kind != AllocKind::ATOM && kind != AllocKind::FAT_INLINE_ATOM);
  MOZ_ASSERT_IF(cx->zone()->isSelfHostingZone(),
                !rt->parentRuntime && !selfHostingZoneFrozen);
  MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
#endif

  // Crash if we perform a GC action when it is not safe.
  if (allowGC && !cx->suppressGC) {
    cx->verifyIsSafeToGC();
  }

  // For testing out of memory conditions
  if (js::oom::ShouldFailWithOOM()) {
    // If we are doing a fallible allocation, percolate up the OOM
    // instead of reporting it.
    if (allowGC) {
      ReportOutOfMemory(cx);
    }
    return false;
  }

  return true;
}

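// Run any GC work that is due before we allocate: zeal-triggered GCs,
// collections requested via an interrupt, and a full non-incremental GC if an
// in-progress incremental GC is not keeping up with the allocation rate.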
bool GCRuntime::gcIfNeededAtAllocation(JSContext* cx) {
#ifdef JS_GC_ZEAL
  if (needZealousGC()) {
    runDebugGC();
  }
#endif

  // Invoking the interrupt callback can fail and we can't usefully
  // handle that here. Just check in case we need to collect instead.
  if (cx->hasAnyPendingInterrupt()) {
    gcIfRequested();
  }

  // If we have grown past our non-incremental heap threshold while in the
  // middle of an incremental GC, we're growing faster than we're GCing, so stop
  // the world and do a full, non-incremental GC right now, if possible.
  Zone* zone = cx->zone();
  if (isIncrementalGCInProgress() &&
      zone->gcHeapSize.bytes() >
          zone->gcHeapThreshold.nonIncrementalTriggerBytes(tunables)) {
    PrepareZoneForGC(cx->zone());
    gc(GC_NORMAL, JS::GCReason::INCREMENTAL_TOO_SLOW);
  }

  return true;
}

template <typename T>
/* static */
void GCRuntime::checkIncrementalZoneState(JSContext* cx, T* t) {
#ifdef DEBUG
  if (cx->isHelperThreadContext() || !t) {
    return;
  }

  TenuredCell* cell = &t->asTenured();
  Zone* zone = cell->zone();
  if (zone->isGCMarking() || zone->isGCSweeping()) {
    MOZ_ASSERT(cell->isMarkedBlack());
  } else {
    MOZ_ASSERT(!cell->isMarkedAny());
  }
#endif
}

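// Allocate a tenured cell while a GC is in progress (used when compacting;
// see refillFreeListInGC below). This path must not fail, so an allocation
// failure here is a crash.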
TenuredCell* js::gc::AllocateCellInGC(Zone* zone, AllocKind thingKind) {
  TenuredCell* cell = zone->arenas.allocateFromFreeList(thingKind);
  if (!cell) {
    AutoEnterOOMUnsafeRegion oomUnsafe;
    cell = GCRuntime::refillFreeListInGC(zone, thingKind);
    if (!cell) {
      oomUnsafe.crash(ChunkSize, "Failed to allocate new chunk during GC");
    }
  }
  return cell;
}

// /////////// Arena -> Thing Allocator //////////////////////////////////////

void GCRuntime::startBackgroundAllocTaskIfIdle() {
  AutoLockHelperThreadState lock;
  if (!allocTask.wasStarted(lock)) {
    // Join the previous invocation of the task. This will return immediately
    // if the thread has never been started.
    allocTask.joinWithLockHeld(lock);
    allocTask.startWithLockHeld(lock);
  }
}

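// Refill the context's free list for the given kind and allocate from it.
// Main-thread and helper-thread callers are handled separately below because
// they can make different assumptions about what the GC is doing.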
/* static */
TenuredCell* GCRuntime::refillFreeListFromAnyThread(JSContext* cx,
                                                    AllocKind thingKind) {
  MOZ_ASSERT(cx->freeLists().isEmpty(thingKind));

  if (!cx->isHelperThreadContext()) {
    return refillFreeListFromMainThread(cx, thingKind);
  }

  return refillFreeListFromHelperThread(cx, thingKind);
}

/* static */
TenuredCell* GCRuntime::refillFreeListFromMainThread(JSContext* cx,
                                                     AllocKind thingKind) {
  // It should not be possible to allocate on the main thread while we are
  // inside a GC.
  MOZ_ASSERT(!JS::RuntimeHeapIsBusy(), "allocating while under GC");

  return cx->zone()->arenas.refillFreeListAndAllocate(
      cx->freeLists(), thingKind, ShouldCheckThresholds::CheckThresholds);
}

/* static */
TenuredCell* GCRuntime::refillFreeListFromHelperThread(JSContext* cx,
                                                       AllocKind thingKind) {
  // A GC may be happening on the main thread, but zones used by off thread
  // tasks are never collected.
  Zone* zone = cx->zone();
  MOZ_ASSERT(!zone->wasGCStarted());

  return zone->arenas.refillFreeListAndAllocate(
      cx->freeLists(), thingKind, ShouldCheckThresholds::CheckThresholds);
}

/* static */
TenuredCell* GCRuntime::refillFreeListInGC(Zone* zone, AllocKind thingKind) {
  // Called by compacting GC to refill a free list while we are in a GC.
  MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
  MOZ_ASSERT_IF(!JS::RuntimeHeapIsMinorCollecting(),
                !zone->runtimeFromMainThread()->gc.isBackgroundSweeping());

  return zone->arenas.refillFreeListAndAllocate(
      zone->arenas.freeLists(), thingKind,
      ShouldCheckThresholds::DontCheckThresholds);
}

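// Find or create an arena with free space for the given kind, make it the
// current free list, and allocate from it. Returns nullptr if no chunk or
// arena can be obtained, for example because the heap size limit was hit
// while thresholds are being checked.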
TenuredCell* ArenaLists::refillFreeListAndAllocate(
    FreeLists& freeLists, AllocKind thingKind,
    ShouldCheckThresholds checkThresholds) {
  MOZ_ASSERT(freeLists.isEmpty(thingKind));

  JSRuntime* rt = runtimeFromAnyThread();

  mozilla::Maybe<AutoLockGCBgAlloc> maybeLock;

  // See if we can proceed without taking the GC lock.
  if (concurrentUse(thingKind) != ConcurrentUse::None) {
    maybeLock.emplace(rt);
  }

  ArenaList& al = arenaLists(thingKind);
  Arena* arena = al.takeNextArena();
  if (arena) {
    // Empty arenas should be immediately freed.
    MOZ_ASSERT(!arena->isEmpty());

    return freeLists.setArenaAndAllocate(arena, thingKind);
  }

  // Parallel threads have their own ArenaLists, but chunks are shared;
  // if we haven't already, take the GC lock now to avoid racing.
  if (maybeLock.isNothing()) {
    maybeLock.emplace(rt);
  }

  Chunk* chunk = rt->gc.pickChunk(maybeLock.ref());
  if (!chunk) {
    return nullptr;
  }

  // Although our chunk should definitely have enough space for another arena,
  // there are other valid reasons why Chunk::allocateArena() may fail.
  arena = rt->gc.allocateArena(chunk, zone_, thingKind, checkThresholds,
                               maybeLock.ref());
  if (!arena) {
    return nullptr;
  }

  MOZ_ASSERT(al.isCursorAtEnd());
  al.insertBeforeCursor(arena);

  return freeLists.setArenaAndAllocate(arena, thingKind);
}

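// Install the arena's first free span as the free list for this kind and
// bump-allocate one cell from it; the arena is known to have free space, so
// this cannot fail.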
inline TenuredCell* FreeLists::setArenaAndAllocate(Arena* arena,
                                                   AllocKind kind) {
#ifdef DEBUG
  auto old = freeLists_[kind];
  if (!old->isEmpty()) {
    old->getArena()->checkNoMarkedFreeCells();
  }
#endif

  FreeSpan* span = arena->getFirstFreeSpan();
  freeLists_[kind] = span;

  if (MOZ_UNLIKELY(arena->zone->wasGCStarted())) {
    arena->arenaAllocatedDuringGC();
  }

  TenuredCell* thing = span->allocate(Arena::thingSize(kind));
  MOZ_ASSERT(thing);  // This allocation is infallible.

  return thing;
}

void Arena::arenaAllocatedDuringGC() {
  // Ensure that anything allocated during the mark or sweep phases of an
  // incremental GC will be marked black by pre-marking all free cells in the
  // arena we are about to allocate from.

  if (zone->needsIncrementalBarrier() || zone->isGCSweeping()) {
    for (ArenaFreeCellIter iter(this); !iter.done(); iter.next()) {
      TenuredCell* cell = iter.getCell();
      MOZ_ASSERT(!cell->isMarkedAny());
      cell->markBlack();
    }
  }
}

void GCRuntime::setParallelAtomsAllocEnabled(bool enabled) {
  // This can only be changed on the main thread otherwise we could race.
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
  MOZ_ASSERT(enabled == rt->hasHelperThreadZones());

  atomsZone->arenas.setParallelAllocEnabled(enabled);
}

void ArenaLists::setParallelAllocEnabled(bool enabled) {
  MOZ_ASSERT(zone_->isAtomsZone());

  static const ConcurrentUse states[2] = {ConcurrentUse::None,
                                          ConcurrentUse::ParallelAlloc};

  for (auto kind : AllAllocKinds()) {
    MOZ_ASSERT(concurrentUse(kind) == states[!enabled]);
    concurrentUse(kind) = states[enabled];
  }
}

// /////////// Chunk -> Arena Allocator //////////////////////////////////////

bool GCRuntime::wantBackgroundAllocation(const AutoLockGC& lock) const {
  // To minimize memory waste, we do not want to run the background chunk
  // allocation if we already have some empty chunks or when the runtime has
  // a small heap size (and therefore likely has a small growth rate).
  return allocTask.enabled() &&
         emptyChunks(lock).count() < tunables.minEmptyChunkCount(lock) &&
         (fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
}

Arena* GCRuntime::allocateArena(Chunk* chunk, Zone* zone, AllocKind thingKind,
                                ShouldCheckThresholds checkThresholds,
                                const AutoLockGC& lock) {
  MOZ_ASSERT(chunk->hasAvailableArenas());

  // Fail the allocation if we are over our heap size limits.
  if ((checkThresholds != ShouldCheckThresholds::DontCheckThresholds) &&
      (heapSize.bytes() >= tunables.gcMaxBytes()))
    return nullptr;

  Arena* arena = chunk->allocateArena(this, zone, thingKind, lock);
  zone->gcHeapSize.addGCArena();

  // Trigger an incremental slice if needed.
  if (checkThresholds != ShouldCheckThresholds::DontCheckThresholds) {
    maybeAllocTriggerZoneGC(zone, ArenaSize);
  }

  return arena;
}

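// Hand out an arena from this chunk, preferring an already-committed free
// arena and otherwise recommitting a decommitted one.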
Arena* Chunk::allocateArena(GCRuntime* gc, Zone* zone, AllocKind thingKind,
                            const AutoLockGC& lock) {
  Arena* arena = info.numArenasFreeCommitted > 0 ? fetchNextFreeArena(gc)
                                                 : fetchNextDecommittedArena();
  arena->init(zone, thingKind, lock);
  updateChunkListAfterAlloc(gc, lock);
  return arena;
}

inline void GCRuntime::updateOnFreeArenaAlloc(const ChunkInfo& info) {
  MOZ_ASSERT(info.numArenasFreeCommitted <= numArenasFreeCommitted);
  --numArenasFreeCommitted;
}

Arena* Chunk::fetchNextFreeArena(GCRuntime* gc) {
  MOZ_ASSERT(info.numArenasFreeCommitted > 0);
  MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);

  Arena* arena = info.freeArenasHead;
  info.freeArenasHead = arena->next;
  --info.numArenasFreeCommitted;
  --info.numArenasFree;
  gc->updateOnFreeArenaAlloc(info);

  return arena;
}

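// Take a decommitted arena from this chunk and recommit its pages so that it
// can be handed out for allocation.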
Arena* Chunk::fetchNextDecommittedArena() {
  MOZ_ASSERT(info.numArenasFreeCommitted == 0);
  MOZ_ASSERT(info.numArenasFree > 0);

  unsigned offset = findDecommittedArenaOffset();
  info.lastDecommittedArenaOffset = offset + 1;
  --info.numArenasFree;
  decommittedArenas.unset(offset);

  Arena* arena = &arenas[offset];
  MarkPagesInUseSoft(arena, ArenaSize);
  arena->setAsNotAllocated();

  return arena;
}

/*
 * Search for and return the next decommitted Arena. Our goal is to keep
 * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
 * it to the most recently freed arena when we free, and forcing it to
 * the last alloc + 1 when we allocate.
 */
uint32_t Chunk::findDecommittedArenaOffset() {
  /* Note: lastFreeArenaOffset can be past the end of the list. */
  for (unsigned i = info.lastDecommittedArenaOffset; i < ArenasPerChunk; i++) {
    if (decommittedArenas.get(i)) {
      return i;
    }
  }
  for (unsigned i = 0; i < info.lastDecommittedArenaOffset; i++) {
    if (decommittedArenas.get(i)) {
      return i;
    }
  }
  MOZ_CRASH("No decommitted arenas found.");
}

// /////////// System -> Chunk Allocator /////////////////////////////////////

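// Reuse an empty chunk if one is available, otherwise map a new chunk from
// the system. This may also kick off the background chunk allocation task.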
Chunk* GCRuntime::getOrAllocChunk(AutoLockGCBgAlloc& lock) {
  Chunk* chunk = emptyChunks(lock).pop();
  if (!chunk) {
    chunk = Chunk::allocate(this);
    if (!chunk) {
      return nullptr;
    }
    MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
  }

  if (wantBackgroundAllocation(lock)) {
    lock.tryToStartBackgroundAllocation();
  }

  return chunk;
}

void GCRuntime::recycleChunk(Chunk* chunk, const AutoLockGC& lock) {
  AlwaysPoison(&chunk->trailer, JS_FREED_CHUNK_PATTERN, sizeof(ChunkTrailer),
               MemCheckKind::MakeNoAccess);
  emptyChunks(lock).push(chunk);
}

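// Return a chunk that has space for a new arena, initializing a fresh chunk
// and adding it to the available list if necessary.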
Chunk* GCRuntime::pickChunk(AutoLockGCBgAlloc& lock) {
  if (availableChunks(lock).count()) {
    return availableChunks(lock).head();
  }

  Chunk* chunk = getOrAllocChunk(lock);
  if (!chunk) {
    return nullptr;
  }

  chunk->init(this);
  MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
  MOZ_ASSERT(chunk->unused());
  MOZ_ASSERT(!fullChunks(lock).contains(chunk));
  MOZ_ASSERT(!availableChunks(lock).contains(chunk));

  availableChunks(lock).push(chunk);

  return chunk;
}

BackgroundAllocTask::BackgroundAllocTask(GCRuntime* gc, ChunkPool& pool)
    : GCParallelTaskHelper(gc),
      chunkPool_(pool),
      enabled_(CanUseExtraThreads() && GetCPUCount() >= 2) {}

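// Allocate and initialize chunks on a background thread while
// wantBackgroundAllocation() reports that more are wanted, pushing them into
// the task's chunk pool.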
void BackgroundAllocTask::run() {
  TraceLoggerThread* logger = TraceLoggerForCurrentThread();
  AutoTraceLog logAllocation(logger, TraceLogger_GCAllocation);

  AutoLockGC lock(gc);
  while (!cancel_ && gc->wantBackgroundAllocation(lock)) {
    Chunk* chunk;
    {
      AutoUnlockGC unlock(lock);
      chunk = Chunk::allocate(gc);
      if (!chunk) {
        break;
      }
      chunk->init(gc);
    }
    chunkPool_.ref().push(chunk);
  }
}

/* static */
Chunk* Chunk::allocate(GCRuntime* gc) {
  Chunk* chunk = static_cast<Chunk*>(MapAlignedPages(ChunkSize, ChunkSize));
  if (!chunk) {
    return nullptr;
  }
  gc->stats().count(gcstats::COUNT_NEW_CHUNK);
  return chunk;
}

void Chunk::init(GCRuntime* gc) {
  /* The chunk may still have some regions marked as no-access. */
  MOZ_MAKE_MEM_UNDEFINED(this, ChunkSize);

  /*
   * Poison the chunk. Note that decommitAllArenas() below will mark the
   * arenas as inaccessible (for memory sanitizers).
   */
  Poison(this, JS_FRESH_TENURED_PATTERN, ChunkSize,
         MemCheckKind::MakeUndefined);

  /*
   * We clear the bitmap to guard against JS::GCThingIsMarkedGray being called
   * on uninitialized data, which would happen before the first GC cycle.
   */
  bitmap.clear();

  /*
   * Decommit the arenas. We do this after poisoning so that if the OS does
   * not have to recycle the pages, we still get the benefit of poisoning.
   */
  decommitAllArenas();

  /* Initialize the chunk info. */
  info.init();
  new (&trailer) ChunkTrailer(gc->rt);

  /* The rest of info fields are initialized in pickChunk. */
}

void Chunk::decommitAllArenas() {
  decommittedArenas.clear(true);
  MarkPagesUnusedSoft(&arenas[0], ArenasPerChunk * ArenaSize);

  info.freeArenasHead = nullptr;
  info.lastDecommittedArenaOffset = 0;
  info.numArenasFree = ArenasPerChunk;
  info.numArenasFreeCommitted = 0;
}