/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef ds_LifoAlloc_h
#define ds_LifoAlloc_h

#include "mozilla/Attributes.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/MemoryChecking.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Move.h"
#include "mozilla/PodOperations.h"
#include "mozilla/TemplateLib.h"
#include "mozilla/TypeTraits.h"

#include <algorithm>
#include <new>
#include <stddef.h>  // size_t

// This data structure supports stacky LIFO allocation (mark/release and
// LifoAllocScope). It does not maintain one contiguous segment; instead, it
// maintains a bunch of linked memory segments. In order to prevent malloc/free
// thrashing, unused segments are deallocated when garbage collection occurs.
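//
// Illustrative sketch (not part of this header): the typical mark/release
// pattern, assuming a LifoAlloc constructed with a 4 KiB default chunk size,
// looks roughly like this:
//
//   LifoAlloc lifo(4 * 1024);
//   LifoAlloc::Mark m = lifo.mark();
//   uint32_t* scratch = lifo.newArray<uint32_t>(128);
//   if (scratch) {
//     // ... use scratch; no per-allocation free is needed ...
//   }
//   lifo.release(m);  // unwinds every allocation made since mark()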

#include "js/UniquePtr.h"
#include "util/Memory.h"
#include "util/Poison.h"

namespace js {

namespace detail {

template <typename T, typename D>
class SingleLinkedList;

template <typename T, typename D = JS::DeletePolicy<T>>
class SingleLinkedListElement {
  friend class SingleLinkedList<T, D>;
  js::UniquePtr<T, D> next_;

 public:
  SingleLinkedListElement() : next_(nullptr) {}
  ~SingleLinkedListElement() { MOZ_ASSERT(!next_); }

  T* next() const { return next_.get(); }
};

// Singly linked list which uses UniquePtr to hold the next pointers.
// UniquePtr is used to ensure that none of the elements are used
// simultaneously in two different lists.
template <typename T, typename D = JS::DeletePolicy<T>>
class SingleLinkedList {
 private:
  // First element of the list, which owns the next element and ensures that
  // this list is the only owner of the element.
  UniquePtr<T, D> head_;

  // Weak pointer to the last element of the list.
  T* last_;

  void assertInvariants() {
    MOZ_ASSERT(bool(head_) == bool(last_));
    MOZ_ASSERT_IF(last_, !last_->next_);
  }

 public:
  SingleLinkedList() : head_(nullptr), last_(nullptr) { assertInvariants(); }

  SingleLinkedList(SingleLinkedList&& other)
      : head_(std::move(other.head_)), last_(other.last_) {
    other.last_ = nullptr;
    assertInvariants();
    other.assertInvariants();
  }

  ~SingleLinkedList() {
    MOZ_ASSERT(!head_);
    MOZ_ASSERT(!last_);
  }

  // Move the elements of the |other| list into the current one, and implicitly
  // remove all the elements of the current list.
  SingleLinkedList& operator=(SingleLinkedList&& other) {
    head_ = std::move(other.head_);
    last_ = other.last_;
    other.last_ = nullptr;
    assertInvariants();
    other.assertInvariants();
    return *this;
  }

  bool empty() const { return !last_; }

  // Iterates over the elements of the list; this is used to avoid raw
  // manipulation of pointers as much as possible.
  class Iterator {
    T* current_;

   public:
    explicit Iterator(T* current) : current_(current) {}

    T& operator*() const { return *current_; }
    T* operator->() const { return current_; }
    T* get() const { return current_; }

    const Iterator& operator++() {
      current_ = current_->next();
      return *this;
    }

    bool operator!=(const Iterator& other) const {
      return current_ != other.current_;
    }
    bool operator==(const Iterator& other) const {
      return current_ == other.current_;
    }
  };

  Iterator begin() const { return Iterator(head_.get()); }
  Iterator end() const { return Iterator(nullptr); }
  Iterator last() const { return Iterator(last_); }

  // Split the list in two singly linked lists after the element given as
  // argument. The front of the list remains in the current list, while the
  // back goes in the newly created linked list.
  //
  // This is used for example to extract one element from a list. (see
  // LifoAlloc::getOrCreateChunk)
  SingleLinkedList splitAfter(T* newLast) {
    MOZ_ASSERT(newLast);
    SingleLinkedList result;
    if (newLast->next_) {
      result.head_ = std::move(newLast->next_);
      result.last_ = last_;
      last_ = newLast;
    }
    assertInvariants();
    result.assertInvariants();
    return result;
  }

  void pushFront(UniquePtr<T, D>&& elem) {
    if (!last_) {
      last_ = elem.get();
    }
    elem->next_ = std::move(head_);
    head_ = std::move(elem);
    assertInvariants();
  }

  void append(UniquePtr<T, D>&& elem) {
    if (last_) {
      last_->next_ = std::move(elem);
      last_ = last_->next_.get();
    } else {
      head_ = std::move(elem);
      last_ = head_.get();
    }
    assertInvariants();
  }
  void appendAll(SingleLinkedList&& list) {
    if (list.empty()) {
      return;
    }
    if (last_) {
      last_->next_ = std::move(list.head_);
    } else {
      head_ = std::move(list.head_);
    }
    last_ = list.last_;
    list.last_ = nullptr;
    assertInvariants();
    list.assertInvariants();
  }
  void steal(SingleLinkedList&& list) {
    head_ = std::move(list.head_);
    last_ = list.last_;
    list.last_ = nullptr;
    assertInvariants();
    list.assertInvariants();
  }
  void prependAll(SingleLinkedList&& list) {
    list.appendAll(std::move(*this));
    steal(std::move(list));
  }
  UniquePtr<T, D> popFirst() {
    MOZ_ASSERT(head_);
    UniquePtr<T, D> result = std::move(head_);
    head_ = std::move(result->next_);
    if (!head_) {
      last_ = nullptr;
    }
    assertInvariants();
    return result;
  }
};
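
// Illustrative sketch (not part of this header): ownership travels through the
// UniquePtr links, so an element can only ever live in one list at a time.
// Assuming the BumpChunk type defined below:
//
//   SingleLinkedList<BumpChunk> list;
//   list.append(BumpChunk::newWithCapacity(4096));  // list now owns the chunk
//   UniquePtr<BumpChunk> first = list.popFirst();   // ownership moves back out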

static const size_t LIFO_ALLOC_ALIGN = 8;

MOZ_ALWAYS_INLINE
uint8_t* AlignPtr(uint8_t* orig) {
  static_assert(mozilla::IsPowerOfTwo(LIFO_ALLOC_ALIGN),
                "LIFO_ALLOC_ALIGN must be a power of two");

  uint8_t* result = (uint8_t*)AlignBytes(uintptr_t(orig), LIFO_ALLOC_ALIGN);
  MOZ_ASSERT(uintptr_t(result) % LIFO_ALLOC_ALIGN == 0);
  return result;
}
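
// For example, with LIFO_ALLOC_ALIGN == 8, AlignPtr rounds a pointer up to the
// next multiple of 8: an address ending in 0x13 becomes 0x18, while an
// already-aligned address such as 0x18 is returned unchanged.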

// A Chunk represents a single memory allocation made with the system
// allocator. As the owner of the memory, it is responsible for the allocation
// and the deallocation.
//
// This structure is only move-able, but not copyable.
class BumpChunk : public SingleLinkedListElement<BumpChunk> {
 private:
  // Pointer to the last byte allocated in this chunk.
  uint8_t* bump_;
  // Pointer to the first byte after this chunk.
  uint8_t* const capacity_;

#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
  // Magic number used to check against poisoned values.
  const uintptr_t magic_ : 24;
  static constexpr uintptr_t magicNumber = uintptr_t(0x4c6966);
#endif

#if defined(DEBUG)
#  define LIFO_CHUNK_PROTECT 1
#endif

  // Poison the memory with memset, in order to catch errors due to
  // use-after-free, with JS_LIFO_UNDEFINED_PATTERN pattern, or to catch
  // use-before-init with JS_LIFO_UNINITIALIZED_PATTERN.
#if defined(DEBUG)
#  define LIFO_HAVE_MEM_CHECKS 1
#  define LIFO_MAKE_MEM_NOACCESS(addr, size)       \
    do {                                           \
      uint8_t* base = (addr);                      \
      size_t sz = (size);                          \
      MOZ_MAKE_MEM_UNDEFINED(base, sz);            \
      memset(base, JS_LIFO_UNDEFINED_PATTERN, sz); \
      MOZ_MAKE_MEM_NOACCESS(base, sz);             \
    } while (0)

#  define LIFO_MAKE_MEM_UNDEFINED(addr, size)          \
    do {                                               \
      uint8_t* base = (addr);                          \
      size_t sz = (size);                              \
      MOZ_MAKE_MEM_UNDEFINED(base, sz);                \
      memset(base, JS_LIFO_UNINITIALIZED_PATTERN, sz); \
      MOZ_MAKE_MEM_UNDEFINED(base, sz);                \
    } while (0)

#elif defined(MOZ_HAVE_MEM_CHECKS)
#  define LIFO_HAVE_MEM_CHECKS 1
#  define LIFO_MAKE_MEM_NOACCESS(addr, size) \
    MOZ_MAKE_MEM_NOACCESS((addr), (size))
#  define LIFO_MAKE_MEM_UNDEFINED(addr, size) \
    MOZ_MAKE_MEM_UNDEFINED((addr), (size))
#endif

#ifdef LIFO_HAVE_MEM_CHECKS
  // Red zone reserved after each allocation.
  static constexpr size_t RedZoneSize = 16;
#else
  static constexpr size_t RedZoneSize = 0;
#endif

  void assertInvariants() {
    MOZ_DIAGNOSTIC_ASSERT(magic_ == magicNumber);
    MOZ_ASSERT(begin() <= end());
    MOZ_ASSERT(end() <= capacity_);
  }

  BumpChunk& operator=(const BumpChunk&) = delete;
  BumpChunk(const BumpChunk&) = delete;

  explicit BumpChunk(uintptr_t capacity)
      : bump_(begin()),
        capacity_(base() + capacity)
#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
        ,
        magic_(magicNumber)
#endif
  {
    assertInvariants();
#if defined(LIFO_HAVE_MEM_CHECKS)
    // The memory is freshly allocated and marked as undefined by the
    // allocator of the BumpChunk. Instead, we mark this memory as
    // no-access, as it has not been allocated within the BumpChunk.
    LIFO_MAKE_MEM_NOACCESS(bump_, capacity_ - bump_);
#endif
  }

  // Cast |this| into a uint8_t* pointer.
  //
  // Warning: Are you sure you do not want to use begin() instead?
  const uint8_t* base() const { return reinterpret_cast<const uint8_t*>(this); }
  uint8_t* base() { return reinterpret_cast<uint8_t*>(this); }

  // Update the bump pointer to any value contained in this chunk, which is
  // above the private fields of this chunk.
  //
  // The memory is poisoned and flagged as no-access when the bump pointer is
  // moving backward, such as when memory is released, or when a Mark is used
  // to unwind previous allocations.
  //
  // The memory is flagged as undefined when the bump pointer is moving
  // forward.
  void setBump(uint8_t* newBump) {
    assertInvariants();
    MOZ_ASSERT(begin() <= newBump);
    MOZ_ASSERT(newBump <= capacity_);
#if defined(LIFO_HAVE_MEM_CHECKS)
    // Poison/Unpoison memory that we just free'd/allocated.
    if (bump_ > newBump) {
      LIFO_MAKE_MEM_NOACCESS(newBump, bump_ - newBump);
    } else if (newBump > bump_) {
      MOZ_ASSERT(newBump - RedZoneSize >= bump_);
      LIFO_MAKE_MEM_UNDEFINED(bump_, newBump - RedZoneSize - bump_);
      // The area [newBump - RedZoneSize, newBump) is already flagged as
      // no-access either with the previous if-branch or with the
      // BumpChunk constructor. No need to mark it twice.
    }
#endif
    bump_ = newBump;
  }

 public:
  ~BumpChunk() { release(); }

  // Returns true if this chunk contains no allocated content.
  bool empty() const { return end() == begin(); }

  // Returns the size in bytes of the allocated space. This includes
  // the space consumed by the alignment of the allocations.
  size_t used() const { return end() - begin(); }

  // These are used for manipulating a chunk as if it was a vector of bytes,
  // and used for iterating over the content of the buffer (see
  // LifoAlloc::Enum)
  inline const uint8_t* begin() const;
  inline uint8_t* begin();
  uint8_t* end() const { return bump_; }

  // This function is the only way to allocate and construct a chunk. It
  // returns a UniquePtr to the newly allocated chunk. The size given as
  // argument includes the space needed for the header of the chunk.
  static UniquePtr<BumpChunk> newWithCapacity(size_t size);

  // Report allocation.
  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return mallocSizeOf(this);
  }

  // Report allocation size.
  size_t computedSizeOfIncludingThis() const { return capacity_ - base(); }

  // Opaque type used to carry a pointer to the current location of the bump_
  // pointer, within a BumpChunk.
  class Mark {
    // Chunk which owns the pointer.
    BumpChunk* chunk_;
    // Record of the bump_ pointer of the BumpChunk.
    uint8_t* bump_;

    friend class BumpChunk;
    Mark(BumpChunk* chunk, uint8_t* bump) : chunk_(chunk), bump_(bump) {}

   public:
    Mark() : chunk_(nullptr), bump_(nullptr) {}

    BumpChunk* markedChunk() const { return chunk_; }
  };

  // Return a mark to be able to unwind future allocations with the release
  // function. (see LifoAllocScope)
  Mark mark() { return Mark(this, end()); }

  // Check if a pointer is part of the allocated data of this chunk.
  bool contains(void* ptr) const {
    // Note: We cannot check "ptr < end()" because a mark has a 0-size
    // length.
    return begin() <= ptr && ptr <= end();
  }

  // Check if a mark is part of the allocated data of this chunk.
  bool contains(Mark m) const {
    MOZ_ASSERT(m.chunk_ == this);
    return contains(m.bump_);
  }

  // Release the memory allocated in this chunk. This function does not call
  // any of the destructors.
  void release() { setBump(begin()); }

  // Release the memory allocated in this chunk since the corresponding mark
  // got created. This function does not call any of the destructors.
  void release(Mark m) {
    MOZ_RELEASE_ASSERT(contains(m));
    setBump(m.bump_);
  }

  // Given an amount, compute the total size of a chunk for it: reserved
  // space before |begin()|, space for |amount| bytes, and red-zone space
  // after those bytes that will ultimately end at |capacity_|.
  static inline MOZ_MUST_USE bool allocSizeWithRedZone(size_t amount,
                                                       size_t* size);

  // Given a bump chunk pointer, find the next base/end pointers. This is
  // useful for having consistent allocations, and iterating over known size
  // allocations.
  static uint8_t* nextAllocBase(uint8_t* e) { return detail::AlignPtr(e); }
  static uint8_t* nextAllocEnd(uint8_t* b, size_t n) {
    return b + n + RedZoneSize;
  }

  // Returns true if the unused space is large enough for an allocation of
  // |n| bytes.
  bool canAlloc(size_t n) const {
    uint8_t* newBump = nextAllocEnd(nextAllocBase(end()), n);
    // bump_ <= newBump is necessary to catch overflow.
    return bump_ <= newBump && newBump <= capacity_;
  }

  // Space remaining in the current chunk.
  size_t unused() const {
    uint8_t* aligned = nextAllocBase(end());
    if (aligned < capacity_) {
      return capacity_ - aligned;
    }
    return 0;
  }

  // Try to perform an allocation of size |n|; returns nullptr if not possible.
  MOZ_ALWAYS_INLINE
  void* tryAlloc(size_t n) {
    uint8_t* aligned = nextAllocBase(end());
    uint8_t* newBump = nextAllocEnd(aligned, n);

    if (newBump > capacity_) {
      return nullptr;
    }

    // Check for overflow.
    if (MOZ_UNLIKELY(newBump < bump_)) {
      return nullptr;
    }

    MOZ_ASSERT(canAlloc(n));  // Ensure consistency between "can" and "try".
    setBump(newBump);
    return aligned;
  }

#ifdef LIFO_CHUNK_PROTECT
  void setReadOnly();
  void setReadWrite();
#else
  void setReadOnly() const {}
  void setReadWrite() const {}
#endif
};
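
// Illustrative memory layout of a single BumpChunk (an informal sketch, not a
// normative diagram): the BumpChunk header sits at the start of the system
// allocation, allocations grow upward from begin(), and bump_ marks the end of
// the currently allocated data.
//
//   base()              begin()                  bump_ == end()    capacity_
//   | BumpChunk header  | allocated data ...     | unused space    |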

// Space reserved for the BumpChunk internal data, and the alignment of the
// first allocation content. This can be used to ensure there is enough space
// for the next allocation (see LifoAlloc::newChunkWithCapacity).
static constexpr size_t BumpChunkReservedSpace =
    AlignBytes(sizeof(BumpChunk), LIFO_ALLOC_ALIGN);

/* static */ inline MOZ_MUST_USE bool BumpChunk::allocSizeWithRedZone(
    size_t amount, size_t* size) {
  constexpr size_t SpaceBefore = BumpChunkReservedSpace;
  static_assert((SpaceBefore % LIFO_ALLOC_ALIGN) == 0,
                "reserved space presumed already aligned");

  constexpr size_t SpaceAfter = RedZoneSize;  // may be zero

  constexpr size_t SpaceBeforeAndAfter = SpaceBefore + SpaceAfter;
  static_assert(SpaceBeforeAndAfter >= SpaceBefore,
                "intermediate addition must not overflow");

  *size = SpaceBeforeAndAfter + amount;
  return MOZ_LIKELY(*size >= SpaceBeforeAndAfter);
}

inline const uint8_t* BumpChunk::begin() const {
  return base() + BumpChunkReservedSpace;
}

inline uint8_t* BumpChunk::begin() { return base() + BumpChunkReservedSpace; }

}  // namespace detail

// LIFO bump allocator: used for phase-oriented and fast LIFO allocations.
//
// Note: We leave BumpChunks latent in the set of unused chunks after they've
// been released to avoid thrashing before a GC.
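//
// Illustrative sketch (not part of this header, simplified): typical use of a
// LifoAlloc to build short-lived data structures:
//
//   LifoAlloc lifo(4 * 1024);                 // default chunk size in bytes
//   LifoAlloc::Mark m = lifo.mark();
//   MyNode* node = lifo.new_<MyNode>(1, 2);   // placement-new into the arena
//   int32_t* values = lifo.newArray<int32_t>(64);
//   // ... use node and values; there is no per-allocation free ...
//   lifo.release(m);                          // returns the space in LIFO order
//
// MyNode above is a hypothetical POD-ish type used only for illustration.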
class LifoAlloc {
  using UniqueBumpChunk = js::UniquePtr<detail::BumpChunk>;
  using BumpChunkList = detail::SingleLinkedList<detail::BumpChunk>;

  // List of chunks containing allocated data of size smaller than the default
  // chunk size. In the common case, the last chunk of this list is always
  // used to perform the allocations. When the allocation cannot be performed,
  // we move a Chunk from the unused set to the list of used chunks.
  BumpChunkList chunks_;

  // List of chunks containing allocated data where each allocation is larger
  // than the oversize threshold. Each chunk contains exactly one allocation.
  // This reduces wasted space in the chunk list.
  //
  // Oversize chunks are allocated on demand and freed as soon as they are
  // released, instead of being pushed to the unused list.
  BumpChunkList oversize_;

  // Set of unused chunks, which can be reused for future allocations.
  BumpChunkList unused_;

  size_t markCount;
  size_t defaultChunkSize_;
  size_t oversizeThreshold_;

  // Size of all chunks in chunks_, oversize_, unused_ lists.
  size_t curSize_;
  size_t peakSize_;

  // Size of all chunks containing small bump allocations. This heuristic is
  // used to compute growth rate while ignoring chunks such as oversized,
  // now-unused, or transferred (which followed their own growth patterns).
  size_t smallAllocsSize_;

#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
  bool fallibleScope_;
#endif

  void operator=(const LifoAlloc&) = delete;
  LifoAlloc(const LifoAlloc&) = delete;

  // Return a BumpChunk that can perform an allocation of at least size |n|.
  UniqueBumpChunk newChunkWithCapacity(size_t n, bool oversize);

  // Reuse or allocate a BumpChunk that can perform an allocation of at least
  // size |n|; if successful it is placed at the end of the list of |chunks_|.
  UniqueBumpChunk getOrCreateChunk(size_t n);

  void reset(size_t defaultChunkSize);

  // Append unused chunks to the end of this LifoAlloc.
  void appendUnused(BumpChunkList&& otherUnused) {
#ifdef DEBUG
    for (detail::BumpChunk& bc : otherUnused) {
      MOZ_ASSERT(bc.empty());
    }
#endif
    unused_.appendAll(std::move(otherUnused));
  }

  // Append used chunks to the end of this LifoAlloc. We act as if all the
  // chunks in |this| are used, even if they're not, so memory may be wasted.
  void appendUsed(BumpChunkList&& otherChunks) {
    chunks_.appendAll(std::move(otherChunks));
  }

  // Track the amount of space allocated in used and unused chunks.
  void incrementCurSize(size_t size) {
    curSize_ += size;
    if (curSize_ > peakSize_) {
      peakSize_ = curSize_;
    }
  }
  void decrementCurSize(size_t size) {
    MOZ_ASSERT(curSize_ >= size);
    curSize_ -= size;
    MOZ_ASSERT(curSize_ >= smallAllocsSize_);
  }

  void* allocImplColdPath(size_t n);
  void* allocImplOversize(size_t n);

  MOZ_ALWAYS_INLINE
  void* allocImpl(size_t n) {
    void* result;
    // Give oversized allocations their own chunk instead of wasting space
    // due to fragmentation at the end of a normal chunk.
    if (MOZ_UNLIKELY(n > oversizeThreshold_)) {
      return allocImplOversize(n);
    }
    if (MOZ_LIKELY(!chunks_.empty() &&
                   (result = chunks_.last()->tryAlloc(n)))) {
      return result;
    }
    return allocImplColdPath(n);
  }

  // Check for space in unused chunks or allocate a new unused chunk.
  MOZ_MUST_USE bool ensureUnusedApproximateColdPath(size_t n, size_t total);

 public:
  explicit LifoAlloc(size_t defaultChunkSize)
      : peakSize_(0)
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
        ,
        fallibleScope_(true)
#endif
  {
    reset(defaultChunkSize);
  }

  // Set the threshold to allocate data in its own chunk outside the space for
  // small allocations.
  void disableOversize() { oversizeThreshold_ = SIZE_MAX; }
  void setOversizeThreshold(size_t oversizeThreshold) {
    MOZ_ASSERT(oversizeThreshold <= defaultChunkSize_);
    oversizeThreshold_ = oversizeThreshold;
  }

  // Steal allocated chunks from |other|.
  void steal(LifoAlloc* other);

  // Append all chunks from |other|. They are removed from |other|.
  void transferFrom(LifoAlloc* other);

  // Append unused chunks from |other|. They are removed from |other|.
  void transferUnusedFrom(LifoAlloc* other);

  ~LifoAlloc() { freeAll(); }

  size_t defaultChunkSize() const { return defaultChunkSize_; }

  // Frees all held memory.
  void freeAll();

  static const unsigned HUGE_ALLOCATION = 50 * 1024 * 1024;
  void freeAllIfHugeAndUnused() {
    if (markCount == 0 && curSize_ > HUGE_ALLOCATION) {
      freeAll();
    }
  }

  MOZ_ALWAYS_INLINE
  void* alloc(size_t n) {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    // Only simulate OOMs when we are not using the LifoAlloc as an
    // infallible allocator.
    if (fallibleScope_) {
      JS_OOM_POSSIBLY_FAIL();
    }
#endif
    return allocImpl(n);
  }

  // Allocates |n| bytes if we can guarantee that we will have
  // |needed| unused bytes remaining. Returns nullptr if we can't.
  // This is useful for maintaining our ballast invariants while
  // attempting fallible allocations.
  MOZ_ALWAYS_INLINE
  void* allocEnsureUnused(size_t n, size_t needed) {
    JS_OOM_POSSIBLY_FAIL();
    MOZ_ASSERT(fallibleScope_);

    Mark m = mark();
    void* result = allocImpl(n);
    if (!ensureUnusedApproximate(needed)) {
      release(m);
      return nullptr;
    }
    cancelMark(m);
    return result;
  }

  template <typename T, typename... Args>
  MOZ_ALWAYS_INLINE T* newWithSize(size_t n, Args&&... args) {
    MOZ_ASSERT(n >= sizeof(T), "must request enough space to store a T");
    static_assert(alignof(T) <= detail::LIFO_ALLOC_ALIGN,
                  "LifoAlloc must provide enough alignment to store T");
    void* ptr = alloc(n);
    if (!ptr) {
      return nullptr;
    }

    return new (ptr) T(std::forward<Args>(args)...);
  }

  MOZ_ALWAYS_INLINE
  void* allocInfallible(size_t n) {
    AutoEnterOOMUnsafeRegion oomUnsafe;
    if (void* result = allocImpl(n)) {
      return result;
    }
    oomUnsafe.crash("LifoAlloc::allocInfallible");
    return nullptr;
  }

  // Ensures that enough space exists to satisfy N bytes worth of
  // allocation requests, not necessarily contiguous. Note that this does
  // not guarantee a successful single allocation of N bytes.
  MOZ_ALWAYS_INLINE
  MOZ_MUST_USE bool ensureUnusedApproximate(size_t n) {
    AutoFallibleScope fallibleAllocator(this);
    size_t total = 0;
    if (!chunks_.empty()) {
      total += chunks_.last()->unused();
      if (total >= n) {
        return true;
      }
    }

    return ensureUnusedApproximateColdPath(n, total);
  }

  MOZ_ALWAYS_INLINE
  void setAsInfallibleByDefault() {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    fallibleScope_ = false;
#endif
  }

  class MOZ_NON_TEMPORARY_CLASS AutoFallibleScope {
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
    LifoAlloc* lifoAlloc_;
    bool prevFallibleScope_;
    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER

   public:
    explicit AutoFallibleScope(
        LifoAlloc* lifoAlloc MOZ_GUARD_OBJECT_NOTIFIER_PARAM) {
      MOZ_GUARD_OBJECT_NOTIFIER_INIT;
      lifoAlloc_ = lifoAlloc;
      prevFallibleScope_ = lifoAlloc->fallibleScope_;
      lifoAlloc->fallibleScope_ = true;
    }

    ~AutoFallibleScope() { lifoAlloc_->fallibleScope_ = prevFallibleScope_; }
#else
   public:
    explicit AutoFallibleScope(LifoAlloc*) {}
#endif
  };

  template <typename T>
  T* newArray(size_t count) {
    static_assert(mozilla::IsPod<T>::value,
                  "T must be POD so that constructors (and destructors, "
                  "when the LifoAlloc is freed) need not be called");
    return newArrayUninitialized<T>(count);
  }

  // Create an array with uninitialized elements of type |T|.
  // The caller is responsible for initialization.
  template <typename T>
  T* newArrayUninitialized(size_t count) {
    size_t bytes;
    if (MOZ_UNLIKELY(!CalculateAllocSize<T>(count, &bytes))) {
      return nullptr;
    }
    return static_cast<T*>(alloc(bytes));
  }

  class Mark {
    friend class LifoAlloc;
    detail::BumpChunk::Mark chunk;
    detail::BumpChunk::Mark oversize;
  };

  // Note: MOZ_NEVER_INLINE is a workaround for a Clang 9 (PGO) miscompilation.
  // See bug 1583907.
  MOZ_NEVER_INLINE Mark mark();

  void release(Mark mark);

 private:
  void cancelMark(Mark mark) { markCount--; }

 public:
  void releaseAll() {
    MOZ_ASSERT(!markCount);

    // When releasing all chunks, we can no longer determine which chunks were
    // transferred and which were not, so simply clear the heuristic to zero
    // right away.
    smallAllocsSize_ = 0;

    for (detail::BumpChunk& bc : chunks_) {
      bc.release();
    }
    unused_.appendAll(std::move(chunks_));

    // On release, we free any oversize allocations instead of keeping them
    // in unused chunks.
    while (!oversize_.empty()) {
      UniqueBumpChunk bc = oversize_.popFirst();
      decrementCurSize(bc->computedSizeOfIncludingThis());
    }
  }

  // Protect the content of the LifoAlloc chunks.
#ifdef LIFO_CHUNK_PROTECT
  void setReadOnly();
  void setReadWrite();
#else
  void setReadOnly() const {}
  void setReadWrite() const {}
#endif

  // Get the total "used" (occupied bytes) count for the arena chunks.
  size_t used() const {
    size_t accum = 0;
    for (const detail::BumpChunk& chunk : chunks_) {
      accum += chunk.used();
    }
    return accum;
  }

  // Return true if the LifoAlloc does not currently contain any allocations.
  bool isEmpty() const {
    bool empty = chunks_.empty() ||
                 (chunks_.begin() == chunks_.last() && chunks_.last()->empty());
    MOZ_ASSERT_IF(!oversize_.empty(), !oversize_.last()->empty());
    return empty && oversize_.empty();
  }

  // Return the number of bytes remaining to allocate in the current chunk,
  // i.e. how many bytes we can allocate before needing a new block.
  size_t availableInCurrentChunk() const {
    if (chunks_.empty()) {
      return 0;
    }
    return chunks_.last()->unused();
  }

  // Get the total size of the arena chunks (including unused space).
  size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    size_t n = 0;
    for (const detail::BumpChunk& chunk : chunks_) {
      n += chunk.sizeOfIncludingThis(mallocSizeOf);
    }
    for (const detail::BumpChunk& chunk : oversize_) {
      n += chunk.sizeOfIncludingThis(mallocSizeOf);
    }
    for (const detail::BumpChunk& chunk : unused_) {
      n += chunk.sizeOfIncludingThis(mallocSizeOf);
    }
    return n;
  }

  // Get the total size of the arena chunks (including unused space).
  size_t computedSizeOfExcludingThis() const {
    size_t n = 0;
    for (const detail::BumpChunk& chunk : chunks_) {
      n += chunk.computedSizeOfIncludingThis();
    }
    for (const detail::BumpChunk& chunk : oversize_) {
      n += chunk.computedSizeOfIncludingThis();
    }
    for (const detail::BumpChunk& chunk : unused_) {
      n += chunk.computedSizeOfIncludingThis();
    }
    return n;
  }

  // Like sizeOfExcludingThis(), but includes the size of the LifoAlloc itself.
  size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
    return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
  }

  // Get the peak size of the arena chunks (including unused space and
  // bookkeeping space).
  size_t peakSizeOfExcludingThis() const { return peakSize_; }

  // Doesn't perform construction; useful for lazily-initialized POD types.
  template <typename T>
  MOZ_ALWAYS_INLINE T* pod_malloc() {
    return static_cast<T*>(alloc(sizeof(T)));
  }

  JS_DECLARE_NEW_METHODS(new_, alloc, MOZ_ALWAYS_INLINE)
  JS_DECLARE_NEW_METHODS(newInfallible, allocInfallible, MOZ_ALWAYS_INLINE)

#ifdef DEBUG
  bool contains(void* ptr) const {
    for (const detail::BumpChunk& chunk : chunks_) {
      if (chunk.contains(ptr)) {
        return true;
      }
    }
    for (const detail::BumpChunk& chunk : oversize_) {
      if (chunk.contains(ptr)) {
        return true;
      }
    }
    return false;
  }
#endif

  // Iterate over the data allocated in a LifoAlloc, and interpret it.
  class Enum {
    friend class LifoAlloc;
    friend class detail::BumpChunk;

    // Iterator over the list of bump chunks.
    BumpChunkList::Iterator chunkIt_;
    BumpChunkList::Iterator chunkEnd_;
    // Read head (must be within chunk_).
    uint8_t* head_;

    // If there is not enough room in the remaining block for |size|,
    // advance to the next block and update the position.
    uint8_t* seekBaseAndAdvanceBy(size_t size) {
      MOZ_ASSERT(!empty());

      uint8_t* aligned = detail::BumpChunk::nextAllocBase(head_);
      if (detail::BumpChunk::nextAllocEnd(aligned, size) > chunkIt_->end()) {
        ++chunkIt_;
        aligned = chunkIt_->begin();
        // The current code assumes that if we have a chunk, then we
        // have allocated something in it.
        MOZ_ASSERT(!chunkIt_->empty());
      }
      head_ = detail::BumpChunk::nextAllocEnd(aligned, size);
      MOZ_ASSERT(head_ <= chunkIt_->end());
      return aligned;
    }

   public:
    explicit Enum(LifoAlloc& alloc)
        : chunkIt_(alloc.chunks_.begin()),
          chunkEnd_(alloc.chunks_.end()),
          head_(nullptr) {
      MOZ_RELEASE_ASSERT(alloc.oversize_.empty());
      if (chunkIt_ != chunkEnd_) {
        head_ = chunkIt_->begin();
      }
    }

    // Return true if there are no more bytes to enumerate.
    bool empty() {
      return chunkIt_ == chunkEnd_ ||
             (chunkIt_->next() == chunkEnd_.get() && head_ >= chunkIt_->end());
    }

    // Move the read position forward by the size of one T.
    template <typename T>
    T* read(size_t size = sizeof(T)) {
      return reinterpret_cast<T*>(read(size));
    }

    // Return a pointer to the item at the current position. This returns a
    // pointer to the inline storage, not a copy, and moves the read-head by
    // the requested |size|.
    void* read(size_t size) { return seekBaseAndAdvanceBy(size); }
  };
};
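
// Illustrative sketch (not part of this header): LifoAlloc::Enum can replay
// fixed-size records that were bump-allocated in order, e.g.:
//
//   for (LifoAlloc::Enum e(lifo); !e.empty();) {
//     Entry* entry = e.read<Entry>();  // Entry is a hypothetical POD record
//     // ... interpret *entry ...
//   }
//
// This only works when the reader requests the same sizes, in the same order,
// as the original allocations.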

class MOZ_NON_TEMPORARY_CLASS LifoAllocScope {
  LifoAlloc* lifoAlloc;
  LifoAlloc::Mark mark;
  LifoAlloc::AutoFallibleScope fallibleScope;
  bool shouldRelease;
  MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER

 public:
  explicit LifoAllocScope(LifoAlloc* lifoAlloc MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
      : lifoAlloc(lifoAlloc),
        mark(lifoAlloc->mark()),
        fallibleScope(lifoAlloc),
        shouldRelease(true) {
    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
  }

  ~LifoAllocScope() {
    if (shouldRelease) {
      lifoAlloc->release(mark);

      /*
       * The parser can allocate enormous amounts of memory for large functions.
       * Eagerly free the memory now (which otherwise won't be freed until the
       * next GC) to avoid unnecessary OOMs.
       */
      lifoAlloc->freeAllIfHugeAndUnused();
    }
  }

  LifoAlloc& alloc() { return *lifoAlloc; }

  void releaseEarly() {
    MOZ_ASSERT(shouldRelease);
    lifoAlloc->release(mark);
    shouldRelease = false;
  }
};
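
// Illustrative sketch (not part of this header): LifoAllocScope ties the
// mark/release pair to a C++ scope, so temporary allocations are unwound
// automatically:
//
//   {
//     LifoAllocScope scope(&lifo);
//     uint8_t* tmp = scope.alloc().newArray<uint8_t>(256);
//     // ... use tmp ...
//   }  // ~LifoAllocScope releases everything allocated since the mark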

enum Fallibility { Fallible, Infallible };

template <Fallibility fb>
class LifoAllocPolicy {
  LifoAlloc& alloc_;

 public:
  MOZ_IMPLICIT LifoAllocPolicy(LifoAlloc& alloc) : alloc_(alloc) {}
  template <typename T>
  T* maybe_pod_malloc(size_t numElems) {
    size_t bytes;
    if (MOZ_UNLIKELY(!CalculateAllocSize<T>(numElems, &bytes))) {
      return nullptr;
    }
    void* p =
        fb == Fallible ? alloc_.alloc(bytes) : alloc_.allocInfallible(bytes);
    return static_cast<T*>(p);
  }
  template <typename T>
  T* maybe_pod_calloc(size_t numElems) {
    T* p = maybe_pod_malloc<T>(numElems);
    if (MOZ_UNLIKELY(!p)) {
      return nullptr;
    }
    memset(p, 0, numElems * sizeof(T));
    return p;
  }
  template <typename T>
  T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
    T* n = maybe_pod_malloc<T>(newSize);
    if (MOZ_UNLIKELY(!n)) {
      return nullptr;
    }
    MOZ_ASSERT(!(oldSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value));
    memcpy(n, p, std::min(oldSize * sizeof(T), newSize * sizeof(T)));
    return n;
  }
  template <typename T>
  T* pod_malloc(size_t numElems) {
    return maybe_pod_malloc<T>(numElems);
  }
  template <typename T>
  T* pod_calloc(size_t numElems) {
    return maybe_pod_calloc<T>(numElems);
  }
  template <typename T>
  T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
    return maybe_pod_realloc<T>(p, oldSize, newSize);
  }
  template <typename T>
  void free_(T* p, size_t numElems) {}
  void reportAllocOverflow() const {}
  MOZ_MUST_USE bool checkSimulatedOOM() const {
    return fb == Infallible || !js::oom::ShouldFailWithOOM();
  }
};
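
// Illustrative sketch (not part of this header): LifoAllocPolicy lets
// container types such as js::Vector draw their storage from a LifoAlloc
// instead of the malloc heap, e.g.:
//
//   LifoAlloc lifo(4 * 1024);
//   js::Vector<uint32_t, 8, LifoAllocPolicy<Fallible>> vec(lifo);
//   if (!vec.append(42)) {
//     return false;  // fallible policy: append can fail on OOM
//   }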

}  // namespace js

#endif /* ds_LifoAlloc_h */