/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "ds/LifoAlloc.h"

#include "mozilla/Likely.h"
#include "mozilla/MathAlgorithms.h"

#include <algorithm>

#include "ds/MemoryProtectionExceptionHandler.h"

#ifdef LIFO_CHUNK_PROTECT
#  include "gc/Memory.h"
#endif

using namespace js;

using mozilla::RoundUpPow2;
using mozilla::tl::BitSize;

namespace js {
namespace detail {

/* static */
UniquePtr<BumpChunk> BumpChunk::newWithCapacity(size_t size) {
  MOZ_DIAGNOSTIC_ASSERT(size >= sizeof(BumpChunk));
  void* mem = js_malloc(size);
  if (!mem) {
    return nullptr;
  }

  UniquePtr<BumpChunk> result(new (mem) BumpChunk(size));

  // We assume that the alignment of LIFO_ALLOC_ALIGN is less than that of the
  // underlying memory allocator -- creating a new BumpChunk should always
  // satisfy the LIFO_ALLOC_ALIGN alignment constraint.
  MOZ_ASSERT(AlignPtr(result->begin()) == result->begin());
  return result;
}

#ifdef LIFO_CHUNK_PROTECT

static uint8_t* AlignPtrUp(uint8_t* ptr, uintptr_t align) {
  MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
  uintptr_t uptr = uintptr_t(ptr);
  uintptr_t diff = uptr & (align - 1);
  diff = (align - diff) & (align - 1);
  uptr = uptr + diff;
  return (uint8_t*)uptr;
}

static uint8_t* AlignPtrDown(uint8_t* ptr, uintptr_t align) {
  MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
  uintptr_t uptr = uintptr_t(ptr);
  uptr = uptr & ~(align - 1);
  return (uint8_t*)uptr;
}

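// Worked example of the power-of-two alignment arithmetic above (values are
// illustrative, not from the original source): with ptr = 0x1234 and
// align = 0x1000, AlignPtrUp computes diff = 0x234, then
// (0x1000 - 0x234) & 0xfff = 0xdcc, giving 0x1234 + 0xdcc = 0x2000. An
// already-aligned pointer maps to itself, since diff masks back to zero.
// AlignPtrDown simply clears the low bits: 0x1234 & ~0xfff = 0x1000.
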
void BumpChunk::setReadOnly() {
  uintptr_t pageSize = gc::SystemPageSize();
  // The allocated chunks might not be aligned on page boundaries. This code
  // is used to ensure that we are changing the memory protection of pointers
  // which are within the range of the BumpChunk, or that the range formed by
  // [b .. e] is empty.
  uint8_t* b = base();
  uint8_t* e = capacity_;
  b = AlignPtrUp(b, pageSize);
  e = AlignPtrDown(e, pageSize);
  if (e <= b) {
    return;
  }
  js::MemoryProtectionExceptionHandler::addRegion(base(), capacity_ - base());
  gc::MakePagesReadOnly(b, e - b);
}

void BumpChunk::setReadWrite() {
  uintptr_t pageSize = gc::SystemPageSize();
  // The allocated chunks might not be aligned on page boundaries. This code
  // is used to ensure that we are changing the memory protection of pointers
  // which are within the range of the BumpChunk, or that the range formed by
  // [b .. e] is empty.
  uint8_t* b = base();
  uint8_t* e = capacity_;
  b = AlignPtrUp(b, pageSize);
  e = AlignPtrDown(e, pageSize);
  if (e <= b) {
    return;
  }
  gc::UnprotectPages(b, e - b);
  js::MemoryProtectionExceptionHandler::removeRegion(base());
}

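// Worked example for the page clamping in setReadOnly/setReadWrite above
// (illustrative values; the page size is whatever gc::SystemPageSize()
// reports): with 4 KiB pages, base() = 0x10020 and capacity_ = 0x13f00 give
// b = 0x11000 and e = 0x13000, so only the two fully covered pages change
// protection; a chunk smaller than one page yields e <= b and is skipped.
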
#endif

}  // namespace detail
}  // namespace js

void LifoAlloc::reset(size_t defaultChunkSize) {
  MOZ_ASSERT(mozilla::IsPowerOfTwo(defaultChunkSize));

  while (!chunks_.empty()) {
    chunks_.popFirst();
  }
  while (!oversize_.empty()) {
    oversize_.popFirst();
  }
  while (!unused_.empty()) {
    unused_.popFirst();
  }
  defaultChunkSize_ = defaultChunkSize;
  oversizeThreshold_ = defaultChunkSize;
  markCount = 0;
  curSize_ = 0;
  smallAllocsSize_ = 0;
}

void LifoAlloc::freeAll() {
  // When freeing all chunks, we can no longer determine which chunks were
  // transferred and which were not, so simply clear the heuristic to zero
  // right away.
  smallAllocsSize_ = 0;

  while (!chunks_.empty()) {
    UniqueBumpChunk bc = chunks_.popFirst();
    decrementCurSize(bc->computedSizeOfIncludingThis());
  }
  while (!oversize_.empty()) {
    UniqueBumpChunk bc = oversize_.popFirst();
    decrementCurSize(bc->computedSizeOfIncludingThis());
  }
  while (!unused_.empty()) {
    UniqueBumpChunk bc = unused_.popFirst();
    decrementCurSize(bc->computedSizeOfIncludingThis());
  }

  // Nb: maintaining curSize_ correctly isn't easy. Fortunately, this is an
  // excellent sanity check.
  MOZ_ASSERT(curSize_ == 0);
}

// Round up to the same allocation-size granularity used by malloc.
static size_t MallocGoodSize(size_t aSize) {
#if defined(MOZ_MEMORY)
  return malloc_good_size(aSize);
#else
  return aSize;
#endif
}

// Heuristic to choose the size of the next BumpChunk for small allocations.
// `start` is the size of the first chunk. `used` is the total size of all
// BumpChunks in this LifoAlloc so far.
static size_t NextSize(size_t start, size_t used) {
  // Double the size, up to 1 MB.
  const size_t mb = 1 * 1024 * 1024;
  if (used < mb) {
    return std::max(start, used);
  }

  // After 1 MB, grow more gradually, to waste less memory.
  // The sequence (in megabytes) begins:
  // 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, ...
  return RoundUp(used / 8, mb);
}

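// For example, once used = 20 MB of small-allocation chunks exist, the next
// chunk is RoundUp(20 MB / 8, 1 MB) = 3 MB, which keeps the unused tail of
// the newest chunk to roughly an eighth of the total footprint.
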
LifoAlloc::UniqueBumpChunk LifoAlloc::newChunkWithCapacity(size_t n,
                                                           bool oversize) {
  MOZ_ASSERT(fallibleScope_,
             "[OOM] Cannot allocate a new chunk in an infallible scope.");

  // Compute the size which should be requested in order to be able to fit |n|
  // bytes in a newly allocated chunk, or default to |defaultChunkSize_|.

  size_t minSize;
  if (MOZ_UNLIKELY(!detail::BumpChunk::allocSizeWithRedZone(n, &minSize) ||
                   (minSize & (size_t(1) << (BitSize<size_t>::value - 1))))) {
    return nullptr;
  }

  // Note: When computing chunkSize growth, we are only interested in chunks
  // used for small allocations. This excludes unused chunks, oversized chunks,
  // and chunks transferred in from another LifoAlloc.
  MOZ_ASSERT(curSize_ >= smallAllocsSize_);
  const size_t chunkSize = (oversize || minSize > defaultChunkSize_)
                               ? MallocGoodSize(minSize)
                               : NextSize(defaultChunkSize_, smallAllocsSize_);

  // Create a new BumpChunk, and allocate space for it.
  UniqueBumpChunk result = detail::BumpChunk::newWithCapacity(chunkSize);
  if (!result) {
    return nullptr;
  }
  MOZ_ASSERT(result->computedSizeOfIncludingThis() == chunkSize);
  return result;
}

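// Illustration of the chunkSize selection above (hypothetical numbers): with
// defaultChunkSize_ = 4 KiB, a 100-byte request takes the NextSize branch,
// while a 1 MiB request has minSize > defaultChunkSize_ and is sized with
// MallocGoodSize(minSize) instead. The high-bit test on minSize treats any
// red-zone-padded size with the top bit of size_t set as OOM, which keeps the
// rounding arithmetic that follows from overflowing.
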
LifoAlloc::UniqueBumpChunk LifoAlloc::getOrCreateChunk(size_t n) {
  // Look for existing unused BumpChunks to satisfy the request: pick the
  // first one that is large enough, and move it into the list of used
  // chunks.
  if (!unused_.empty()) {
    if (unused_.begin()->canAlloc(n)) {
      return unused_.popFirst();
    }

    BumpChunkList::Iterator e(unused_.end());
    for (BumpChunkList::Iterator i(unused_.begin()); i->next() != e.get();
         ++i) {
      detail::BumpChunk* elem = i->next();
      MOZ_ASSERT(elem->empty());
      if (elem->canAlloc(n)) {
        BumpChunkList temp = unused_.splitAfter(i.get());
        UniqueBumpChunk newChunk = temp.popFirst();
        unused_.appendAll(std::move(temp));
        return newChunk;
      }
    }
  }

  // Allocate a new BumpChunk with enough space for the next allocation.
  UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
  if (!newChunk) {
    return newChunk;
  }
  incrementCurSize(newChunk->computedSizeOfIncludingThis());
  return newChunk;
}

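// Note on the loop above: BumpChunkList is singly linked, so extracting a
// middle element is done by splitting the list after its predecessor |i|,
// popping the matching chunk off the detached tail, and appending the
// remaining tail back onto unused_.
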
void* LifoAlloc::allocImplColdPath(size_t n) {
  void* result;
  UniqueBumpChunk newChunk = getOrCreateChunk(n);
  if (!newChunk) {
    return nullptr;
  }

  // This new chunk is about to be used for small allocations.
  smallAllocsSize_ += newChunk->computedSizeOfIncludingThis();

  // Since we just created a large enough chunk, this can't fail.
  chunks_.append(std::move(newChunk));
  result = chunks_.last()->tryAlloc(n);
  MOZ_ASSERT(result);
  return result;
}

void* LifoAlloc::allocImplOversize(size_t n) {
  void* result;
  UniqueBumpChunk newChunk = newChunkWithCapacity(n, true);
  if (!newChunk) {
    return nullptr;
  }
  incrementCurSize(newChunk->computedSizeOfIncludingThis());

  // Since we just created a large enough chunk, this can't fail.
  oversize_.append(std::move(newChunk));
  result = oversize_.last()->tryAlloc(n);
  MOZ_ASSERT(result);
  return result;
}

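// Assumed caller contract (inferred from reset() above, which initializes
// oversizeThreshold_ to defaultChunkSize): the inline allocation path is
// expected to route requests larger than oversizeThreshold_ here, so that
// oversize chunks never feed the NextSize small-allocation heuristic.
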
bool LifoAlloc::ensureUnusedApproximateColdPath(size_t n, size_t total) {
  for (detail::BumpChunk& bc : unused_) {
    total += bc.unused();
    if (total >= n) {
      return true;
    }
  }

  UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
  if (!newChunk) {
    return false;
  }
  incrementCurSize(newChunk->computedSizeOfIncludingThis());
  unused_.pushFront(std::move(newChunk));
  return true;
}

LifoAlloc::Mark LifoAlloc::mark() {
  markCount++;
  Mark res;
  if (!chunks_.empty()) {
    res.chunk = chunks_.last()->mark();
  }
  if (!oversize_.empty()) {
    res.oversize = oversize_.last()->mark();
  }
  return res;
}

void LifoAlloc::release(Mark mark) {
  markCount--;
#ifdef DEBUG
  auto assertIsContained = [](const detail::BumpChunk::Mark& m,
                              BumpChunkList& list) {
    if (m.markedChunk()) {
      bool contained = false;
      for (const detail::BumpChunk& chunk : list) {
        if (&chunk == m.markedChunk() && chunk.contains(m)) {
          contained = true;
          break;
        }
      }
      MOZ_ASSERT(contained);
    }
  };
  assertIsContained(mark.chunk, chunks_);
  assertIsContained(mark.oversize, oversize_);
#endif

  BumpChunkList released;
  auto cutAtMark = [&released](const detail::BumpChunk::Mark& m,
                               BumpChunkList& list) {
    // Move the blocks which are after the mark to the set of released chunks.
    if (!m.markedChunk()) {
      released = std::move(list);
    } else {
      released = list.splitAfter(m.markedChunk());
    }

    // Release everything which follows the mark in the last chunk.
    if (!list.empty()) {
      list.last()->release(m);
    }
  };

  // Release the content of all the blocks which are after the marks, and keep
  // the blocks as unused.
  cutAtMark(mark.chunk, chunks_);
  for (detail::BumpChunk& bc : released) {
    bc.release();

    // Chunks that move from chunks_ (after a mark) to unused_ are no longer
    // considered small allocations.
    smallAllocsSize_ -= bc.computedSizeOfIncludingThis();
  }
  unused_.appendAll(std::move(released));

  // Free the content of all the blocks which are after the marks.
  cutAtMark(mark.oversize, oversize_);
  while (!released.empty()) {
    UniqueBumpChunk bc = released.popFirst();
    decrementCurSize(bc->computedSizeOfIncludingThis());
  }
}

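// A minimal usage sketch of the mark/release pairing (hypothetical caller,
// not part of this file):
//
//   LifoAlloc::Mark m = alloc.mark();
//   void* scratch = alloc.alloc(128);  // temporary allocation
//   // ... use scratch ...
//   alloc.release(m);  // reclaims everything allocated since mark(); regular
//                      // chunks are recycled onto unused_, while oversize
//                      // chunks are freed outright.
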
void LifoAlloc::steal(LifoAlloc* other) {
  MOZ_ASSERT(!other->markCount);
  MOZ_DIAGNOSTIC_ASSERT(unused_.empty());
  MOZ_DIAGNOSTIC_ASSERT(chunks_.empty());
  MOZ_DIAGNOSTIC_ASSERT(oversize_.empty());

  // Copy everything from |other| to |this| except for |peakSize_|, which
  // requires some care.
  chunks_ = std::move(other->chunks_);
  oversize_ = std::move(other->oversize_);
  unused_ = std::move(other->unused_);
  markCount = other->markCount;
  defaultChunkSize_ = other->defaultChunkSize_;
  oversizeThreshold_ = other->oversizeThreshold_;
  curSize_ = other->curSize_;
  peakSize_ = std::max(peakSize_, other->peakSize_);
  smallAllocsSize_ = other->smallAllocsSize_;
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
  fallibleScope_ = other->fallibleScope_;
#endif

  other->reset(defaultChunkSize_);
}

void LifoAlloc::transferFrom(LifoAlloc* other) {
  MOZ_ASSERT(!markCount);
  MOZ_ASSERT(!other->markCount);

  // Transferred chunks are not counted as part of |smallAllocsSize|, as this
  // could bias the |NextSize| heuristic and lead to over-allocation in *this*
  // LifoAlloc. Also, to avoid interfering with small allocations made in
  // |this|, the last chunk of the |chunks_| list should remain the last
  // chunk. Therefore, the transferred chunks are prepended to the |chunks_|
  // list.
  incrementCurSize(other->curSize_);

  appendUnused(std::move(other->unused_));
  chunks_.prependAll(std::move(other->chunks_));
  oversize_.prependAll(std::move(other->oversize_));
  other->curSize_ = 0;
  other->smallAllocsSize_ = 0;
}

void LifoAlloc::transferUnusedFrom(LifoAlloc* other) {
  MOZ_ASSERT(!markCount);

  size_t size = 0;
  for (detail::BumpChunk& bc : other->unused_) {
    size += bc.computedSizeOfIncludingThis();
  }

  appendUnused(std::move(other->unused_));
  incrementCurSize(size);
  other->decrementCurSize(size);
}

#ifdef LIFO_CHUNK_PROTECT
void LifoAlloc::setReadOnly() {
  for (detail::BumpChunk& bc : chunks_) {
    bc.setReadOnly();
  }
  for (detail::BumpChunk& bc : oversize_) {
    bc.setReadOnly();
  }
  for (detail::BumpChunk& bc : unused_) {
    bc.setReadOnly();
  }
}

void LifoAlloc::setReadWrite() {
  for (detail::BumpChunk& bc : chunks_) {
    bc.setReadWrite();
  }
  for (detail::BumpChunk& bc : oversize_) {
    bc.setReadWrite();
  }
  for (detail::BumpChunk& bc : unused_) {
    bc.setReadWrite();
  }
}
#endif