/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef ds_PageProtectingVector_h
#define ds_PageProtectingVector_h

#include "mozilla/Atomics.h"
#include "mozilla/IntegerPrintfMacros.h"
#include "mozilla/PodOperations.h"
#include "mozilla/Types.h"
#include "mozilla/Vector.h"

#include "ds/MemoryProtectionExceptionHandler.h"
#include "gc/Memory.h"

namespace js {

/*
 * PageProtectingVector is a vector that can only grow or be cleared, restricts
 * access to memory pages that haven't been used yet, and marks all of its fully
 * used memory pages as read-only. It can be used to detect heap corruption in
 * important buffers, since anything that tries to write into its protected
 * pages will crash. On Nightly and Aurora, these crashes will additionally be
 * annotated with a moz crash reason using MemoryProtectionExceptionHandler.
 *
 * PageProtectingVector's protection is limited to full pages. If the front
 * of its buffer is not aligned on a page boundary, elems preceding the first
 * page boundary will not be protected. Similarly, the end of the buffer will
 * not be fully protected unless it is aligned on a page boundary. Altogether,
 * up to two pages of memory may not be protected.
 */
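/*
 * Illustrative usage sketch (not part of the original header; the variable
 * names and sizes below are hypothetical):
 *
 *   PageProtectingVector<uint8_t> buf;
 *   buf.setLowerBoundForProtection(32 * 1024);  // only protect once this big
 *   if (!buf.reserve(64 * 1024)) {              // buffer grows while unprotected
 *     return false;
 *   }
 *   buf.infallibleAppend(bytes, numBytes);      // fast path until a page fills
 *   buf.clear();                                // unprotect, clear, re-protect
 */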
template <typename T, size_t MinInlineCapacity = 0,
          class AllocPolicy = mozilla::MallocAllocPolicy,
          bool ProtectUsed = true, bool ProtectUnused = true,
          size_t InitialLowerBound = 0, bool PoisonUnused = true,
          uint8_t PoisonPattern = 0xe3>
class PageProtectingVector final {
  mozilla::Vector<T, MinInlineCapacity, AllocPolicy> vector;

  static constexpr size_t toShift(size_t v) {
    return v <= 1 ? 0 : 1 + toShift(v >> 1);
  }

  static_assert(
      (sizeof(T) & (sizeof(T) - 1)) == 0,
      "For performance reasons, "
      "PageProtectingVector only works with power-of-2 sized elements!");

  static const size_t elemShift = toShift(sizeof(T));
  static const size_t elemSize = 1 << elemShift;
  static const size_t elemMask = elemSize - 1;

  /* We hardcode the page size here to minimize administrative overhead. */
  static const size_t pageShift = 12;
  static const size_t pageSize = 1 << pageShift;
  static const size_t pageMask = pageSize - 1;

  /*
   * The number of elements that can be added before we need to either adjust
   * the active page or resize the buffer. If |elemsUntilTest < 0| we will
   * take the slow paths in the append calls.
   */
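  /*
   * Worked example (illustrative): with 1-byte elements and 4 KiB pages, if
   * the write cursor sits 100 bytes before the next page boundary and 900
   * elements of spare capacity remain, elemsUntilTest is min(100, 900) = 100.
   */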
  intptr_t elemsUntilTest;

  /*
   * The offset of the currently 'active' page - that is, the page that is
   * currently being written to. If both used and unused bytes are protected,
   * this will be the only (fully owned) page with read and write access.
   */
  size_t currPage;

  /*
   * The first fully owned page. This is the first page that can
   * be protected, but it may not be the first *active* page.
   */
  size_t initPage;

  /*
   * The last fully owned page. This is the last page that can
   * be protected, but it may not be the last *active* page.
   */
  size_t lastPage;

  /*
   * The size in elems that a buffer needs to be before its pages will be
   * protected. This is intended to reduce churn for small vectors while
   * still offering protection when they grow large enough.
   */
  size_t lowerBound;

#ifdef DEBUG
  bool regionUnprotected;
#endif

  bool usable;
  bool enabled;
  bool protectUsedEnabled;
  bool protectUnusedEnabled;

  MOZ_ALWAYS_INLINE void resetTest() {
    MOZ_ASSERT(protectUsedEnabled || protectUnusedEnabled);
    size_t nextPage =
        (pageSize - (uintptr_t(begin() + length()) & pageMask)) >> elemShift;
    size_t nextResize = capacity() - length();
    if (MOZ_LIKELY(nextPage <= nextResize)) {
      elemsUntilTest = intptr_t(nextPage);
    } else {
      elemsUntilTest = intptr_t(nextResize);
    }
  }

  MOZ_ALWAYS_INLINE void setTestInitial() {
    if (MOZ_LIKELY(!protectUsedEnabled && !protectUnusedEnabled)) {
      elemsUntilTest = intptr_t(capacity() - length());
    } else {
      resetTest();
    }
  }

  MOZ_ALWAYS_INLINE void resetForNewBuffer() {
    initPage = (uintptr_t(begin() - 1) >> pageShift) + 1;
    currPage = (uintptr_t(begin() + length()) >> pageShift);
    lastPage = (uintptr_t(begin() + capacity()) >> pageShift) - 1;
    protectUsedEnabled =
        ProtectUsed && usable && enabled && initPage <= lastPage &&
        (uintptr_t(begin()) & elemMask) == 0 && capacity() >= lowerBound;
    protectUnusedEnabled =
        ProtectUnused && usable && enabled && initPage <= lastPage &&
        (uintptr_t(begin()) & elemMask) == 0 && capacity() >= lowerBound;
    setTestInitial();
  }

  MOZ_ALWAYS_INLINE void poisonNewBuffer() {
    if (!PoisonUnused) {
      return;
    }
    T* addr = begin() + length();
    size_t toPoison = (capacity() - length()) * sizeof(T);
    memset(addr, PoisonPattern, toPoison);
  }

  MOZ_ALWAYS_INLINE void addExceptionHandler() {
    if (MOZ_UNLIKELY(protectUsedEnabled || protectUnusedEnabled)) {
      MemoryProtectionExceptionHandler::addRegion(begin(), capacity()
                                                               << elemShift);
    }
  }

  MOZ_ALWAYS_INLINE void removeExceptionHandler() {
    if (MOZ_UNLIKELY(protectUsedEnabled || protectUnusedEnabled)) {
      MemoryProtectionExceptionHandler::removeRegion(begin());
    }
  }

  MOZ_ALWAYS_INLINE void protectUsed() {
    if (MOZ_LIKELY(!protectUsedEnabled)) {
      return;
    }
    if (MOZ_UNLIKELY(currPage <= initPage)) {
      return;
    }
    T* addr = reinterpret_cast<T*>(initPage << pageShift);
    size_t size = (currPage - initPage) << pageShift;
    gc::MakePagesReadOnly(addr, size);
  }

  MOZ_ALWAYS_INLINE void unprotectUsed() {
    if (MOZ_LIKELY(!protectUsedEnabled)) {
      return;
    }
    if (MOZ_UNLIKELY(currPage <= initPage)) {
      return;
    }
    T* addr = reinterpret_cast<T*>(initPage << pageShift);
    size_t size = (currPage - initPage) << pageShift;
    gc::UnprotectPages(addr, size);
  }

  MOZ_ALWAYS_INLINE void protectUnused() {
    if (MOZ_LIKELY(!protectUnusedEnabled)) {
      return;
    }
    if (MOZ_UNLIKELY(currPage >= lastPage)) {
      return;
    }
    T* addr = reinterpret_cast<T*>((currPage + 1) << pageShift);
    size_t size = (lastPage - currPage) << pageShift;
    gc::ProtectPages(addr, size);
  }

  MOZ_ALWAYS_INLINE void unprotectUnused() {
    if (MOZ_LIKELY(!protectUnusedEnabled)) {
      return;
    }
    if (MOZ_UNLIKELY(currPage >= lastPage)) {
      return;
    }
    T* addr = reinterpret_cast<T*>((currPage + 1) << pageShift);
    size_t size = (lastPage - currPage) << pageShift;
    gc::UnprotectPages(addr, size);
  }

  MOZ_ALWAYS_INLINE void protectNewBuffer() {
    resetForNewBuffer();
    addExceptionHandler();
    poisonNewBuffer();
    protectUsed();
    protectUnused();
  }

  MOZ_ALWAYS_INLINE void unprotectOldBuffer() {
    MOZ_ASSERT(!regionUnprotected);
    unprotectUnused();
    unprotectUsed();
    removeExceptionHandler();
  }

  MOZ_ALWAYS_INLINE void protectUnusedPartial(size_t curr, size_t next) {
    if (MOZ_LIKELY(!protectUnusedEnabled)) {
      return;
    }
    if (MOZ_UNLIKELY(next > lastPage)) {
      --next;
    }
    if (MOZ_UNLIKELY(next == curr)) {
      return;
    }
    void* addr = reinterpret_cast<T*>((curr + 1) << pageShift);
    size_t size = (next - curr) << pageShift;
    gc::ProtectPages(addr, size);
  }

  MOZ_ALWAYS_INLINE void unprotectUnusedPartial(size_t curr, size_t next) {
    if (MOZ_LIKELY(!protectUnusedEnabled)) {
      return;
    }
    if (MOZ_UNLIKELY(next > lastPage)) {
      --next;
    }
    if (MOZ_UNLIKELY(next == curr)) {
      return;
    }
    void* addr = reinterpret_cast<T*>((curr + 1) << pageShift);
    size_t size = (next - curr) << pageShift;
    gc::UnprotectPages(addr, size);
  }

  MOZ_ALWAYS_INLINE void protectUsedPartial(size_t curr, size_t next) {
    if (MOZ_LIKELY(!protectUsedEnabled)) {
      return;
    }
    if (MOZ_UNLIKELY(curr < initPage)) {
      ++curr;
    }
    if (MOZ_UNLIKELY(next == curr)) {
      return;
    }
    void* addr = reinterpret_cast<T*>(curr << pageShift);
    size_t size = (next - curr) << pageShift;
    gc::MakePagesReadOnly(addr, size);
  }

  MOZ_ALWAYS_INLINE MOZ_MUST_USE bool reserveNewBuffer(size_t size) {
    unprotectOldBuffer();
    bool ret = vector.reserve(size);
    protectNewBuffer();
    return ret;
  }

  template <typename U>
  MOZ_ALWAYS_INLINE void infallibleAppendNewPage(const U* values, size_t size) {
    size_t nextPage = uintptr_t(begin() + length() + size) >> pageShift;
    MOZ_ASSERT(currPage < nextPage);
    unprotectUnusedPartial(currPage, nextPage);
    vector.infallibleAppend(values, size);
    protectUsedPartial(currPage, nextPage);
    currPage = nextPage;
    resetTest();
  }

  template <typename U>
  MOZ_ALWAYS_INLINE MOZ_MUST_USE bool appendNewPage(const U* values,
                                                    size_t size) {
    size_t nextPage = uintptr_t(begin() + length() + size) >> pageShift;
    MOZ_ASSERT(currPage < nextPage);
    unprotectUnusedPartial(currPage, nextPage);
    bool ret = vector.append(values, size);
    if (MOZ_LIKELY(ret)) {
      protectUsedPartial(currPage, nextPage);
      currPage = nextPage;
    } else {
      protectUnusedPartial(currPage, nextPage);
    }
    resetTest();
    return ret;
  }

  template <typename U>
  MOZ_ALWAYS_INLINE MOZ_MUST_USE bool appendNewBuffer(const U* values,
                                                      size_t size) {
    unprotectOldBuffer();
    bool ret = vector.append(values, size);
    protectNewBuffer();
    return ret;
  }

  MOZ_NEVER_INLINE void unprotectRegionSlow(uintptr_t l, uintptr_t r);
  MOZ_NEVER_INLINE void reprotectRegionSlow(uintptr_t l, uintptr_t r);

  MOZ_NEVER_INLINE MOZ_MUST_USE bool reserveSlow(size_t size);

  template <typename U>
  MOZ_NEVER_INLINE void infallibleAppendSlow(const U* values, size_t size);

  template <typename U>
  MOZ_NEVER_INLINE MOZ_MUST_USE bool appendSlow(const U* values, size_t size);

 public:
  explicit PageProtectingVector(AllocPolicy policy = AllocPolicy())
      : vector(std::move(policy)),
        elemsUntilTest(0),
        currPage(0),
        initPage(0),
        lastPage(0),
        lowerBound(InitialLowerBound),
#ifdef DEBUG
        regionUnprotected(false),
#endif
        usable(true),
        enabled(true),
        protectUsedEnabled(false),
        protectUnusedEnabled(false) {
    if (gc::SystemPageSize() != pageSize) {
      usable = false;
    }
    protectNewBuffer();
  }

  ~PageProtectingVector() { unprotectOldBuffer(); }

  void disableProtection() {
    MOZ_ASSERT(enabled);
    unprotectOldBuffer();
    enabled = false;
    resetForNewBuffer();
  }

  void enableProtection() {
    MOZ_ASSERT(!enabled);
    enabled = true;
    protectNewBuffer();
  }

  /*
   * Sets the lower bound on the size, in elems, that this vector's underlying
   * capacity has to be before its used pages will be protected.
   */
  void setLowerBoundForProtection(size_t elems) {
    if (lowerBound != elems) {
      unprotectOldBuffer();
      lowerBound = elems;
      protectNewBuffer();
    }
  }

  /* Disable protection on the smallest containing region. */
  MOZ_ALWAYS_INLINE void unprotectRegion(T* first, size_t size) {
#ifdef DEBUG
    regionUnprotected = true;
#endif
    if (MOZ_UNLIKELY(protectUsedEnabled)) {
      uintptr_t l = uintptr_t(first) >> pageShift;
      uintptr_t r = uintptr_t(first + size - 1) >> pageShift;
      if (r >= initPage && l < currPage) {
        unprotectRegionSlow(l, r);
      }
    }
  }

  /* Re-enable protection on the smallest containing region. */
  MOZ_ALWAYS_INLINE void reprotectRegion(T* first, size_t size) {
#ifdef DEBUG
    regionUnprotected = false;
#endif
    if (MOZ_UNLIKELY(protectUsedEnabled)) {
      uintptr_t l = uintptr_t(first) >> pageShift;
      uintptr_t r = uintptr_t(first + size - 1) >> pageShift;
      if (r >= initPage && l < currPage) {
        reprotectRegionSlow(l, r);
      }
    }
  }
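
  /*
   * Illustrative patching sequence (a sketch, not from the original header):
   * fully used pages are read-only, so a caller that needs to fix up data it
   * has already appended temporarily lifts protection on the smallest
   * containing region. |offset| and |len| below are hypothetical.
   *
   *   buf.unprotectRegion(buf.begin() + offset, len);
   *   // ... patch the already-written elements in place ...
   *   buf.reprotectRegion(buf.begin() + offset, len);
   */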

  MOZ_ALWAYS_INLINE size_t capacity() const { return vector.capacity(); }
  MOZ_ALWAYS_INLINE size_t length() const { return vector.length(); }

  MOZ_ALWAYS_INLINE T* begin() { return vector.begin(); }
  MOZ_ALWAYS_INLINE const T* begin() const { return vector.begin(); }

  void clear() {
    unprotectOldBuffer();
    vector.clear();
    protectNewBuffer();
  }

  MOZ_ALWAYS_INLINE MOZ_MUST_USE bool reserve(size_t size) {
    if (MOZ_LIKELY(size <= capacity())) {
      return vector.reserve(size);
    }
    return reserveSlow(size);
  }

  template <typename U>
  MOZ_ALWAYS_INLINE void infallibleAppend(const U* values, size_t size) {
    elemsUntilTest -= size;
    if (MOZ_LIKELY(elemsUntilTest >= 0)) {
      return vector.infallibleAppend(values, size);
    }
    infallibleAppendSlow(values, size);
  }

  template <typename U>
  MOZ_ALWAYS_INLINE MOZ_MUST_USE bool append(const U* values, size_t size) {
    elemsUntilTest -= size;
    if (MOZ_LIKELY(elemsUntilTest >= 0)) {
      return vector.append(values, size);
    }
    return appendSlow(values, size);
  }
};

template <typename T, size_t A, class B, bool C, bool D, size_t E, bool F,
          uint8_t G>
MOZ_NEVER_INLINE void
PageProtectingVector<T, A, B, C, D, E, F, G>::unprotectRegionSlow(uintptr_t l,
                                                                  uintptr_t r) {
  if (l < initPage) {
    l = initPage;
  }
  if (r >= currPage) {
    r = currPage - 1;
  }
  T* addr = reinterpret_cast<T*>(l << pageShift);
  size_t size = (r - l + 1) << pageShift;
  gc::UnprotectPages(addr, size);
}

template <typename T, size_t A, class B, bool C, bool D, size_t E, bool F,
          uint8_t G>
MOZ_NEVER_INLINE void
PageProtectingVector<T, A, B, C, D, E, F, G>::reprotectRegionSlow(uintptr_t l,
                                                                  uintptr_t r) {
  if (l < initPage) {
    l = initPage;
  }
  if (r >= currPage) {
    r = currPage - 1;
  }
  T* addr = reinterpret_cast<T*>(l << pageShift);
  size_t size = (r - l + 1) << pageShift;
  gc::MakePagesReadOnly(addr, size);
}

template <typename T, size_t A, class B, bool C, bool D, size_t E, bool F,
          uint8_t G>
MOZ_NEVER_INLINE MOZ_MUST_USE bool
PageProtectingVector<T, A, B, C, D, E, F, G>::reserveSlow(size_t size) {
  return reserveNewBuffer(size);
}

template <typename T, size_t A, class B, bool C, bool D, size_t E, bool F,
          uint8_t G>
template <typename U>
MOZ_NEVER_INLINE void
PageProtectingVector<T, A, B, C, D, E, F, G>::infallibleAppendSlow(
    const U* values, size_t size) {
  // Ensure that we're here because we reached a page
  // boundary and not because of a buffer overflow.
  MOZ_RELEASE_ASSERT(
      MOZ_LIKELY(length() + size <= capacity()),
      "About to overflow our AssemblerBuffer using infallibleAppend!");
  infallibleAppendNewPage(values, size);
}

template <typename T, size_t A, class B, bool C, bool D, size_t E, bool F,
          uint8_t G>
template <typename U>
MOZ_NEVER_INLINE MOZ_MUST_USE bool
PageProtectingVector<T, A, B, C, D, E, F, G>::appendSlow(const U* values,
                                                         size_t size) {
  if (MOZ_LIKELY(length() + size <= capacity())) {
    return appendNewPage(values, size);
  }
  return appendNewBuffer(values, size);
}

} /* namespace js */

#endif /* ds_PageProtectingVector_h */