/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2017 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmProcess.h"

#include "mozilla/BinarySearch.h"
#include "mozilla/ScopeExit.h"

#include "gc/Memory.h"
#include "threading/ExclusiveData.h"
#include "vm/MutexIDs.h"
#ifdef ENABLE_WASM_CRANELIFT
#  include "wasm/cranelift/clifapi.h"
#endif
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmInstance.h"

using namespace js;
using namespace wasm;

using mozilla::BinarySearchIf;

// Per-process map from values of program-counter (pc) to CodeSegments.
//
// Whenever a new CodeSegment is ready to use, it has to be registered so that
// we can have fast lookups from pc to CodeSegments in numerous places. Since
// wasm compilation may be tiered, and the second tier doesn't have access to
// any JSContext/JS::Compartment/etc lying around, we have to use a process-wide
// map instead.

typedef Vector<const CodeSegment*, 0, SystemAllocPolicy> CodeSegmentVector;

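// CodeExists flips to true when the first CodeSegment is registered and back
// to false once the map below becomes empty again (see insert()/remove()), so
// callers elsewhere can cheaply check whether any wasm code exists in the
// process before attempting a full lookup.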
Atomic<bool> wasm::CodeExists(false);

// Because of profiling, the thread running wasm might need to know to which
// CodeSegment the current PC belongs, during a call to lookup(). A lookup
// is a read-only operation, and we don't want to take a lock then
// (otherwise, we could have a deadlock situation if an async lookup
// happened on a given thread that was holding mutatorsMutex_ while getting
// sampled). Since the writer could be modifying the data that is getting
// looked up, the writer functions use spin-locks to know if there are any
// observers (i.e. calls to lookup()) of the atomic data.

static Atomic<size_t> sNumActiveLookups(0);
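
// Reader/writer protocol used below: lookup() increments sNumActiveLookups,
// reads the vector published through readonlyCodeSegments_, then decrements;
// insert()/remove() mutate the other vector under mutatorsMutex_, atomically
// swap the two pointers in swapAndWait(), and spin until sNumActiveLookups
// reaches zero before touching the vector that readers may still be scanning.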

class ProcessCodeSegmentMap {
  // Since writes (insertions or removals) can happen on any background
  // thread at the same time, we need a lock here.

  Mutex mutatorsMutex_;

  CodeSegmentVector segments1_;
  CodeSegmentVector segments2_;

  // Except during swapAndWait(), there are no lookup() observers of the
  // vector pointed to by mutableCodeSegments_

  CodeSegmentVector* mutableCodeSegments_;
  Atomic<const CodeSegmentVector*> readonlyCodeSegments_;

  struct CodeSegmentPC {
    const void* pc;
    explicit CodeSegmentPC(const void* pc) : pc(pc) {}
    int operator()(const CodeSegment* cs) const {
      if (cs->containsCodePC(pc)) {
        return 0;
      }
      if (pc < cs->base()) {
        return -1;
      }
      return 1;
    }
  };

  void swapAndWait() {
    // Both vectors are consistent for lookup at this point although their
    // contents are different: there is no way for the looked up PC to be
    // in the code segment that is getting registered, because the code
    // segment is not even fully created yet.

    // If a lookup happens before this instruction, then the
    // soon-to-become-former read-only pointer is used during the lookup,
    // which is valid.

    mutableCodeSegments_ = const_cast<CodeSegmentVector*>(
        readonlyCodeSegments_.exchange(mutableCodeSegments_));

    // If a lookup happens after this instruction, then the updated vector
    // is used, which is valid:
    // - in case of insertion, it means the new vector contains more data,
    // but it's fine since the code segment is getting registered and thus
    // isn't even fully created yet, so the code can't be running.
    // - in case of removal, it means the new vector contains one less
    // entry, but it's fine since unregistering means the code segment
    // isn't used by any live instance anymore, thus PC can't be in the
    // to-be-removed code segment's range.

    // A lookup could have happened on either of the two vectors. Wait for
    // observers to be done using any vector before mutating.

    while (sNumActiveLookups > 0) {
    }
  }

 public:
  ProcessCodeSegmentMap()
      : mutatorsMutex_(mutexid::WasmCodeSegmentMap),
        mutableCodeSegments_(&segments1_),
        readonlyCodeSegments_(&segments2_) {}

  ~ProcessCodeSegmentMap() {
    MOZ_RELEASE_ASSERT(sNumActiveLookups == 0);
    MOZ_ASSERT(segments1_.empty());
    MOZ_ASSERT(segments2_.empty());
    segments1_.clearAndFree();
    segments2_.clearAndFree();
  }

  bool insert(const CodeSegment* cs) {
    LockGuard<Mutex> lock(mutatorsMutex_);

    size_t index;
    MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeSegments_, 0,
                                    mutableCodeSegments_->length(),
                                    CodeSegmentPC(cs->base()), &index));

    if (!mutableCodeSegments_->insert(mutableCodeSegments_->begin() + index,
                                      cs)) {
      return false;
    }

    CodeExists = true;

    swapAndWait();

#ifdef DEBUG
    size_t otherIndex;
    MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeSegments_, 0,
                                    mutableCodeSegments_->length(),
                                    CodeSegmentPC(cs->base()), &otherIndex));
    MOZ_ASSERT(index == otherIndex);
#endif

    // Although we could simply revert the insertion in the read-only
    // vector, it is simpler to just crash, and given that each CodeSegment
    // consumes multiple pages, it is unlikely this insert() would OOM in
    // practice.
    AutoEnterOOMUnsafeRegion oom;
    if (!mutableCodeSegments_->insert(mutableCodeSegments_->begin() + index,
                                      cs)) {
      oom.crash("when inserting a CodeSegment in the process-wide map");
    }

    return true;
  }

  void remove(const CodeSegment* cs) {
    LockGuard<Mutex> lock(mutatorsMutex_);

    size_t index;
    MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeSegments_, 0,
                                   mutableCodeSegments_->length(),
                                   CodeSegmentPC(cs->base()), &index));

    mutableCodeSegments_->erase(mutableCodeSegments_->begin() + index);

    if (!mutableCodeSegments_->length()) {
      CodeExists = false;
    }

    swapAndWait();

#ifdef DEBUG
    size_t otherIndex;
    MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeSegments_, 0,
                                   mutableCodeSegments_->length(),
                                   CodeSegmentPC(cs->base()), &otherIndex));
    MOZ_ASSERT(index == otherIndex);
#endif

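    // Apply the same removal to the vector that lookup() was reading until
    // swapAndWait() above, so both copies stay identical.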
    mutableCodeSegments_->erase(mutableCodeSegments_->begin() + index);
  }

  const CodeSegment* lookup(const void* pc) {
    const CodeSegmentVector* readonly = readonlyCodeSegments_;

    size_t index;
    if (!BinarySearchIf(*readonly, 0, readonly->length(), CodeSegmentPC(pc),
                        &index)) {
      return nullptr;
    }

    // It is fine returning a raw CodeSegment*, because we assume we are
    // looking up a live PC in code which is on the stack, keeping the
    // CodeSegment alive.

    return (*readonly)[index];
  }
};

// This field is only atomic to handle buggy scenarios where we crash during
// startup or shutdown and thus racily perform wasm::LookupCodeSegment() from
// the crashing thread.

static Atomic<ProcessCodeSegmentMap*> sProcessCodeSegmentMap(nullptr);

bool wasm::RegisterCodeSegment(const CodeSegment* cs) {
  MOZ_ASSERT(cs->codeTier().code().initialized());

  // This function cannot race with startup/shutdown.
  ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
  MOZ_RELEASE_ASSERT(map);
  return map->insert(cs);
}

void wasm::UnregisterCodeSegment(const CodeSegment* cs) {
  // This function cannot race with startup/shutdown.
  ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
  MOZ_RELEASE_ASSERT(map);
  map->remove(cs);
}

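// Expected lifecycle (a sketch; the callers live outside this file): a
// CodeSegment is registered via RegisterCodeSegment() before any of its code
// can execute, profilers and fault handlers then map a pc back to it through
// LookupCodeSegment()/LookupCode() below, and UnregisterCodeSegment() runs
// once no live instance uses the segment anymore.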
const CodeSegment* wasm::LookupCodeSegment(
    const void* pc, const CodeRange** codeRange /*= nullptr */) {
  // Since wasm::LookupCodeSegment() can race with wasm::ShutDown(), we must
  // additionally keep sNumActiveLookups above zero for the duration we're
  // using the ProcessCodeSegmentMap. wasm::ShutDown() spin-waits on
  // sNumActiveLookups getting to zero.

  auto decObserver = mozilla::MakeScopeExit([&] {
    MOZ_ASSERT(sNumActiveLookups > 0);
    sNumActiveLookups--;
  });
  sNumActiveLookups++;

  ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
  if (!map) {
    return nullptr;
  }

  if (const CodeSegment* found = map->lookup(pc)) {
    if (codeRange) {
      *codeRange = found->isModule() ? found->asModule()->lookupRange(pc)
                                     : found->asLazyStub()->lookupRange(pc);
    }
    return found;
  }

  if (codeRange) {
    *codeRange = nullptr;
  }

  return nullptr;
}

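// Illustrative use of the pc-to-wasm-code lookups above (a sketch, not code
// from this file):
//
//   const wasm::CodeRange* range = nullptr;
//   const wasm::CodeSegment* segment = wasm::LookupCodeSegment(pc, &range);
//   if (segment) {
//     // pc points into wasm code owned by `segment`; `range`, when
//     // requested, describes the function or lazy stub containing pc.
//   }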
const Code* wasm::LookupCode(const void* pc,
                             const CodeRange** codeRange /* = nullptr */) {
  const CodeSegment* found = LookupCodeSegment(pc, codeRange);
  MOZ_ASSERT_IF(!found && codeRange, !*codeRange);
  return found ? &found->code() : nullptr;
}

bool wasm::InCompiledCode(void* pc) {
  if (LookupCodeSegment(pc)) {
    return true;
  }

  const CodeRange* codeRange;
  uint8_t* codeBase;
  return LookupBuiltinThunk(pc, &codeRange, &codeBase);
}

/**
 * ReadLockFlag maintains a flag that can be mutated multiple times before it
 * is read; after the first read, it keeps the same value.
 */
class ReadLockFlag {
 private:
  bool enabled_;
  bool read_;

 public:
  ReadLockFlag() : enabled_(false), read_(false) {}

  bool get() {
    read_ = true;
    return enabled_;
  }

  bool set(bool enabled) {
    if (read_) {
      return false;
    }
    enabled_ = enabled;
    return true;
  }
};

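// Usage sketch for ReadLockFlag (illustrative): ConfigureHugeMemory() below
// may call set(true) once at startup; the first IsHugeMemoryEnabled() call
// latches the value via get(), after which DisableHugeMemory()'s set(false)
// fails and returns false.
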
#ifdef WASM_SUPPORTS_HUGE_MEMORY
/*
 * Some 64 bit systems greatly limit the range of available virtual memory. We
 * require about 6GiB for each wasm huge memory, which can exhaust the address
 * spaces of these systems quickly. In order to avoid this, we only enable huge
 * memory if we observe a large enough address space.
 *
 * This number is conservatively chosen to continue using huge memory on our
 * smallest address space system, Android on ARM64 (39 bits), along with a bit
 * for error in detecting the address space limit.
 */
static const size_t MinAddressBitsForHugeMemory = 38;

/*
 * In addition to the above, some systems impose an independent limit on the
 * amount of virtual memory that may be used.
 */
static const size_t MinVirtualMemoryLimitForHugeMemory =
    size_t(1) << MinAddressBitsForHugeMemory;
#endif

ExclusiveData<ReadLockFlag> sHugeMemoryEnabled(mutexid::WasmHugeMemoryEnabled);

bool wasm::IsHugeMemoryEnabled() {
  auto state = sHugeMemoryEnabled.lock();
  return state->get();
}

bool wasm::DisableHugeMemory() {
  auto state = sHugeMemoryEnabled.lock();
  return state->set(false);
}

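// Note: when WASM_SUPPORTS_HUGE_MEMORY is not defined, the body below
// compiles away, so huge memory simply stays disabled.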
void ConfigureHugeMemory() {
#ifdef WASM_SUPPORTS_HUGE_MEMORY
  if (gc::SystemAddressBits() < MinAddressBitsForHugeMemory) {
    return;
  }

  if (gc::VirtualMemoryLimit() != size_t(-1) &&
      gc::VirtualMemoryLimit() < MinVirtualMemoryLimitForHugeMemory) {
    return;
  }

  auto state = sHugeMemoryEnabled.lock();
  bool set = state->set(true);
  MOZ_RELEASE_ASSERT(set);
#endif
}

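// Process-wide initialization; must run once, before any wasm code is
// compiled or registered (RegisterCodeSegment() release-asserts that the map
// created here exists).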
bool wasm::Init() {
  MOZ_RELEASE_ASSERT(!sProcessCodeSegmentMap);

  ConfigureHugeMemory();

#ifdef ENABLE_WASM_CRANELIFT
  cranelift_initialize();
#endif

  ProcessCodeSegmentMap* map = js_new<ProcessCodeSegmentMap>();
  if (!map) {
    return false;
  }

  sProcessCodeSegmentMap = map;
  return true;
}

void wasm::ShutDown() {
  // If there are live runtimes then we are already pretty much leaking the
  // world, so to avoid spurious assertions (which are valid and valuable when
  // there are no live JSRuntimes), don't bother releasing anything here.
  if (JSRuntime::hasLiveRuntimes()) {
    return;
  }

  // After signalling shutdown by clearing sProcessCodeSegmentMap, wait for
  // concurrent wasm::LookupCodeSegment()s to finish.
  ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
  MOZ_RELEASE_ASSERT(map);
  sProcessCodeSegmentMap = nullptr;
  while (sNumActiveLookups > 0) {
  }

  ReleaseBuiltinThunks();
  js_delete(map);
}