/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2016 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmCode.h"
20
21
#include "mozilla/BinarySearch.h"
22
#include "mozilla/EnumeratedRange.h"
23
24
#include <algorithm>
25
26
#include "jsnum.h"
27
28
#include "jit/ExecutableAllocator.h"
29
#ifdef JS_ION_PERF
30
# include "jit/PerfSpewer.h"
31
#endif
32
#include "util/Poison.h"
33
#ifdef MOZ_VTUNE
34
# include "vtune/VTuneWrapper.h"
35
#endif
36
#include "wasm/WasmModule.h"
37
#include "wasm/WasmProcess.h"
38
#include "wasm/WasmSerialize.h"
39
#include "wasm/WasmStubs.h"
40
41
#include "jit/MacroAssembler-inl.h"
42
43
using namespace js;
44
using namespace js::jit;
45
using namespace js::wasm;
46
using mozilla::BinarySearch;
47
using mozilla::MakeEnumeratedRange;
48
using mozilla::PodAssign;
49
50
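// LinkData and its SymbolicLinkArray follow the cursor-based serialization
// protocol used throughout this file: serializedSize() reports how many bytes
// serialize() will write, serialize() writes at the given cursor and returns
// the advanced cursor, and deserialize() consumes the same layout, returning
// nullptr on failure.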
size_t LinkData::SymbolicLinkArray::serializedSize() const {
  size_t size = 0;
  for (const Uint32Vector& offsets : *this) {
    size += SerializedPodVectorSize(offsets);
  }
  return size;
}

uint8_t* LinkData::SymbolicLinkArray::serialize(uint8_t* cursor) const {
  for (const Uint32Vector& offsets : *this) {
    cursor = SerializePodVector(cursor, offsets);
  }
  return cursor;
}

const uint8_t* LinkData::SymbolicLinkArray::deserialize(const uint8_t* cursor) {
  for (Uint32Vector& offsets : *this) {
    cursor = DeserializePodVector(cursor, &offsets);
    if (!cursor) {
      return nullptr;
    }
  }
  return cursor;
}

size_t LinkData::SymbolicLinkArray::sizeOfExcludingThis(
    MallocSizeOf mallocSizeOf) const {
  size_t size = 0;
  for (const Uint32Vector& offsets : *this) {
    size += offsets.sizeOfExcludingThis(mallocSizeOf);
  }
  return size;
}

size_t LinkData::serializedSize() const {
  return sizeof(pod()) + SerializedPodVectorSize(internalLinks) +
         symbolicLinks.serializedSize();
}

uint8_t* LinkData::serialize(uint8_t* cursor) const {
  MOZ_ASSERT(tier == Tier::Serialized);

  cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
  cursor = SerializePodVector(cursor, internalLinks);
  cursor = symbolicLinks.serialize(cursor);
  return cursor;
}

const uint8_t* LinkData::deserialize(const uint8_t* cursor) {
  MOZ_ASSERT(tier == Tier::Serialized);

  (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
      (cursor = DeserializePodVector(cursor, &internalLinks)) &&
      (cursor = symbolicLinks.deserialize(cursor));
  return cursor;
}

CodeSegment::~CodeSegment() {
  if (unregisterOnDestroy_) {
    UnregisterCodeSegment(this);
  }
}

static uint32_t RoundupCodeLength(uint32_t codeLength) {
  // AllocateExecutableMemory() requires a multiple of ExecutableCodePageSize.
  return RoundUp(codeLength, ExecutableCodePageSize);
}

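// Allocate page-aligned, initially-writable executable memory. The returned
// UniqueCodeBytes owns the mapping through a FreeCode deleter that remembers
// the rounded-up length, so the eventual DeallocateExecutableMemory() call
// matches the original allocation.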
/* static */
UniqueCodeBytes CodeSegment::AllocateCodeBytes(uint32_t codeLength) {
  if (codeLength > MaxCodeBytesPerProcess) {
    return nullptr;
  }

  static_assert(MaxCodeBytesPerProcess <= INT32_MAX, "rounding won't overflow");
  uint32_t roundedCodeLength = RoundupCodeLength(codeLength);

  void* p =
      AllocateExecutableMemory(roundedCodeLength, ProtectionSetting::Writable,
                               MemCheckKind::MakeUndefined);

  // If the allocation failed and the embedding gives us a last-ditch attempt
  // to purge all memory (which, in gecko, does a purging GC/CC/GC), do that
  // then retry the allocation.
  if (!p) {
    if (OnLargeAllocationFailure) {
      OnLargeAllocationFailure();
      p = AllocateExecutableMemory(roundedCodeLength,
                                   ProtectionSetting::Writable,
                                   MemCheckKind::MakeUndefined);
    }
  }

  if (!p) {
    return nullptr;
  }

  // Zero the padding.
  memset(((uint8_t*)p) + codeLength, 0, roundedCodeLength - codeLength);

  // We account for the bytes allocated in WasmModuleObject::create, where we
  // have the necessary JSContext.

  return UniqueCodeBytes((uint8_t*)p, FreeCode(roundedCodeLength));
}

bool CodeSegment::initialize(const CodeTier& codeTier) {
  MOZ_ASSERT(!initialized());
  codeTier_ = &codeTier;
  MOZ_ASSERT(initialized());

  // In the case of tiering, RegisterCodeSegment() immediately makes this code
  // segment live to access from other threads executing the containing
  // module. So only call once the CodeSegment is fully initialized.
  if (!RegisterCodeSegment(this)) {
    return false;
  }

  // This bool is only used by the destructor which cannot be called racily
  // and so it is not a problem to mutate it after RegisterCodeSegment().
  MOZ_ASSERT(!unregisterOnDestroy_);
  unregisterOnDestroy_ = true;
  return true;
}

const Code& CodeSegment::code() const {
  MOZ_ASSERT(codeTier_);
  return codeTier_->code();
}

void CodeSegment::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code) const {
  *code += RoundupCodeLength(length());
}

void FreeCode::operator()(uint8_t* bytes) {
  MOZ_ASSERT(codeLength);
  MOZ_ASSERT(codeLength == RoundupCodeLength(codeLength));

#ifdef MOZ_VTUNE
  vtune::UnmarkBytes(bytes, codeLength);
#endif
  DeallocateExecutableMemory(bytes, codeLength);
}

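// StaticallyLink() resolves the relocations recorded in LinkData: internal
// links are code labels bound to offsets within this segment, and symbolic
// links are patched to point at the process-wide builtin thunks for each
// SymbolicAddress. StaticallyUnlink() below reverses the patching (on a copy
// of the code) so that a serialized module image contains no absolute
// addresses.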
static bool StaticallyLink(const ModuleSegment& ms, const LinkData& linkData) {
  for (LinkData::InternalLink link : linkData.internalLinks) {
    CodeLabel label;
    label.patchAt()->bind(link.patchAtOffset);
    label.target()->bind(link.targetOffset);
#ifdef JS_CODELABEL_LINKMODE
    label.setLinkMode(static_cast<CodeLabel::LinkMode>(link.mode));
#endif
    Assembler::Bind(ms.base(), label);
  }

  if (!EnsureBuiltinThunksInitialized()) {
    return false;
  }

  for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
    const Uint32Vector& offsets = linkData.symbolicLinks[imm];
    if (offsets.empty()) {
      continue;
    }

    void* target = SymbolicAddressTarget(imm);
    for (uint32_t offset : offsets) {
      uint8_t* patchAt = ms.base() + offset;
      Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
                                         PatchedImmPtr(target),
                                         PatchedImmPtr((void*)-1));
    }
  }

  return true;
}

static void StaticallyUnlink(uint8_t* base, const LinkData& linkData) {
  for (LinkData::InternalLink link : linkData.internalLinks) {
    CodeLabel label;
    label.patchAt()->bind(link.patchAtOffset);
    label.target()->bind(-size_t(base));  // to reset immediate to null
#ifdef JS_CODELABEL_LINKMODE
    label.setLinkMode(static_cast<CodeLabel::LinkMode>(link.mode));
#endif
    Assembler::Bind(base, label);
  }

  for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
    const Uint32Vector& offsets = linkData.symbolicLinks[imm];
    if (offsets.empty()) {
      continue;
    }

    void* target = SymbolicAddressTarget(imm);
    for (uint32_t offset : offsets) {
      uint8_t* patchAt = base + offset;
      Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
                                         PatchedImmPtr((void*)-1),
                                         PatchedImmPtr(target));
    }
  }
}

#ifdef JS_ION_PERF
static bool AppendToString(const char* str, UTF8Bytes* bytes) {
  return bytes->append(str, strlen(str)) && bytes->append('\0');
}
#endif

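// When native profilers are attached, describe every function-related code
// range to them. Perf (JS_ION_PERF) gets a name, file and line for each
// function plus labelled entry/exit stubs; VTune (MOZ_VTUNE) is only told
// about the functions themselves.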
static void SendCodeRangesToProfiler(const ModuleSegment& ms,
                                     const Metadata& metadata,
                                     const CodeRangeVector& codeRanges) {
  bool enabled = false;
#ifdef JS_ION_PERF
  enabled |= PerfFuncEnabled();
#endif
#ifdef MOZ_VTUNE
  enabled |= vtune::IsProfilingActive();
#endif
  if (!enabled) {
    return;
  }

  for (const CodeRange& codeRange : codeRanges) {
    if (!codeRange.hasFuncIndex()) {
      continue;
    }

    uintptr_t start = uintptr_t(ms.base() + codeRange.begin());
    uintptr_t size = codeRange.end() - codeRange.begin();

    UTF8Bytes name;
    if (!metadata.getFuncNameStandalone(codeRange.funcIndex(), &name)) {
      return;
    }

    // Avoid "unused" warnings
    (void)start;
    (void)size;

#ifdef JS_ION_PERF
    if (PerfFuncEnabled()) {
      const char* file = metadata.filename.get();
      if (codeRange.isFunction()) {
        if (!name.append('\0')) {
          return;
        }
        unsigned line = codeRange.funcLineOrBytecode();
        writePerfSpewerWasmFunctionMap(start, size, file, line, name.begin());
      } else if (codeRange.isInterpEntry()) {
        if (!AppendToString(" slow entry", &name)) {
          return;
        }
        writePerfSpewerWasmMap(start, size, file, name.begin());
      } else if (codeRange.isJitEntry()) {
        if (!AppendToString(" fast entry", &name)) {
          return;
        }
        writePerfSpewerWasmMap(start, size, file, name.begin());
      } else if (codeRange.isImportInterpExit()) {
        if (!AppendToString(" slow exit", &name)) {
          return;
        }
        writePerfSpewerWasmMap(start, size, file, name.begin());
      } else if (codeRange.isImportJitExit()) {
        if (!AppendToString(" fast exit", &name)) {
          return;
        }
        writePerfSpewerWasmMap(start, size, file, name.begin());
      } else {
        MOZ_CRASH("unhandled perf hasFuncIndex type");
      }
    }
#endif
#ifdef MOZ_VTUNE
    if (!vtune::IsProfilingActive()) {
      continue;
    }
    if (!codeRange.isFunction()) {
      continue;
    }
    if (!name.append('\0')) {
      return;
    }
    vtune::MarkWasm(vtune::GenerateUniqueMethodID(), name.begin(), (void*)start,
                    size);
#endif
  }
}

ModuleSegment::ModuleSegment(Tier tier, UniqueCodeBytes codeBytes,
                             uint32_t codeLength, const LinkData& linkData)
    : CodeSegment(std::move(codeBytes), codeLength, CodeSegment::Kind::Module),
      tier_(tier),
      trapCode_(base() + linkData.trapOffset) {}

/* static */
UniqueModuleSegment ModuleSegment::create(Tier tier, MacroAssembler& masm,
                                          const LinkData& linkData) {
  uint32_t codeLength = masm.bytesNeeded();

  UniqueCodeBytes codeBytes = AllocateCodeBytes(codeLength);
  if (!codeBytes) {
    return nullptr;
  }

  masm.executableCopy(codeBytes.get());

  return js::MakeUnique<ModuleSegment>(tier, std::move(codeBytes), codeLength,
                                       linkData);
}

/* static */
UniqueModuleSegment ModuleSegment::create(Tier tier, const Bytes& unlinkedBytes,
                                          const LinkData& linkData) {
  uint32_t codeLength = unlinkedBytes.length();

  UniqueCodeBytes codeBytes = AllocateCodeBytes(codeLength);
  if (!codeBytes) {
    return nullptr;
  }

  memcpy(codeBytes.get(), unlinkedBytes.begin(), codeLength);

  return js::MakeUnique<ModuleSegment>(tier, std::move(codeBytes), codeLength,
                                       linkData);
}

bool ModuleSegment::initialize(const CodeTier& codeTier,
                               const LinkData& linkData,
                               const Metadata& metadata,
                               const MetadataTier& metadataTier) {
  if (!StaticallyLink(*this, linkData)) {
    return false;
  }

  // Reprotect the whole region to avoid having separate RW and RX mappings.
  if (!ExecutableAllocator::makeExecutableAndFlushICache(
          base(), RoundupCodeLength(length()))) {
    return false;
  }

  SendCodeRangesToProfiler(*this, metadata, metadataTier.codeRanges);

  // See comments in CodeSegment::initialize() for why this must be last.
  return CodeSegment::initialize(codeTier);
}

size_t ModuleSegment::serializedSize() const {
  return sizeof(uint32_t) + length();
}

void ModuleSegment::addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf,
                                  size_t* code, size_t* data) const {
  CodeSegment::addSizeOfMisc(mallocSizeOf, code);
  *data += mallocSizeOf(this);
}

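// A ModuleSegment is serialized as its length followed by the raw code bytes.
// The bytes are copied first and then statically unlinked in place in the
// output buffer, so the cached image contains no absolute addresses; the
// live, linked code is left untouched.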
uint8_t* ModuleSegment::serialize(uint8_t* cursor,
                                  const LinkData& linkData) const {
  MOZ_ASSERT(tier() == Tier::Serialized);

  cursor = WriteScalar<uint32_t>(cursor, length());
  uint8_t* serializedBase = cursor;
  cursor = WriteBytes(cursor, base(), length());
  StaticallyUnlink(serializedBase, linkData);
  return cursor;
}

/* static */ const uint8_t* ModuleSegment::deserialize(
    const uint8_t* cursor, const LinkData& linkData,
    UniqueModuleSegment* segment) {
  uint32_t length;
  cursor = ReadScalar<uint32_t>(cursor, &length);
  if (!cursor) {
    return nullptr;
  }

  UniqueCodeBytes bytes = AllocateCodeBytes(length);
  if (!bytes) {
    return nullptr;
  }

  cursor = ReadBytes(cursor, bytes.get(), length);
  if (!cursor) {
    return nullptr;
  }

  *segment = js::MakeUnique<ModuleSegment>(Tier::Serialized, std::move(bytes),
                                           length, linkData);
  if (!*segment) {
    return nullptr;
  }

  return cursor;
}

const CodeRange* ModuleSegment::lookupRange(const void* pc) const {
  return codeTier().lookupRange(pc);
}

size_t FuncExport::serializedSize() const {
  return funcType_.serializedSize() + sizeof(pod);
}

uint8_t* FuncExport::serialize(uint8_t* cursor) const {
  cursor = funcType_.serialize(cursor);
  cursor = WriteBytes(cursor, &pod, sizeof(pod));
  return cursor;
}

const uint8_t* FuncExport::deserialize(const uint8_t* cursor) {
  (cursor = funcType_.deserialize(cursor)) &&
      (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
  return cursor;
}

size_t FuncExport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return funcType_.sizeOfExcludingThis(mallocSizeOf);
}

size_t FuncImport::serializedSize() const {
  return funcType_.serializedSize() + sizeof(pod);
}

uint8_t* FuncImport::serialize(uint8_t* cursor) const {
  cursor = funcType_.serialize(cursor);
  cursor = WriteBytes(cursor, &pod, sizeof(pod));
  return cursor;
}

const uint8_t* FuncImport::deserialize(const uint8_t* cursor) {
  (cursor = funcType_.deserialize(cursor)) &&
      (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
  return cursor;
}

size_t FuncImport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return funcType_.sizeOfExcludingThis(mallocSizeOf);
}

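// CacheableChars (used for filenames, source map URLs, etc.) is serialized as
// a uint32_t length that includes the null terminator, followed by the
// characters; a length of zero encodes a null pointer.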
static size_t StringLengthWithNullChar(const char* chars) {
  return chars ? strlen(chars) + 1 : 0;
}

size_t CacheableChars::serializedSize() const {
  return sizeof(uint32_t) + StringLengthWithNullChar(get());
}

uint8_t* CacheableChars::serialize(uint8_t* cursor) const {
  uint32_t lengthWithNullChar = StringLengthWithNullChar(get());
  cursor = WriteScalar<uint32_t>(cursor, lengthWithNullChar);
  cursor = WriteBytes(cursor, get(), lengthWithNullChar);
  return cursor;
}

const uint8_t* CacheableChars::deserialize(const uint8_t* cursor) {
  uint32_t lengthWithNullChar;
  cursor = ReadBytes(cursor, &lengthWithNullChar, sizeof(uint32_t));

  if (lengthWithNullChar) {
    reset(js_pod_malloc<char>(lengthWithNullChar));
    if (!get()) {
      return nullptr;
    }

    cursor = ReadBytes(cursor, get(), lengthWithNullChar);
  } else {
    MOZ_ASSERT(!get());
  }

  return cursor;
}

size_t CacheableChars::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return mallocSizeOf(get());
}

size_t MetadataTier::serializedSize() const {
  return SerializedPodVectorSize(funcToCodeRange) +
         SerializedPodVectorSize(codeRanges) +
         SerializedPodVectorSize(callSites) + trapSites.serializedSize() +
         SerializedVectorSize(funcImports) + SerializedVectorSize(funcExports);
}

size_t MetadataTier::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return funcToCodeRange.sizeOfExcludingThis(mallocSizeOf) +
         codeRanges.sizeOfExcludingThis(mallocSizeOf) +
         callSites.sizeOfExcludingThis(mallocSizeOf) +
         trapSites.sizeOfExcludingThis(mallocSizeOf) +
         SizeOfVectorExcludingThis(funcImports, mallocSizeOf) +
         SizeOfVectorExcludingThis(funcExports, mallocSizeOf);
}

uint8_t* MetadataTier::serialize(uint8_t* cursor) const {
  cursor = SerializePodVector(cursor, funcToCodeRange);
  cursor = SerializePodVector(cursor, codeRanges);
  cursor = SerializePodVector(cursor, callSites);
  cursor = trapSites.serialize(cursor);
  cursor = SerializeVector(cursor, funcImports);
  cursor = SerializeVector(cursor, funcExports);
  MOZ_ASSERT(debugTrapFarJumpOffsets.empty());
  return cursor;
}

/* static */ const uint8_t* MetadataTier::deserialize(const uint8_t* cursor) {
  (cursor = DeserializePodVector(cursor, &funcToCodeRange)) &&
      (cursor = DeserializePodVector(cursor, &codeRanges)) &&
      (cursor = DeserializePodVector(cursor, &callSites)) &&
      (cursor = trapSites.deserialize(cursor)) &&
      (cursor = DeserializeVector(cursor, &funcImports)) &&
      (cursor = DeserializeVector(cursor, &funcExports));
  MOZ_ASSERT(debugTrapFarJumpOffsets.empty());
  return cursor;
}

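// Lazy stubs: interpreter and jit entry stubs for exported functions are not
// generated eagerly but on first use. They live in LazyStubSegments, which
// are ordinary executable CodeSegments that are filled incrementally and
// track their own code ranges.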
UniqueLazyStubSegment LazyStubSegment::create(const CodeTier& codeTier,
                                              size_t length) {
  UniqueCodeBytes codeBytes = AllocateCodeBytes(length);
  if (!codeBytes) {
    return nullptr;
  }

  auto segment = js::MakeUnique<LazyStubSegment>(std::move(codeBytes), length);
  if (!segment || !segment->initialize(codeTier)) {
    return nullptr;
  }

  return segment;
}

bool LazyStubSegment::hasSpace(size_t bytes) const {
  MOZ_ASSERT(AlignBytesNeeded(bytes) == bytes);
  return bytes <= length() && usedBytes_ <= length() - bytes;
}

bool LazyStubSegment::addStubs(size_t codeLength,
                               const Uint32Vector& funcExportIndices,
                               const FuncExportVector& funcExports,
                               const CodeRangeVector& codeRanges,
                               uint8_t** codePtr,
                               size_t* indexFirstInsertedCodeRange) {
  MOZ_ASSERT(hasSpace(codeLength));

  size_t offsetInSegment = usedBytes_;
  *codePtr = base() + usedBytes_;
  usedBytes_ += codeLength;

  *indexFirstInsertedCodeRange = codeRanges_.length();

  if (!codeRanges_.reserve(codeRanges_.length() + 2 * codeRanges.length())) {
    return false;
  }

  size_t i = 0;
  for (uint32_t funcExportIndex : funcExportIndices) {
    const CodeRange& interpRange = codeRanges[i];
    MOZ_ASSERT(interpRange.isInterpEntry());
    MOZ_ASSERT(interpRange.funcIndex() ==
               funcExports[funcExportIndex].funcIndex());

    codeRanges_.infallibleAppend(interpRange);
    codeRanges_.back().offsetBy(offsetInSegment);
    i++;

    if (funcExports[funcExportIndex]
            .funcType()
            .temporarilyUnsupportedReftypeForEntry()) {
      continue;
    }

    const CodeRange& jitRange = codeRanges[i];
    MOZ_ASSERT(jitRange.isJitEntry());
    MOZ_ASSERT(jitRange.funcIndex() == interpRange.funcIndex());

    codeRanges_.infallibleAppend(jitRange);
    codeRanges_.back().offsetBy(offsetInSegment);
    i++;
  }

  return true;
}

const CodeRange* LazyStubSegment::lookupRange(const void* pc) const {
  return LookupInSorted(codeRanges_,
                        CodeRange::OffsetInCode((uint8_t*)pc - base()));
}

void LazyStubSegment::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                                    size_t* data) const {
  CodeSegment::addSizeOfMisc(mallocSizeOf, code);
  *data += codeRanges_.sizeOfExcludingThis(mallocSizeOf);
  *data += mallocSizeOf(this);
}

struct ProjectLazyFuncIndex {
  const LazyFuncExportVector& funcExports;
  explicit ProjectLazyFuncIndex(const LazyFuncExportVector& funcExports)
      : funcExports(funcExports) {}
  uint32_t operator[](size_t index) const {
    return funcExports[index].funcIndex;
  }
};

static constexpr unsigned LAZY_STUB_LIFO_DEFAULT_CHUNK_SIZE = 8 * 1024;

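// createMany() generates entry stubs for the given exported functions: an
// interpreter entry for every export and, when the signature allows it, a jit
// (fast) entry as well. The stubs are assembled into a temporary masm, copied
// into a LazyStubSegment with enough space (allocating a new segment of at
// least a page if needed), made executable, and recorded as LazyFuncExports
// kept sorted by function index for later lookup.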
bool LazyStubTier::createMany(const Uint32Vector& funcExportIndices,
                              const CodeTier& codeTier,
                              size_t* stubSegmentIndex) {
  MOZ_ASSERT(funcExportIndices.length());

  LifoAlloc lifo(LAZY_STUB_LIFO_DEFAULT_CHUNK_SIZE);
  TempAllocator alloc(&lifo);
  JitContext jitContext(&alloc);
  WasmMacroAssembler masm(alloc);

  const MetadataTier& metadata = codeTier.metadata();
  const FuncExportVector& funcExports = metadata.funcExports;
  uint8_t* moduleSegmentBase = codeTier.segment().base();

  bool bigIntEnabled = codeTier.code().metadata().bigIntEnabled;

  CodeRangeVector codeRanges;
  DebugOnly<uint32_t> numExpectedRanges = 0;
  for (uint32_t funcExportIndex : funcExportIndices) {
    const FuncExport& fe = funcExports[funcExportIndex];
    numExpectedRanges +=
        fe.funcType().temporarilyUnsupportedReftypeForEntry() ? 1 : 2;
    void* calleePtr =
        moduleSegmentBase + metadata.codeRange(fe).funcNormalEntry();
    Maybe<ImmPtr> callee;
    callee.emplace(calleePtr, ImmPtr::NoCheckToken());
    if (!GenerateEntryStubs(masm, funcExportIndex, fe, callee,
                            /* asmjs */ false, bigIntEnabled, &codeRanges)) {
      return false;
    }
  }
  MOZ_ASSERT(codeRanges.length() == numExpectedRanges,
             "incorrect number of entries per function");

  masm.finish();

  MOZ_ASSERT(masm.callSites().empty());
  MOZ_ASSERT(masm.callSiteTargets().empty());
  MOZ_ASSERT(masm.trapSites().empty());

  if (masm.oom()) {
    return false;
  }

  size_t codeLength = LazyStubSegment::AlignBytesNeeded(masm.bytesNeeded());

  if (!stubSegments_.length() ||
      !stubSegments_[lastStubSegmentIndex_]->hasSpace(codeLength)) {
    size_t newSegmentSize = std::max(codeLength, ExecutableCodePageSize);
    UniqueLazyStubSegment newSegment =
        LazyStubSegment::create(codeTier, newSegmentSize);
    if (!newSegment) {
      return false;
    }
    lastStubSegmentIndex_ = stubSegments_.length();
    if (!stubSegments_.emplaceBack(std::move(newSegment))) {
      return false;
    }
  }

  LazyStubSegment* segment = stubSegments_[lastStubSegmentIndex_].get();
  *stubSegmentIndex = lastStubSegmentIndex_;

  size_t interpRangeIndex;
  uint8_t* codePtr = nullptr;
  if (!segment->addStubs(codeLength, funcExportIndices, funcExports, codeRanges,
                         &codePtr, &interpRangeIndex))
    return false;

  masm.executableCopy(codePtr);
  PatchDebugSymbolicAccesses(codePtr, masm);
  memset(codePtr + masm.bytesNeeded(), 0, codeLength - masm.bytesNeeded());

  for (const CodeLabel& label : masm.codeLabels()) {
    Assembler::Bind(codePtr, label);
  }

  if (!ExecutableAllocator::makeExecutableAndFlushICache(codePtr, codeLength)) {
    return false;
  }

  // Create lazy function exports for funcIndex -> entry lookup.
  if (!exports_.reserve(exports_.length() + funcExportIndices.length())) {
    return false;
  }

  for (uint32_t funcExportIndex : funcExportIndices) {
    const FuncExport& fe = funcExports[funcExportIndex];

    DebugOnly<CodeRange> cr = segment->codeRanges()[interpRangeIndex];
    MOZ_ASSERT(cr.value.isInterpEntry());
    MOZ_ASSERT(cr.value.funcIndex() == fe.funcIndex());

    LazyFuncExport lazyExport(fe.funcIndex(), *stubSegmentIndex,
                              interpRangeIndex);

    size_t exportIndex;
    MOZ_ALWAYS_FALSE(BinarySearch(ProjectLazyFuncIndex(exports_), 0,
                                  exports_.length(), fe.funcIndex(),
                                  &exportIndex));
    MOZ_ALWAYS_TRUE(
        exports_.insert(exports_.begin() + exportIndex, std::move(lazyExport)));

    // Functions with unsupported reftypes in their sig have only one entry
    // (interp). All other functions get an extra jit entry.
    interpRangeIndex +=
        fe.funcType().temporarilyUnsupportedReftypeForEntry() ? 1 : 2;
  }

  return true;
}

bool LazyStubTier::createOne(uint32_t funcExportIndex,
                             const CodeTier& codeTier) {
  Uint32Vector funcExportIndexes;
  if (!funcExportIndexes.append(funcExportIndex)) {
    return false;
  }

  size_t stubSegmentIndex;
  if (!createMany(funcExportIndexes, codeTier, &stubSegmentIndex)) {
    return false;
  }

  const UniqueLazyStubSegment& segment = stubSegments_[stubSegmentIndex];
  const CodeRangeVector& codeRanges = segment->codeRanges();

  // Functions that have unsupported reftypes in their sig don't get a jit
  // entry.
  if (codeTier.metadata()
          .funcExports[funcExportIndex]
          .funcType()
          .temporarilyUnsupportedReftypeForEntry()) {
    MOZ_ASSERT(codeRanges.length() >= 1);
    MOZ_ASSERT(codeRanges.back().isInterpEntry());
    return true;
  }

  MOZ_ASSERT(codeRanges.length() >= 2);
  MOZ_ASSERT(codeRanges[codeRanges.length() - 2].isInterpEntry());

  const CodeRange& cr = codeRanges[codeRanges.length() - 1];
  MOZ_ASSERT(cr.isJitEntry());

  codeTier.code().setJitEntry(cr.funcIndex(), segment->base() + cr.begin());
  return true;
}

bool LazyStubTier::createTier2(const Uint32Vector& funcExportIndices,
                               const CodeTier& codeTier,
                               Maybe<size_t>* outStubSegmentIndex) {
  if (!funcExportIndices.length()) {
    return true;
  }

  size_t stubSegmentIndex;
  if (!createMany(funcExportIndices, codeTier, &stubSegmentIndex)) {
    return false;
  }

  outStubSegmentIndex->emplace(stubSegmentIndex);
  return true;
}

void LazyStubTier::setJitEntries(const Maybe<size_t>& stubSegmentIndex,
                                 const Code& code) {
  if (!stubSegmentIndex) {
    return;
  }
  const UniqueLazyStubSegment& segment = stubSegments_[*stubSegmentIndex];
  for (const CodeRange& cr : segment->codeRanges()) {
    if (!cr.isJitEntry()) {
      continue;
    }
    code.setJitEntry(cr.funcIndex(), segment->base() + cr.begin());
  }
}

bool LazyStubTier::hasStub(uint32_t funcIndex) const {
  size_t match;
  return BinarySearch(ProjectLazyFuncIndex(exports_), 0, exports_.length(),
                      funcIndex, &match);
}

void* LazyStubTier::lookupInterpEntry(uint32_t funcIndex) const {
  size_t match;
  if (!BinarySearch(ProjectLazyFuncIndex(exports_), 0, exports_.length(),
                    funcIndex, &match)) {
    return nullptr;
  }
  const LazyFuncExport& fe = exports_[match];
  const LazyStubSegment& stub = *stubSegments_[fe.lazyStubSegmentIndex];
  return stub.base() + stub.codeRanges()[fe.funcCodeRangeIndex].begin();
}

void LazyStubTier::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                                 size_t* data) const {
  *data += sizeof(*this);
  *data += exports_.sizeOfExcludingThis(mallocSizeOf);
  for (const UniqueLazyStubSegment& stub : stubSegments_) {
    stub->addSizeOfMisc(mallocSizeOf, code, data);
  }
}

bool MetadataTier::clone(const MetadataTier& src) {
  if (!funcToCodeRange.appendAll(src.funcToCodeRange)) {
    return false;
  }
  if (!codeRanges.appendAll(src.codeRanges)) {
    return false;
  }
  if (!callSites.appendAll(src.callSites)) {
    return false;
  }
  if (!debugTrapFarJumpOffsets.appendAll(src.debugTrapFarJumpOffsets)) {
    return false;
  }

  for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
    if (!trapSites[trap].appendAll(src.trapSites[trap])) {
      return false;
    }
  }

  if (!funcImports.resize(src.funcImports.length())) {
    return false;
  }
  for (size_t i = 0; i < src.funcImports.length(); i++) {
    funcImports[i].clone(src.funcImports[i]);
  }

  if (!funcExports.resize(src.funcExports.length())) {
    return false;
  }
  for (size_t i = 0; i < src.funcExports.length(); i++) {
    funcExports[i].clone(src.funcExports[i]);
  }

  return true;
}

size_t Metadata::serializedSize() const {
  return sizeof(pod()) + SerializedVectorSize(funcTypeIds) +
         SerializedPodVectorSize(globals) + SerializedPodVectorSize(tables) +
         sizeof(moduleName) + SerializedPodVectorSize(funcNames) +
         filename.serializedSize() + sourceMapURL.serializedSize() +
         sizeof(uint8_t);
}

uint8_t* Metadata::serialize(uint8_t* cursor) const {
  MOZ_ASSERT(!debugEnabled && debugFuncArgTypes.empty() &&
             debugFuncReturnTypes.empty());
  cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
  cursor = SerializeVector(cursor, funcTypeIds);
  cursor = SerializePodVector(cursor, globals);
  cursor = SerializePodVector(cursor, tables);
  cursor = WriteBytes(cursor, &moduleName, sizeof(moduleName));
  cursor = SerializePodVector(cursor, funcNames);
  cursor = filename.serialize(cursor);
  cursor = sourceMapURL.serialize(cursor);
  cursor = WriteScalar(cursor, uint8_t(omitsBoundsChecks));
  return cursor;
}

/* static */ const uint8_t* Metadata::deserialize(const uint8_t* cursor) {
  uint8_t scalarOmitsBoundsChecks = 0;
  (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
      (cursor = DeserializeVector(cursor, &funcTypeIds)) &&
      (cursor = DeserializePodVector(cursor, &globals)) &&
      (cursor = DeserializePodVector(cursor, &tables)) &&
      (cursor = ReadBytes(cursor, &moduleName, sizeof(moduleName))) &&
      (cursor = DeserializePodVector(cursor, &funcNames)) &&
      (cursor = filename.deserialize(cursor)) &&
      (cursor = sourceMapURL.deserialize(cursor)) &&
      (cursor = ReadScalar<uint8_t>(cursor, &scalarOmitsBoundsChecks));
  debugEnabled = false;
  debugFuncArgTypes.clear();
  debugFuncReturnTypes.clear();
  omitsBoundsChecks = !!scalarOmitsBoundsChecks;
  return cursor;
}

size_t Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
  return SizeOfVectorExcludingThis(funcTypeIds, mallocSizeOf) +
         globals.sizeOfExcludingThis(mallocSizeOf) +
         tables.sizeOfExcludingThis(mallocSizeOf) +
         funcNames.sizeOfExcludingThis(mallocSizeOf) +
         filename.sizeOfExcludingThis(mallocSizeOf) +
         sourceMapURL.sizeOfExcludingThis(mallocSizeOf);
}

struct ProjectFuncIndex {
  const FuncExportVector& funcExports;
  explicit ProjectFuncIndex(const FuncExportVector& funcExports)
      : funcExports(funcExports) {}
  uint32_t operator[](size_t index) const {
    return funcExports[index].funcIndex();
  }
};

FuncExport& MetadataTier::lookupFuncExport(
    uint32_t funcIndex, size_t* funcExportIndex /* = nullptr */) {
  size_t match;
  if (!BinarySearch(ProjectFuncIndex(funcExports), 0, funcExports.length(),
                    funcIndex, &match)) {
    MOZ_CRASH("missing function export");
  }
  if (funcExportIndex) {
    *funcExportIndex = match;
  }
  return funcExports[match];
}

const FuncExport& MetadataTier::lookupFuncExport(
    uint32_t funcIndex, size_t* funcExportIndex) const {
  return const_cast<MetadataTier*>(this)->lookupFuncExport(funcIndex,
                                                           funcExportIndex);
}

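// Function names are read out of the module's name section payload. If a
// function has no name entry, getFuncName() falls back to the synthesized
// label "wasm-function[<index>]".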
static bool AppendName(const Bytes& namePayload, const Name& name,
                       UTF8Bytes* bytes) {
  MOZ_RELEASE_ASSERT(name.offsetInNamePayload <= namePayload.length());
  MOZ_RELEASE_ASSERT(name.length <=
                     namePayload.length() - name.offsetInNamePayload);
  return bytes->append(
      (const char*)namePayload.begin() + name.offsetInNamePayload, name.length);
}

static bool AppendFunctionIndexName(uint32_t funcIndex, UTF8Bytes* bytes) {
  const char beforeFuncIndex[] = "wasm-function[";
  const char afterFuncIndex[] = "]";

  ToCStringBuf cbuf;
  const char* funcIndexStr = NumberToCString(nullptr, &cbuf, funcIndex);
  MOZ_ASSERT(funcIndexStr);

  return bytes->append(beforeFuncIndex, strlen(beforeFuncIndex)) &&
         bytes->append(funcIndexStr, strlen(funcIndexStr)) &&
         bytes->append(afterFuncIndex, strlen(afterFuncIndex));
}

bool Metadata::getFuncName(NameContext ctx, uint32_t funcIndex,
                           UTF8Bytes* name) const {
  if (moduleName && moduleName->length != 0) {
    if (!AppendName(namePayload->bytes, *moduleName, name)) {
      return false;
    }
    if (!name->append('.')) {
      return false;
    }
  }

  if (funcIndex < funcNames.length() && funcNames[funcIndex].length != 0) {
    return AppendName(namePayload->bytes, funcNames[funcIndex], name);
  }

  if (ctx == NameContext::BeforeLocation) {
    return true;
  }

  return AppendFunctionIndexName(funcIndex, name);
}

bool CodeTier::initialize(const Code& code, const LinkData& linkData,
                          const Metadata& metadata) {
  MOZ_ASSERT(!initialized());
  code_ = &code;

  MOZ_ASSERT(lazyStubs_.lock()->empty());

  // See comments in CodeSegment::initialize() for why this must be last.
  if (!segment_->initialize(*this, linkData, metadata, *metadata_)) {
    return false;
  }

  MOZ_ASSERT(initialized());
  return true;
}

size_t CodeTier::serializedSize() const {
  return segment_->serializedSize() + metadata_->serializedSize();
}

uint8_t* CodeTier::serialize(uint8_t* cursor, const LinkData& linkData) const {
  cursor = metadata_->serialize(cursor);
  cursor = segment_->serialize(cursor, linkData);
  return cursor;
}

/* static */ const uint8_t* CodeTier::deserialize(const uint8_t* cursor,
                                                  const LinkData& linkData,
                                                  UniqueCodeTier* codeTier) {
  auto metadata = js::MakeUnique<MetadataTier>(Tier::Serialized);
  if (!metadata) {
    return nullptr;
  }
  cursor = metadata->deserialize(cursor);
  if (!cursor) {
    return nullptr;
  }

  UniqueModuleSegment segment;
  cursor = ModuleSegment::deserialize(cursor, linkData, &segment);
  if (!cursor) {
    return nullptr;
  }

  *codeTier = js::MakeUnique<CodeTier>(std::move(metadata), std::move(segment));
  if (!*codeTier) {
    return nullptr;
  }

  return cursor;
}

void CodeTier::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                             size_t* data) const {
  segment_->addSizeOfMisc(mallocSizeOf, code, data);
  lazyStubs_.lock()->addSizeOfMisc(mallocSizeOf, code, data);
  *data += metadata_->sizeOfExcludingThis(mallocSizeOf);
}

const CodeRange* CodeTier::lookupRange(const void* pc) const {
  CodeRange::OffsetInCode target((uint8_t*)pc - segment_->base());
  return LookupInSorted(metadata_->codeRanges, target);
}

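// JumpTables keeps two per-function pointer tables indexed by function index:
// tiering_ (allocated only for Tier1 compilations) records each function's
// tier entry, and jit_ records the fast JS-to-wasm jit entry stubs. Both are
// sized to the total number of functions and filled from the code ranges in
// init().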
bool JumpTables::init(CompileMode mode, const ModuleSegment& ms,
                      const CodeRangeVector& codeRanges) {
  static_assert(JSScript::offsetOfJitCodeRaw() == 0,
                "wasm fast jit entry is at (void*) jit[funcIndex]");

  mode_ = mode;

  size_t numFuncs = 0;
  for (const CodeRange& cr : codeRanges) {
    if (cr.isFunction()) {
      numFuncs++;
    }
  }

  numFuncs_ = numFuncs;

  if (mode_ == CompileMode::Tier1) {
    tiering_ = TablePointer(js_pod_calloc<void*>(numFuncs));
    if (!tiering_) {
      return false;
    }
  }

  // The number of jit entries is overestimated, but it is simpler when
  // filling/looking up the jit entries and safe (worst case we'll crash
  // because of a null deref when trying to call the jit entry of an
  // unexported function).
  jit_ = TablePointer(js_pod_calloc<void*>(numFuncs));
  if (!jit_) {
    return false;
  }

  uint8_t* codeBase = ms.base();
  for (const CodeRange& cr : codeRanges) {
    if (cr.isFunction()) {
      setTieringEntry(cr.funcIndex(), codeBase + cr.funcTierEntry());
    } else if (cr.isJitEntry()) {
      setJitEntry(cr.funcIndex(), codeBase + cr.begin());
    }
  }
  return true;
}

Code::Code(UniqueCodeTier tier1, const Metadata& metadata,
           JumpTables&& maybeJumpTables, StructTypeVector&& structTypes)
    : tier1_(std::move(tier1)),
      metadata_(&metadata),
      profilingLabels_(mutexid::WasmCodeProfilingLabels,
                       CacheableCharsVector()),
      jumpTables_(std::move(maybeJumpTables)),
      structTypes_(std::move(structTypes)) {}

bool Code::initialize(const LinkData& linkData) {
  MOZ_ASSERT(!initialized());

  if (!tier1_->initialize(*this, linkData, *metadata_)) {
    return false;
  }

  MOZ_ASSERT(initialized());
  return true;
}

bool Code::setTier2(UniqueCodeTier tier2, const LinkData& linkData) const {
  MOZ_RELEASE_ASSERT(!hasTier2());
  MOZ_RELEASE_ASSERT(tier2->tier() == Tier::Optimized &&
                     tier1_->tier() == Tier::Baseline);

  if (!tier2->initialize(*this, linkData, *metadata_)) {
    return false;
  }

  tier2_ = std::move(tier2);

  return true;
}

void Code::commitTier2() const {
  MOZ_RELEASE_ASSERT(!hasTier2());
  MOZ_RELEASE_ASSERT(tier2_.get());
  hasTier2_ = true;
  MOZ_ASSERT(hasTier2());
}

uint32_t Code::getFuncIndex(JSFunction* fun) const {
  MOZ_ASSERT(fun->isWasm() || fun->isAsmJSNative());
  if (!fun->isWasmWithJitEntry()) {
    return fun->wasmFuncIndex();
  }
  return jumpTables_.funcIndexFromJitEntry(fun->wasmJitEntry());
}

Tiers Code::tiers() const {
  if (hasTier2()) {
    return Tiers(tier1_->tier(), tier2_->tier());
  }
  return Tiers(tier1_->tier());
}

bool Code::hasTier(Tier t) const {
  if (hasTier2() && tier2_->tier() == t) {
    return true;
  }
  return tier1_->tier() == t;
}

Tier Code::stableTier() const { return tier1_->tier(); }

Tier Code::bestTier() const {
  if (hasTier2()) {
    return tier2_->tier();
  }
  return tier1_->tier();
}

const CodeTier& Code::codeTier(Tier tier) const {
  switch (tier) {
    case Tier::Baseline:
      if (tier1_->tier() == Tier::Baseline) {
        MOZ_ASSERT(tier1_->initialized());
        return *tier1_;
      }
      MOZ_CRASH("No code segment at this tier");
    case Tier::Optimized:
      if (tier1_->tier() == Tier::Optimized) {
        MOZ_ASSERT(tier1_->initialized());
        return *tier1_;
      }
      if (tier2_) {
        MOZ_ASSERT(tier2_->initialized());
        return *tier2_;
      }
      MOZ_CRASH("No code segment at this tier");
  }
  MOZ_CRASH();
}

bool Code::containsCodePC(const void* pc) const {
  for (Tier t : tiers()) {
    const ModuleSegment& ms = segment(t);
    if (ms.containsCodePC(pc)) {
      return true;
    }
  }
  return false;
}

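// The lookup helpers below binary-search per-tier metadata (call sites, code
// ranges, stack maps, trap sites) keyed by an offset relative to that tier's
// code base. The small *Offset adapter structs expose the relevant field of
// each vector element through operator[] so that mozilla::BinarySearch can be
// used directly.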
struct CallSiteRetAddrOffset {
  const CallSiteVector& callSites;
  explicit CallSiteRetAddrOffset(const CallSiteVector& callSites)
      : callSites(callSites) {}
  uint32_t operator[](size_t index) const {
    return callSites[index].returnAddressOffset();
  }
};

const CallSite* Code::lookupCallSite(void* returnAddress) const {
  for (Tier t : tiers()) {
    uint32_t target = ((uint8_t*)returnAddress) - segment(t).base();
    size_t lowerBound = 0;
    size_t upperBound = metadata(t).callSites.length();

    size_t match;
    if (BinarySearch(CallSiteRetAddrOffset(metadata(t).callSites), lowerBound,
                     upperBound, target, &match))
      return &metadata(t).callSites[match];
  }

  return nullptr;
}

const CodeRange* Code::lookupFuncRange(void* pc) const {
  for (Tier t : tiers()) {
    const CodeRange* result = codeTier(t).lookupRange(pc);
    if (result && result->isFunction()) {
      return result;
    }
  }
  return nullptr;
}

const StackMap* Code::lookupStackMap(uint8_t* nextPC) const {
  for (Tier t : tiers()) {
    const StackMap* result = metadata(t).stackMaps.findMap(nextPC);
    if (result) {
      return result;
    }
  }
  return nullptr;
}

struct TrapSitePCOffset {
  const TrapSiteVector& trapSites;
  explicit TrapSitePCOffset(const TrapSiteVector& trapSites)
      : trapSites(trapSites) {}
  uint32_t operator[](size_t index) const { return trapSites[index].pcOffset; }
};

bool Code::lookupTrap(void* pc, Trap* trapOut, BytecodeOffset* bytecode) const {
  for (Tier t : tiers()) {
    const TrapSiteVectorArray& trapSitesArray = metadata(t).trapSites;
    for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
      const TrapSiteVector& trapSites = trapSitesArray[trap];

      uint32_t target = ((uint8_t*)pc) - segment(t).base();
      size_t lowerBound = 0;
      size_t upperBound = trapSites.length();

      size_t match;
      if (BinarySearch(TrapSitePCOffset(trapSites), lowerBound, upperBound,
                       target, &match)) {
        MOZ_ASSERT(segment(t).containsCodePC(pc));
        *trapOut = trap;
        *bytecode = trapSites[match].bytecode;
        return true;
      }
    }
  }

  return false;
}

// When enabled, generate profiling labels for every name in funcNames_ that is
// the name of some Function CodeRange. This involves malloc() so do it now
// since, once we start sampling, we'll be in a signal-handling context where
// we cannot malloc.
void Code::ensureProfilingLabels(bool profilingEnabled) const {
  auto labels = profilingLabels_.lock();

  if (!profilingEnabled) {
    labels->clear();
    return;
  }

  if (!labels->empty()) {
    return;
  }

  // Any tier will do, we only need tier-invariant data that are incidentally
  // stored with the code ranges.

  for (const CodeRange& codeRange : metadata(stableTier()).codeRanges) {
    if (!codeRange.isFunction()) {
      continue;
    }

    ToCStringBuf cbuf;
    const char* bytecodeStr =
        NumberToCString(nullptr, &cbuf, codeRange.funcLineOrBytecode());
    MOZ_ASSERT(bytecodeStr);

    UTF8Bytes name;
    if (!metadata().getFuncNameStandalone(codeRange.funcIndex(), &name)) {
      return;
    }
    if (!name.append(" (", 2)) {
      return;
    }

    if (const char* filename = metadata().filename.get()) {
      if (!name.append(filename, strlen(filename))) {
        return;
      }
    } else {
      if (!name.append('?')) {
        return;
      }
    }

    if (!name.append(':') || !name.append(bytecodeStr, strlen(bytecodeStr)) ||
        !name.append(")\0", 2)) {
      return;
    }

    UniqueChars label(name.extractOrCopyRawBuffer());
    if (!label) {
      return;
    }

    if (codeRange.funcIndex() >= labels->length()) {
      if (!labels->resize(codeRange.funcIndex() + 1)) {
        return;
      }
    }

    ((CacheableCharsVector&)labels)[codeRange.funcIndex()] = std::move(label);
  }
}

const char* Code::profilingLabel(uint32_t funcIndex) const {
  auto labels = profilingLabels_.lock();

  if (funcIndex >= labels->length() ||
      !((CacheableCharsVector&)labels)[funcIndex]) {
    return "?";
  }
  return ((CacheableCharsVector&)labels)[funcIndex].get();
}

void Code::addSizeOfMiscIfNotSeen(MallocSizeOf mallocSizeOf,
                                  Metadata::SeenSet* seenMetadata,
                                  Code::SeenSet* seenCode, size_t* code,
                                  size_t* data) const {
  auto p = seenCode->lookupForAdd(this);
  if (p) {
    return;
  }
  bool ok = seenCode->add(p, this);
  (void)ok;  // oh well

  *data += mallocSizeOf(this) +
           metadata().sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenMetadata) +
           profilingLabels_.lock()->sizeOfExcludingThis(mallocSizeOf) +
           jumpTables_.sizeOfMiscExcludingThis();

  for (auto t : tiers()) {
    codeTier(t).addSizeOfMisc(mallocSizeOf, code, data);
  }
  *data += SizeOfVectorExcludingThis(structTypes_, mallocSizeOf);
}

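// Only the Tier::Serialized code tier is written to the cache. Serialization
// writes the module-wide Metadata, then that code tier (its metadata plus
// unlinked code bytes), then the struct types; deserialization rebuilds the
// Code, recreates its jump tables from the deserialized code ranges, and
// relinks the segment via Code::initialize().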
size_t Code::serializedSize() const {
  return metadata().serializedSize() +
         codeTier(Tier::Serialized).serializedSize() +
         SerializedVectorSize(structTypes_);
}

uint8_t* Code::serialize(uint8_t* cursor, const LinkData& linkData) const {
  MOZ_RELEASE_ASSERT(!metadata().debugEnabled);

  cursor = metadata().serialize(cursor);
  cursor = codeTier(Tier::Serialized).serialize(cursor, linkData);
  cursor = SerializeVector(cursor, structTypes_);
  return cursor;
}

/* static */ const uint8_t* Code::deserialize(const uint8_t* cursor,
                                              const LinkData& linkData,
                                              Metadata& metadata,
                                              SharedCode* out) {
  cursor = metadata.deserialize(cursor);
  if (!cursor) {
    return nullptr;
  }

  UniqueCodeTier codeTier;
  cursor = CodeTier::deserialize(cursor, linkData, &codeTier);
  if (!cursor) {
    return nullptr;
  }

  JumpTables jumpTables;
  if (!jumpTables.init(CompileMode::Once, codeTier->segment(),
                       codeTier->metadata().codeRanges)) {
    return nullptr;
  }

  StructTypeVector structTypes;
  cursor = DeserializeVector(cursor, &structTypes);
  if (!cursor) {
    return nullptr;
  }

  MutableCode code =
      js_new<Code>(std::move(codeTier), metadata, std::move(jumpTables),
                   std::move(structTypes));
  if (!code || !code->initialize(linkData)) {
    return nullptr;
  }

  *out = code;
  return cursor;
}

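// Debug-only helper: in WASM_CODEGEN_DEBUG builds, calls to the wasm Print*
// helpers are emitted with a placeholder immediate that is patched to the
// real callee here; in other builds no symbolic accesses should be present.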
void wasm::PatchDebugSymbolicAccesses(uint8_t* codeBase, MacroAssembler& masm) {
#ifdef WASM_CODEGEN_DEBUG
  for (auto& access : masm.symbolicAccesses()) {
    switch (access.target) {
      case SymbolicAddress::PrintI32:
      case SymbolicAddress::PrintPtr:
      case SymbolicAddress::PrintF32:
      case SymbolicAddress::PrintF64:
      case SymbolicAddress::PrintText:
        break;
      default:
        MOZ_CRASH("unexpected symbol in PatchDebugSymbolicAccesses");
    }
    ABIFunctionType abiType;
    void* target = AddressOf(access.target, &abiType);
    uint8_t* patchAt = codeBase + access.patchAt.offset();
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
                                       PatchedImmPtr(target),
                                       PatchedImmPtr((void*)-1));
  }
#else
  MOZ_ASSERT(masm.symbolicAccesses().empty());
#endif
}