/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2016 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef wasm_code_h
#define wasm_code_h

#include "jit/shared/Assembler-shared.h"
#include "js/HashTable.h"
#include "threading/ExclusiveData.h"
#include "util/Memory.h"
#include "vm/MutexIDs.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmTypes.h"

namespace js {

struct AsmJSMetadata;

namespace wasm {

struct MetadataTier;
struct Metadata;

// LinkData contains all the metadata necessary to patch all the locations
// that depend on the absolute address of a ModuleSegment. This happens in a
// "linking" step after compilation and after the module's code is serialized.
// The LinkData is serialized along with the Module but does not (normally, see
// Module::debugLinkData_ comment) persist after (de)serialization, which
// distinguishes it from Metadata, which is stored in the Code object.
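//
// As an illustrative sketch (not the actual linking code): for each
// InternalLink below, linking patches the instruction at
// (segmentBase + patchAtOffset) so that it refers to the absolute address
// (segmentBase + targetOffset) of the freshly allocated ModuleSegment.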

struct LinkDataCacheablePod {
  uint32_t trapOffset = 0;

  LinkDataCacheablePod() = default;
};

struct LinkData : LinkDataCacheablePod {
  const Tier tier;

  explicit LinkData(Tier tier) : tier(tier) {}

  LinkDataCacheablePod& pod() { return *this; }
  const LinkDataCacheablePod& pod() const { return *this; }

  struct InternalLink {
    uint32_t patchAtOffset;
    uint32_t targetOffset;
#ifdef JS_CODELABEL_LINKMODE
    uint32_t mode;
#endif
  };
  typedef Vector<InternalLink, 0, SystemAllocPolicy> InternalLinkVector;

  struct SymbolicLinkArray
      : EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, Uint32Vector> {
    WASM_DECLARE_SERIALIZABLE(SymbolicLinkArray)
  };

  InternalLinkVector internalLinks;
  SymbolicLinkArray symbolicLinks;

  WASM_DECLARE_SERIALIZABLE(LinkData)
};

typedef UniquePtr<LinkData> UniqueLinkData;

// Executable code must be deallocated specially.

struct FreeCode {
  uint32_t codeLength;
  FreeCode() : codeLength(0) {}
  explicit FreeCode(uint32_t codeLength) : codeLength(codeLength) {}
  void operator()(uint8_t* codeBytes);
};

using UniqueCodeBytes = UniquePtr<uint8_t, FreeCode>;

class Code;
class CodeTier;
class ModuleSegment;
class LazyStubSegment;

// CodeSegment contains common helpers for determining the base and length of a
// code segment and if a pc belongs to this segment. It is inherited by:
// - ModuleSegment, i.e. the code segment of a Module, generated
//   eagerly when a Module is instantiated.
// - LazyStubSegment, i.e. the code segment of entry stubs that are lazily
//   generated.

class CodeSegment {
 protected:
  static UniqueCodeBytes AllocateCodeBytes(uint32_t codeLength);

  enum class Kind { LazyStubs, Module };

  CodeSegment(UniqueCodeBytes bytes, uint32_t length, Kind kind)
      : bytes_(std::move(bytes)),
        length_(length),
        kind_(kind),
        codeTier_(nullptr),
        unregisterOnDestroy_(false) {}

  bool initialize(const CodeTier& codeTier);

 private:
  const UniqueCodeBytes bytes_;
  const uint32_t length_;
  const Kind kind_;
  const CodeTier* codeTier_;
  bool unregisterOnDestroy_;

 public:
  bool initialized() const { return !!codeTier_; }
  ~CodeSegment();

  bool isLazyStubs() const { return kind_ == Kind::LazyStubs; }
  bool isModule() const { return kind_ == Kind::Module; }
  const ModuleSegment* asModule() const {
    MOZ_ASSERT(isModule());
    return (ModuleSegment*)this;
  }
  const LazyStubSegment* asLazyStub() const {
    MOZ_ASSERT(isLazyStubs());
    return (LazyStubSegment*)this;
  }

  uint8_t* base() const { return bytes_.get(); }
  uint32_t length() const {
    MOZ_ASSERT(length_ != UINT32_MAX);
    return length_;
  }

  bool containsCodePC(const void* pc) const {
    return pc >= base() && pc < (base() + length_);
  }

  const CodeTier& codeTier() const {
    MOZ_ASSERT(initialized());
    return *codeTier_;
  }
  const Code& code() const;

  void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code) const;
};

// A wasm ModuleSegment owns the allocated executable code for a wasm module.

typedef UniquePtr<ModuleSegment> UniqueModuleSegment;

class ModuleSegment : public CodeSegment {
  const Tier tier_;
  uint8_t* const trapCode_;

 public:
  ModuleSegment(Tier tier, UniqueCodeBytes codeBytes, uint32_t codeLength,
                const LinkData& linkData);

  static UniqueModuleSegment create(Tier tier, jit::MacroAssembler& masm,
                                    const LinkData& linkData);
  static UniqueModuleSegment create(Tier tier, const Bytes& unlinkedBytes,
                                    const LinkData& linkData);

  bool initialize(const CodeTier& codeTier, const LinkData& linkData,
                  const Metadata& metadata, const MetadataTier& metadataTier);

  Tier tier() const { return tier_; }

  // Pointers to stubs to which PC is redirected from the signal-handler.

  uint8_t* trapCode() const { return trapCode_; }

  // Structured clone support:

  size_t serializedSize() const;
  uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
  static const uint8_t* deserialize(const uint8_t* cursor,
                                    const LinkData& linkData,
                                    UniqueModuleSegment* segment);

  const CodeRange* lookupRange(const void* pc) const;

  void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code,
                     size_t* data) const;
};

// A FuncExport represents a single function definition inside a wasm Module
// that has been exported one or more times. A FuncExport represents an
// internal entry point that can be called via function definition index by
// Instance::callExport(). To allow O(log(n)) lookup of a FuncExport by
// function definition index, the FuncExportVector is stored sorted by
// function definition index.

class FuncExport {
  FuncType funcType_;
  MOZ_INIT_OUTSIDE_CTOR struct CacheablePod {
    uint32_t funcIndex_;
    uint32_t eagerInterpEntryOffset_;  // Machine code offset
    bool hasEagerStubs_;
  } pod;

 public:
  FuncExport() = default;
  explicit FuncExport(FuncType&& funcType, uint32_t funcIndex,
                      bool hasEagerStubs)
      : funcType_(std::move(funcType)) {
    pod.funcIndex_ = funcIndex;
    pod.eagerInterpEntryOffset_ = UINT32_MAX;
    pod.hasEagerStubs_ = hasEagerStubs;
  }
  void initEagerInterpEntryOffset(uint32_t entryOffset) {
    MOZ_ASSERT(pod.eagerInterpEntryOffset_ == UINT32_MAX);
    MOZ_ASSERT(hasEagerStubs());
    pod.eagerInterpEntryOffset_ = entryOffset;
  }

  bool hasEagerStubs() const { return pod.hasEagerStubs_; }
  const FuncType& funcType() const { return funcType_; }
  uint32_t funcIndex() const { return pod.funcIndex_; }
  uint32_t eagerInterpEntryOffset() const {
    MOZ_ASSERT(pod.eagerInterpEntryOffset_ != UINT32_MAX);
    MOZ_ASSERT(hasEagerStubs());
    return pod.eagerInterpEntryOffset_;
  }

  bool canHaveJitEntry() const {
    return !funcType_.temporarilyUnsupportedReftypeForEntry() &&
           JitOptions.enableWasmJitEntry;
  }

  bool clone(const FuncExport& src) {
    mozilla::PodAssign(&pod, &src.pod);
    return funcType_.clone(src.funcType_);
  }

  WASM_DECLARE_SERIALIZABLE(FuncExport)
};

typedef Vector<FuncExport, 0, SystemAllocPolicy> FuncExportVector;
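
// Because a FuncExportVector is sorted by function index, a lookup can be a
// plain binary search. An illustrative sketch (not the actual
// MetadataTier::lookupFuncExport implementation):
//
//   const FuncExport* FindFuncExport(const FuncExportVector& exports,
//                                    uint32_t funcIndex) {
//     size_t lo = 0;
//     size_t hi = exports.length();
//     while (lo < hi) {
//       size_t mid = lo + (hi - lo) / 2;
//       if (exports[mid].funcIndex() < funcIndex) {
//         lo = mid + 1;
//       } else {
//         hi = mid;
//       }
//     }
//     if (lo < exports.length() && exports[lo].funcIndex() == funcIndex) {
//       return &exports[lo];
//     }
//     return nullptr;
//   }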

// A FuncImport contains the runtime metadata needed to implement a call to an
// imported function. Each function import has two call stubs: an optimized
// path into JIT code and a slow path into the generic C++ js::Invoke. The
// offsets of these stubs are stored so that function-import callsites can be
// dynamically patched at runtime.

class FuncImport {
  FuncType funcType_;
  struct CacheablePod {
    uint32_t tlsDataOffset_;
    uint32_t interpExitCodeOffset_;  // Machine code offset
    uint32_t jitExitCodeOffset_;     // Machine code offset
  } pod;

 public:
  FuncImport() { memset(&pod, 0, sizeof(CacheablePod)); }

  FuncImport(FuncType&& funcType, uint32_t tlsDataOffset)
      : funcType_(std::move(funcType)) {
    pod.tlsDataOffset_ = tlsDataOffset;
    pod.interpExitCodeOffset_ = 0;
    pod.jitExitCodeOffset_ = 0;
  }

  void initInterpExitOffset(uint32_t off) {
    MOZ_ASSERT(!pod.interpExitCodeOffset_);
    pod.interpExitCodeOffset_ = off;
  }
  void initJitExitOffset(uint32_t off) {
    MOZ_ASSERT(!pod.jitExitCodeOffset_);
    pod.jitExitCodeOffset_ = off;
  }

  const FuncType& funcType() const { return funcType_; }
  uint32_t tlsDataOffset() const { return pod.tlsDataOffset_; }
  uint32_t interpExitCodeOffset() const { return pod.interpExitCodeOffset_; }
  uint32_t jitExitCodeOffset() const { return pod.jitExitCodeOffset_; }

  bool clone(const FuncImport& src) {
    mozilla::PodAssign(&pod, &src.pod);
    return funcType_.clone(src.funcType_);
  }

  WASM_DECLARE_SERIALIZABLE(FuncImport)
};

typedef Vector<FuncImport, 0, SystemAllocPolicy> FuncImportVector;

// Metadata holds all the data that is needed to describe compiled wasm code
// at runtime (as opposed to data that is only used to statically link or
// instantiate a module).
//
// Metadata is built incrementally by ModuleGenerator and then shared immutably
// between modules.
//
// The Metadata structure is split into tier-invariant and tier-variant parts;
// the former points to instances of the latter. Additionally, the asm.js
// subsystem subclasses the Metadata, adding more tier-invariant data, some of
// which is serialized. See AsmJS.cpp.

struct MetadataCacheablePod {
  ModuleKind kind;
  MemoryUsage memoryUsage;
  uint32_t minMemoryLength;
  uint32_t globalDataLength;
  Maybe<uint32_t> maxMemoryLength;
  Maybe<uint32_t> startFuncIndex;
  Maybe<uint32_t> nameCustomSectionIndex;
  bool filenameIsURL;

  explicit MetadataCacheablePod(ModuleKind kind)
      : kind(kind),
        memoryUsage(MemoryUsage::None),
        minMemoryLength(0),
        globalDataLength(0),
        filenameIsURL(false) {}
};

typedef uint8_t ModuleHash[8];
typedef Vector<ValTypeVector, 0, SystemAllocPolicy> FuncArgTypesVector;
typedef Vector<ValTypeVector, 0, SystemAllocPolicy> FuncReturnTypesVector;

struct Metadata : public ShareableBase<Metadata>, public MetadataCacheablePod {
  FuncTypeWithIdVector funcTypeIds;
  GlobalDescVector globals;
  TableDescVector tables;
  CacheableChars filename;
  CacheableChars sourceMapURL;
  bool omitsBoundsChecks;

  // namePayload points at the name section's CustomSection::payload so that
  // the Names (which use payload-relative offsets) can be used independently
  // of the Module without duplicating the name section.
  SharedBytes namePayload;
  Maybe<Name> moduleName;
  NameVector funcNames;

  // Debug-enabled code is not serialized.
  bool debugEnabled;
  FuncArgTypesVector debugFuncArgTypes;
  FuncReturnTypesVector debugFuncReturnTypes;
  ModuleHash debugHash;

  // Feature flag that gets copied from ModuleEnvironment for BigInt support.
  bool bigIntEnabled;

  explicit Metadata(ModuleKind kind = ModuleKind::Wasm)
      : MetadataCacheablePod(kind), debugEnabled(false), debugHash() {}
  virtual ~Metadata() {}

  MetadataCacheablePod& pod() { return *this; }
  const MetadataCacheablePod& pod() const { return *this; }

  bool usesMemory() const { return memoryUsage != MemoryUsage::None; }
  bool usesSharedMemory() const { return memoryUsage == MemoryUsage::Shared; }

  // AsmJSMetadata derives Metadata iff isAsmJS(). Mostly this distinction is
  // encapsulated within AsmJS.cpp, but the additional virtual functions allow
  // asm.js to override wasm behavior in the handful of cases that can't be
  // easily encapsulated by AsmJS.cpp.

  bool isAsmJS() const { return kind == ModuleKind::AsmJS; }
  const AsmJSMetadata& asAsmJS() const {
    MOZ_ASSERT(isAsmJS());
    return *(const AsmJSMetadata*)this;
  }
  virtual bool mutedErrors() const { return false; }
  virtual const char16_t* displayURL() const { return nullptr; }
  virtual ScriptSource* maybeScriptSource() const { return nullptr; }

  // The Developer-Facing Display Conventions section of the WebAssembly Web
  // API spec defines two cases for displaying a wasm function name:
  //  1. the function name stands alone
  //  2. the function name precedes the location

  enum NameContext { Standalone, BeforeLocation };

  virtual bool getFuncName(NameContext ctx, uint32_t funcIndex,
                           UTF8Bytes* name) const;

  bool getFuncNameStandalone(uint32_t funcIndex, UTF8Bytes* name) const {
    return getFuncName(NameContext::Standalone, funcIndex, name);
  }
  bool getFuncNameBeforeLocation(uint32_t funcIndex, UTF8Bytes* name) const {
    return getFuncName(NameContext::BeforeLocation, funcIndex, name);
  }

  WASM_DECLARE_SERIALIZABLE(Metadata);
};

typedef RefPtr<Metadata> MutableMetadata;
typedef RefPtr<const Metadata> SharedMetadata;

struct MetadataTier {
  explicit MetadataTier(Tier tier) : tier(tier) {}

  const Tier tier;

  Uint32Vector funcToCodeRange;
  CodeRangeVector codeRanges;
  CallSiteVector callSites;
  TrapSiteVectorArray trapSites;
  FuncImportVector funcImports;
  FuncExportVector funcExports;
  StackMaps stackMaps;

  // Debug information, not serialized.
  Uint32Vector debugTrapFarJumpOffsets;

  FuncExport& lookupFuncExport(uint32_t funcIndex,
                               size_t* funcExportIndex = nullptr);
  const FuncExport& lookupFuncExport(uint32_t funcIndex,
                                     size_t* funcExportIndex = nullptr) const;

  const CodeRange& codeRange(const FuncExport& funcExport) const {
    return codeRanges[funcToCodeRange[funcExport.funcIndex()]];
  }

  bool clone(const MetadataTier& src);

  WASM_DECLARE_SERIALIZABLE(MetadataTier);
};

using UniqueMetadataTier = UniquePtr<MetadataTier>;

// LazyStubSegment is a code segment lazily generated for function entry stubs
// (both interpreter and jit ones).
//
// Because a stub is usually small (a few KiB) and an executable code segment
// isn't (64KiB), a given stub segment can contain entry stubs of many
// functions.
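//
// For example (illustrative only): with 64 KiB segments and a few KiB of
// machine code per entry stub, a single LazyStubSegment can typically hold
// the entry stubs of a few dozen functions before a new segment is needed.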

using UniqueLazyStubSegment = UniquePtr<LazyStubSegment>;
using LazyStubSegmentVector =
    Vector<UniqueLazyStubSegment, 0, SystemAllocPolicy>;

class LazyStubSegment : public CodeSegment {
  CodeRangeVector codeRanges_;
  size_t usedBytes_;

 public:
  LazyStubSegment(UniqueCodeBytes bytes, size_t length)
      : CodeSegment(std::move(bytes), length, CodeSegment::Kind::LazyStubs),
        usedBytes_(0) {}

  static UniqueLazyStubSegment create(const CodeTier& codeTier,
                                      size_t codeLength);

  static size_t AlignBytesNeeded(size_t bytes) {
    return AlignBytes(bytes, gc::SystemPageSize());
  }

  bool hasSpace(size_t bytes) const;
  bool addStubs(size_t codeLength, const Uint32Vector& funcExportIndices,
                const FuncExportVector& funcExports,
                const CodeRangeVector& codeRanges, uint8_t** codePtr,
                size_t* indexFirstInsertedCodeRange);

  const CodeRangeVector& codeRanges() const { return codeRanges_; }
  const CodeRange* lookupRange(const void* pc) const;

  void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                     size_t* data) const;
};

// LazyFuncExport helps to efficiently look up a CodeRange from a given
// function index. Entries are kept in a vector sorted by function index so
// that it can be binary-searched later.

struct LazyFuncExport {
  size_t funcIndex;
  size_t lazyStubSegmentIndex;
  size_t funcCodeRangeIndex;
  LazyFuncExport(size_t funcIndex, size_t lazyStubSegmentIndex,
                 size_t funcCodeRangeIndex)
      : funcIndex(funcIndex),
        lazyStubSegmentIndex(lazyStubSegmentIndex),
        funcCodeRangeIndex(funcCodeRangeIndex) {}
};

using LazyFuncExportVector = Vector<LazyFuncExport, 0, SystemAllocPolicy>;

// LazyStubTier contains all the necessary information for lazy function entry
// stubs that are generated at runtime. None of its data is ever serialized.
//
// It must be protected by a lock, because the main thread can both read and
// write lazy stubs at any time while a background thread can regenerate lazy
// stubs for tier2 at any time.
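//
// Access therefore goes through ExclusiveData's lock guard. An illustrative
// sketch (assuming a CodeTier& named codeTier; not the actual call sites):
//
//   auto stubs = codeTier.lazyStubs().lock();
//   if (void* entry = stubs->lookupInterpEntry(funcIndex)) {
//     // Use the lazily generated interpreter entry.
//   }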

class LazyStubTier {
  LazyStubSegmentVector stubSegments_;
  LazyFuncExportVector exports_;
  size_t lastStubSegmentIndex_;

  bool createMany(const Uint32Vector& funcExportIndices,
                  const CodeTier& codeTier, size_t* stubSegmentIndex);

 public:
  LazyStubTier() : lastStubSegmentIndex_(0) {}

  bool empty() const { return stubSegments_.empty(); }
  bool hasStub(uint32_t funcIndex) const;

  // Returns a pointer to the raw interpreter entry of a given function for
  // which stubs have been lazily generated.
  void* lookupInterpEntry(uint32_t funcIndex) const;

  // Creates one lazy stub for the exported function, for which the jit entry
  // will be set to the lazily-generated one.
  bool createOne(uint32_t funcExportIndex, const CodeTier& codeTier);

  // Creates one lazy stub for each of the functions in funcExportIndices,
  // putting them in a single stub segment. Jit entries won't be used until
  // setJitEntries() is actually called, after the Code owner has committed
  // tier2.
  bool createTier2(const Uint32Vector& funcExportIndices,
                   const CodeTier& codeTier, Maybe<size_t>* stubSegmentIndex);
  void setJitEntries(const Maybe<size_t>& stubSegmentIndex, const Code& code);

  void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                     size_t* data) const;
};

// CodeTier contains all the data related to a given compilation tier. It is
// built during module generation and then immutably stored in a Code.

typedef UniquePtr<CodeTier> UniqueCodeTier;
typedef UniquePtr<const CodeTier> UniqueConstCodeTier;

class CodeTier {
  const Code* code_;

  // Serialized information.
  const UniqueMetadataTier metadata_;
  const UniqueModuleSegment segment_;

  // Lazy stubs, not serialized.
  ExclusiveData<LazyStubTier> lazyStubs_;

  static const MutexId& mutexForTier(Tier tier) {
    if (tier == Tier::Baseline) {
      return mutexid::WasmLazyStubsTier1;
    }
    MOZ_ASSERT(tier == Tier::Optimized);
    return mutexid::WasmLazyStubsTier2;
  }

 public:
  CodeTier(UniqueMetadataTier metadata, UniqueModuleSegment segment)
      : code_(nullptr),
        metadata_(std::move(metadata)),
        segment_(std::move(segment)),
        lazyStubs_(mutexForTier(segment_->tier())) {}

  bool initialized() const { return !!code_ && segment_->initialized(); }
  bool initialize(const Code& code, const LinkData& linkData,
                  const Metadata& metadata);

  Tier tier() const { return segment_->tier(); }
  const ExclusiveData<LazyStubTier>& lazyStubs() const { return lazyStubs_; }
  const MetadataTier& metadata() const { return *metadata_.get(); }
  const ModuleSegment& segment() const { return *segment_.get(); }
  const Code& code() const {
    MOZ_ASSERT(initialized());
    return *code_;
  }

  const CodeRange* lookupRange(const void* pc) const;

  size_t serializedSize() const;
  uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
  static const uint8_t* deserialize(const uint8_t* cursor,
                                    const LinkData& linkData,
                                    UniqueCodeTier* codeTier);
  void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
                     size_t* data) const;
};

// Jump tables that take tiering into account, used when calling either from
// wasm to wasm (through rabaldr) or from jit to wasm (jit entry).

class JumpTables {
  using TablePointer = mozilla::UniquePtr<void*[], JS::FreePolicy>;

  CompileMode mode_;
  TablePointer tiering_;
  TablePointer jit_;
  size_t numFuncs_;

 public:
  bool init(CompileMode mode, const ModuleSegment& ms,
            const CodeRangeVector& codeRanges);

  void setJitEntry(size_t i, void* target) const {
    // Make sure that write is atomic; see comment in wasm::Module::finishTier2
    // to that effect.
    MOZ_ASSERT(i < numFuncs_);
    jit_.get()[i] = target;
  }
  void** getAddressOfJitEntry(size_t i) const {
    MOZ_ASSERT(i < numFuncs_);
    MOZ_ASSERT(jit_.get()[i]);
    return &jit_.get()[i];
  }
  size_t funcIndexFromJitEntry(void** target) const {
    MOZ_ASSERT(target >= &jit_.get()[0]);
    MOZ_ASSERT(target <= &(jit_.get()[numFuncs_ - 1]));
    return (intptr_t*)target - (intptr_t*)&jit_.get()[0];
  }

  void setTieringEntry(size_t i, void* target) const {
    MOZ_ASSERT(i < numFuncs_);
    // See comment in wasm::Module::finishTier2.
    if (mode_ == CompileMode::Tier1) {
      tiering_.get()[i] = target;
    }
  }
  void** tiering() const { return tiering_.get(); }

  size_t sizeOfMiscExcludingThis() const {
    // 2 words per function for the jit entry table, plus maybe 1 per
    // function if we're tiering.
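    // For example (illustrative): with 1000 functions on a 64-bit target and
    // tiering enabled, that is 8 * (2 + 1) * 1000 = 24000 bytes.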
    return sizeof(void*) * (2 + (tiering_ ? 1 : 0)) * numFuncs_;
  }
};

// Code objects own executable code and the metadata that describes it. A
// single Code object is normally shared between a module and all its
// instances.
//
// profilingLabels_ is lazily initialized, but behind a lock.

typedef RefPtr<const Code> SharedCode;
typedef RefPtr<Code> MutableCode;

class Code : public ShareableBase<Code> {
  UniqueCodeTier tier1_;
  mutable UniqueConstCodeTier tier2_;  // Access only when hasTier2() is true
  mutable Atomic<bool> hasTier2_;
  SharedMetadata metadata_;
  ExclusiveData<CacheableCharsVector> profilingLabels_;
  JumpTables jumpTables_;
  StructTypeVector structTypes_;

 public:
  Code(UniqueCodeTier tier1, const Metadata& metadata,
       JumpTables&& maybeJumpTables, StructTypeVector&& structTypes);
  bool initialized() const { return tier1_->initialized(); }

  bool initialize(const LinkData& linkData);

  void setTieringEntry(size_t i, void* target) const {
    jumpTables_.setTieringEntry(i, target);
  }
  void** tieringJumpTable() const { return jumpTables_.tiering(); }

  void setJitEntry(size_t i, void* target) const {
    jumpTables_.setJitEntry(i, target);
  }
  void** getAddressOfJitEntry(size_t i) const {
    return jumpTables_.getAddressOfJitEntry(i);
  }
  uint32_t getFuncIndex(JSFunction* fun) const;

  bool setTier2(UniqueCodeTier tier2, const LinkData& linkData) const;
  void commitTier2() const;

  bool hasTier2() const { return hasTier2_; }
  Tiers tiers() const;
  bool hasTier(Tier t) const;

  Tier stableTier() const;  // This is stable during a run
  Tier bestTier()
      const;  // This may transition from Baseline -> Ion at any time

  const CodeTier& codeTier(Tier tier) const;
  const Metadata& metadata() const { return *metadata_; }
  const StructTypeVector& structTypes() const { return structTypes_; }

  const ModuleSegment& segment(Tier iter) const {
    return codeTier(iter).segment();
  }
  const MetadataTier& metadata(Tier iter) const {
    return codeTier(iter).metadata();
  }

  // Metadata lookup functions:

  const CallSite* lookupCallSite(void* returnAddress) const;
  const CodeRange* lookupFuncRange(void* pc) const;
  const StackMap* lookupStackMap(uint8_t* nextPC) const;
  bool containsCodePC(const void* pc) const;
  bool lookupTrap(void* pc, Trap* trap, BytecodeOffset* bytecode) const;

  // To save memory, profilingLabels_ are generated lazily when profiling mode
  // is enabled.

  void ensureProfilingLabels(bool profilingEnabled) const;
  const char* profilingLabel(uint32_t funcIndex) const;

  // about:memory reporting:

  void addSizeOfMiscIfNotSeen(MallocSizeOf mallocSizeOf,
                              Metadata::SeenSet* seenMetadata,
                              Code::SeenSet* seenCode, size_t* code,
                              size_t* data) const;

  // A Code object is serialized as the length and bytes of the machine code
  // after statically unlinking it; the Code is then later recreated from the
  // machine code and other parts.
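  //
  // An illustrative sketch of the cursor-based (de)serialization pattern
  // (hypothetical caller, not the actual Module serialization code):
  //
  //   Bytes bytes;
  //   if (!bytes.resize(code->serializedSize())) {
  //     return false;
  //   }
  //   uint8_t* end = code->serialize(bytes.begin(), linkData);
  //   MOZ_ASSERT(end == bytes.begin() + code->serializedSize());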

  size_t serializedSize() const;
  uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
  static const uint8_t* deserialize(const uint8_t* cursor,
                                    const LinkData& linkData,
                                    Metadata& metadata, SharedCode* code);
};

void PatchDebugSymbolicAccesses(uint8_t* codeBase, jit::MacroAssembler& masm);

} // namespace wasm
} // namespace js

#endif // wasm_code_h