/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2014 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmFrameIter.h"

#include "vm/JitActivation.h"  // js::jit::JitActivation
#include "wasm/WasmInstance.h"
#include "wasm/WasmStubs.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;
using namespace js::wasm;

using mozilla::DebugOnly;
using mozilla::Maybe;

/*****************************************************************************/
// WasmFrameIter implementation

WasmFrameIter::WasmFrameIter(JitActivation* activation, wasm::Frame* fp)
    : activation_(activation),
      code_(nullptr),
      codeRange_(nullptr),
      lineOrBytecode_(0),
      fp_(fp ? fp : activation->wasmExitFP()),
      unwoundIonCallerFP_(nullptr),
      unwoundIonFrameType_(jit::FrameType(-1)),
      unwind_(Unwind::False),
      unwoundAddressOfReturnAddress_(nullptr),
      resumePCinCurrentFrame_(nullptr) {
  MOZ_ASSERT(fp_);

  // When the stack is captured during a trap (viz., to create the .stack
  // for an Error object), use the pc/bytecode information captured by the
  // signal handler in the runtime. Take care not to use this trap unwind
  // state for wasm frames in the middle of a JitActivation, i.e., wasm frames
  // that called into JIT frames before the trap.

  if (activation->isWasmTrapping() && fp_ == activation->wasmExitFP()) {
    const TrapData& trapData = activation->wasmTrapData();
    void* unwoundPC = trapData.unwoundPC;

    code_ = &fp_->tls->instance->code();
    MOZ_ASSERT(code_ == LookupCode(unwoundPC));

    codeRange_ = code_->lookupFuncRange(unwoundPC);
    MOZ_ASSERT(codeRange_);

    lineOrBytecode_ = trapData.bytecodeOffset;

    MOZ_ASSERT(!done());
    return;
  }

  // Otherwise, execution exits wasm code via an exit stub which sets exitFP
  // to the exit stub's frame. Thus, in this case, we want to start iteration
  // at the caller of the exit frame, whose Code, CodeRange and CallSite are
  // indicated by the returnAddress of the exit stub's frame. If the caller
  // was Ion, we can just skip the wasm frames.

  popFrame();
  MOZ_ASSERT(!done() || unwoundIonCallerFP_);
}
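
// Usage sketch (illustrative only, not part of this file): walk the wasm
// frames of an activation without unwinding them, assuming the header
// defaults the fp argument to nullptr:
//
//   for (WasmFrameIter iter(activation); !iter.done(); ++iter) {
//     printf("%s:%u\n", iter.filename(), iter.lineOrBytecode());
//   }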

bool WasmFrameIter::done() const {
  MOZ_ASSERT(!!fp_ == !!code_);
  MOZ_ASSERT(!!fp_ == !!codeRange_);
  return !fp_;
}

void WasmFrameIter::operator++() {
  MOZ_ASSERT(!done());

  // When the iterator is set to unwind, each time the iterator pops a frame,
  // the JitActivation is updated so that the just-popped frame is no longer
  // visible. This is necessary since Debugger::onLeaveFrame is called before
  // popping each frame and, once onLeaveFrame is called for a given frame,
  // that frame must not be visible to subsequent stack iteration (or it
  // could be added as a "new" frame just as it becomes garbage). When the
  // frame is trapping, then exitFP is included in the callstack (otherwise,
  // it is skipped, as explained above). So to unwind the innermost frame, we
  // just clear the trapping state.

  if (unwind_ == Unwind::True) {
    if (activation_->isWasmTrapping()) {
      activation_->finishWasmTrap();
    }
    activation_->setWasmExitFP(fp_);
  }

  popFrame();
}

void WasmFrameIter::popFrame() {
  Frame* prevFP = fp_;
  fp_ = prevFP->callerFP;
  resumePCinCurrentFrame_ = (uint8_t*)prevFP->returnAddress;

  if (uintptr_t(fp_) & ExitOrJitEntryFPTag) {
    // We just unwound a frame pointer which has the low bit set,
    // indicating this is a direct call from the jit into the wasm
    // function's body. The call stack resembles this at this point:
    //
    // |---------------------|
    // |      JIT FRAME      |
    // | JIT FAKE EXIT FRAME | <-- tagged fp_
    // |      WASM FRAME     | <-- prevFP (already unwound)
    // |---------------------|
    //
    // fp_ points to the fake exit frame set up by the jit caller, and the
    // return-address-to-fp is in JIT code, thus doesn't belong to any wasm
    // instance's code (in particular, there's no associated CodeRange).
    // Mark the frame as such and untag FP.
    MOZ_ASSERT(!LookupCode(prevFP->returnAddress));

    unwoundIonCallerFP_ =
        (uint8_t*)(uintptr_t(fp_) & ~uintptr_t(ExitOrJitEntryFPTag));
    unwoundIonFrameType_ = FrameType::Exit;

    fp_ = nullptr;
    code_ = nullptr;
    codeRange_ = nullptr;

    if (unwind_ == Unwind::True) {
      activation_->setJSExitFP(unwoundIonCallerFP_);
      unwoundAddressOfReturnAddress_ = &prevFP->returnAddress;
    }

    MOZ_ASSERT(done());
    return;
  }

  if (!fp_) {
    code_ = nullptr;
    codeRange_ = nullptr;

    if (unwind_ == Unwind::True) {
      // We're exiting via the interpreter entry; we can safely reset
      // exitFP.
      activation_->setWasmExitFP(nullptr);
      unwoundAddressOfReturnAddress_ = &prevFP->returnAddress;
    }

    MOZ_ASSERT(done());
    return;
  }

  void* returnAddress = prevFP->returnAddress;

  code_ = LookupCode(returnAddress, &codeRange_);
  MOZ_ASSERT(codeRange_);

  if (codeRange_->isJitEntry()) {
    // This wasm function has been called through the generic JIT entry by
    // a JIT caller, so the call stack resembles this:
    //
    // |---------------------|
    // |      JIT FRAME      |
    // | JSJIT TO WASM EXIT  | <-- fp_
    // |    WASM JIT ENTRY   | <-- prevFP (already unwound)
    // |      WASM FRAME     | (already unwound)
    // |---------------------|
    //
    // The next value of FP is just a regular jit frame used as a marker to
    // know that we should transition to a JSJit frame iterator.
    unwoundIonCallerFP_ = (uint8_t*)fp_;
    unwoundIonFrameType_ = FrameType::JSJitToWasm;

    fp_ = nullptr;
    code_ = nullptr;
    codeRange_ = nullptr;

    if (unwind_ == Unwind::True) {
      activation_->setJSExitFP(unwoundIonCallerFP_);
      unwoundAddressOfReturnAddress_ = &prevFP->returnAddress;
    }

    MOZ_ASSERT(done());
    return;
  }

  MOZ_ASSERT(code_ == &fp_->tls->instance->code());
  MOZ_ASSERT(codeRange_->kind() == CodeRange::Function);

  const CallSite* callsite = code_->lookupCallSite(returnAddress);
  MOZ_ASSERT(callsite);

  lineOrBytecode_ = callsite->lineOrBytecode();

  MOZ_ASSERT(!done());
}
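
// Tagging sketch (illustrative): ExitOrJitEntryFPTag is the low bit of a
// frame pointer value, so a JIT caller can be detected and untagged with
// plain bit arithmetic, exactly as popFrame() does above:
//
//   uintptr_t raw = uintptr_t(fp);
//   bool fromJit = raw & ExitOrJitEntryFPTag;
//   Frame* untagged = (Frame*)(raw & ~uintptr_t(ExitOrJitEntryFPTag));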

const char* WasmFrameIter::filename() const {
  MOZ_ASSERT(!done());
  return code_->metadata().filename.get();
}

const char16_t* WasmFrameIter::displayURL() const {
  MOZ_ASSERT(!done());
  return code_->metadata().displayURL();
}

bool WasmFrameIter::mutedErrors() const {
  MOZ_ASSERT(!done());
  return code_->metadata().mutedErrors();
}

JSAtom* WasmFrameIter::functionDisplayAtom() const {
  MOZ_ASSERT(!done());

  JSContext* cx = activation_->cx();
  JSAtom* atom = instance()->getFuncDisplayAtom(cx, codeRange_->funcIndex());
  if (!atom) {
    cx->clearPendingException();
    return cx->names().empty;
  }

  return atom;
}

unsigned WasmFrameIter::lineOrBytecode() const {
  MOZ_ASSERT(!done());
  return lineOrBytecode_;
}

uint32_t WasmFrameIter::funcIndex() const {
  MOZ_ASSERT(!done());
  return codeRange_->funcIndex();
}

unsigned WasmFrameIter::computeLine(uint32_t* column) const {
  if (instance()->isAsmJS()) {
    if (column) {
      *column = 1;
    }
    return lineOrBytecode_;
  }

  // As a terrible hack to avoid changing the tons of places that pass around
  // (url, line, column) tuples to instead passing around a Variant that
  // stores a (url, func-index, bytecode-offset) tuple for wasm frames,
  // wasm stuffs its tuple into the existing (url, line, column) tuple,
  // tagging the high bit of the column to indicate "this is a wasm frame".
  // When knowing clients see this bit, they shall render the tuple
  // (url, line, column|bit) as "url:wasm-function[column]:0xline" according
  // to the WebAssembly Web API's Developer-Facing Display Conventions.
  //   https://webassembly.github.io/spec/web-api/index.html#conventions
  // The wasm bytecode offset continues to be passed as the JS line to avoid
  // breaking existing devtools code written when this used to be the case.

  MOZ_ASSERT(!(codeRange_->funcIndex() & ColumnBit));
  if (column) {
    *column = codeRange_->funcIndex() | ColumnBit;
  }
  return lineOrBytecode_;
}
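
// Decoding sketch for knowing clients (hypothetical devtools-side code, not
// part of this file): recover the function index from a column produced by
// computeLine() and render it per the conventions described above.
//
//   if (column & ColumnBit) {
//     uint32_t funcIndex = column & ~ColumnBit;
//     // Render as "url:wasm-function[funcIndex]:0x" + hex(line).
//   }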

Instance* WasmFrameIter::instance() const {
  MOZ_ASSERT(!done());
  return fp_->tls->instance;
}

void** WasmFrameIter::unwoundAddressOfReturnAddress() const {
  MOZ_ASSERT(done());
  MOZ_ASSERT(unwind_ == Unwind::True);
  MOZ_ASSERT(unwoundAddressOfReturnAddress_);
  return unwoundAddressOfReturnAddress_;
}

bool WasmFrameIter::debugEnabled() const {
  MOZ_ASSERT(!done());

  // Only non-imported functions can have debug frames.
  //
  // Metadata::debugEnabled is only set if debugging is actually enabled (both
  // requested, and available via baseline compilation), and Tier::Debug code
  // will be available.
  return code_->metadata().debugEnabled &&
         codeRange_->funcIndex() >=
             code_->metadata(Tier::Debug).funcImports.length();
}

DebugFrame* WasmFrameIter::debugFrame() const {
  MOZ_ASSERT(!done());
  return DebugFrame::from(fp_);
}

jit::FrameType WasmFrameIter::unwoundIonFrameType() const {
  MOZ_ASSERT(unwoundIonCallerFP_);
  MOZ_ASSERT(unwoundIonFrameType_ != jit::FrameType(-1));
  return unwoundIonFrameType_;
}

uint8_t* WasmFrameIter::resumePCinCurrentFrame() const {
  if (resumePCinCurrentFrame_) {
    return resumePCinCurrentFrame_;
  }
  MOZ_ASSERT(activation_->isWasmTrapping());
  // The next instruction is the instruction following the trap instruction.
  return (uint8_t*)activation_->wasmTrapData().resumePC;
}

/*****************************************************************************/
// Prologue/epilogue code generation

// These constants reflect statically-determined offsets in the
// prologue/epilogue. The offsets are dynamically asserted during code
// generation.
#if defined(JS_CODEGEN_X64)
static const unsigned PushedRetAddr = 0;
static const unsigned PushedTLS = 2;
static const unsigned PushedFP = 3;
static const unsigned SetFP = 6;
static const unsigned PoppedFP = 2;
static const unsigned PoppedTLSReg = 0;
#elif defined(JS_CODEGEN_X86)
static const unsigned PushedRetAddr = 0;
static const unsigned PushedTLS = 1;
static const unsigned PushedFP = 2;
static const unsigned SetFP = 4;
static const unsigned PoppedFP = 1;
static const unsigned PoppedTLSReg = 0;
#elif defined(JS_CODEGEN_ARM)
static const unsigned BeforePushRetAddr = 0;
static const unsigned PushedRetAddr = 4;
static const unsigned PushedTLS = 8;
static const unsigned PushedFP = 12;
static const unsigned SetFP = 16;
static const unsigned PoppedFP = 4;
static const unsigned PoppedTLSReg = 0;
#elif defined(JS_CODEGEN_ARM64)
// On ARM64 we do not use push or pop; the prologues and epilogues are
// structured differently due to restrictions on SP alignment. Even so,
// PushedRetAddr, PushedTLS, and PushedFP are used in some restricted contexts
// and must be superficially meaningful.
static const unsigned BeforePushRetAddr = 0;
static const unsigned PushedRetAddr = 8;
static const unsigned PushedTLS = 12;
static const unsigned PushedFP = 16;
static const unsigned SetFP = 20;
static const unsigned PoppedFP = 8;
static const unsigned PoppedTLSReg = 4;
static_assert(BeforePushRetAddr == 0, "Required by StartUnwinding");
static_assert(PushedFP > PushedRetAddr, "Required by StartUnwinding");
static_assert(PushedFP > PushedTLS, "Required by StartUnwinding");
static_assert(PoppedFP > PoppedTLSReg, "Required by StartUnwinding");
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
static const unsigned PushedRetAddr = 8;
static const unsigned PushedTLS = 12;
static const unsigned PushedFP = 16;
static const unsigned SetFP = 20;
static const unsigned PoppedFP = 8;
static const unsigned PoppedTLSReg = 4;
#elif defined(JS_CODEGEN_NONE)
// Synthetic values to satisfy asserts and avoid compiler warnings.
static const unsigned PushedRetAddr = 0;
static const unsigned PushedTLS = 1;
static const unsigned PushedFP = 2;
static const unsigned SetFP = 3;
static const unsigned PoppedFP = 4;
static const unsigned PoppedTLSReg = 5;
#else
# error "Unknown architecture!"
#endif
static constexpr unsigned SetJitEntryFP = PushedRetAddr + SetFP - PushedFP;
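
// For example, on x64 the callable prologue below is two pushes plus a
// register move, and the offsets above fall out of the instruction encodings
// (an illustrative sketch; assumes WasmTlsReg is r14 and FramePointer is rbp):
//
//   entry+0:                 ; return address pushed by the call itself,
//                            ; so PushedRetAddr == 0
//   entry+0: push %r14       ; 2-byte encoding -> offset 2 == PushedTLS
//   entry+2: push %rbp       ; 1-byte encoding -> offset 3 == PushedFP
//   entry+3: mov %rsp,%rbp   ; 3-byte encoding -> offset 6 == SetFP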

static void LoadActivation(MacroAssembler& masm, const Register& dest) {
  // WasmCall pushes a JitActivation.
  masm.loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, cx)), dest);
  masm.loadPtr(Address(dest, JSContext::offsetOfActivation()), dest);
}

void wasm::SetExitFP(MacroAssembler& masm, ExitReason reason,
                     Register scratch) {
  MOZ_ASSERT(!reason.isNone());

  LoadActivation(masm, scratch);

  masm.store32(
      Imm32(reason.encode()),
      Address(scratch, JitActivation::offsetOfEncodedWasmExitReason()));

  masm.orPtr(Imm32(ExitOrJitEntryFPTag), FramePointer);
  masm.storePtr(FramePointer,
                Address(scratch, JitActivation::offsetOfPackedExitFP()));
  masm.andPtr(Imm32(int32_t(~ExitOrJitEntryFPTag)), FramePointer);
}

void wasm::ClearExitFP(MacroAssembler& masm, Register scratch) {
  LoadActivation(masm, scratch);
  masm.storePtr(ImmWord(0x0),
                Address(scratch, JitActivation::offsetOfPackedExitFP()));
  masm.store32(
      Imm32(0x0),
      Address(scratch, JitActivation::offsetOfEncodedWasmExitReason()));
}

static void GenerateCallablePrologue(MacroAssembler& masm, uint32_t* entry) {
  masm.setFramePushed(0);

  // ProfilingFrameIterator needs to know the offsets of several key
  // instructions from entry. To save space, we make these offsets static
  // constants and assert that they match the actual codegen below. On ARM,
  // this requires AutoForbidPoolsAndNops to prevent a constant pool from being
  // randomly inserted between two instructions.
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
  {
    *entry = masm.currentOffset();

    masm.subFromStackPtr(Imm32(sizeof(Frame)));
    masm.storePtr(ra, Address(StackPointer, offsetof(Frame, returnAddress)));
    MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
    masm.storePtr(WasmTlsReg, Address(StackPointer, offsetof(Frame, tls)));
    MOZ_ASSERT_IF(!masm.oom(), PushedTLS == masm.currentOffset() - *entry);
    masm.storePtr(FramePointer,
                  Address(StackPointer, offsetof(Frame, callerFP)));
    MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
    masm.moveStackPtrTo(FramePointer);
    MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);
  }
#elif defined(JS_CODEGEN_ARM64)
  {
    // We do not use the PseudoStackPointer.
    MOZ_ASSERT(masm.GetStackPointer64().code() == sp.code());

    AutoForbidPoolsAndNops afp(&masm,
                               /* number of instructions in scope = */ 5);

    *entry = masm.currentOffset();

    masm.Sub(sp, sp, sizeof(Frame));
    masm.Str(ARMRegister(lr, 64),
             MemOperand(sp, offsetof(Frame, returnAddress)));
    MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
    masm.Str(ARMRegister(WasmTlsReg, 64), MemOperand(sp, offsetof(Frame, tls)));
    MOZ_ASSERT_IF(!masm.oom(), PushedTLS == masm.currentOffset() - *entry);
    masm.Str(ARMRegister(FramePointer, 64),
             MemOperand(sp, offsetof(Frame, callerFP)));
    MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
    masm.Mov(ARMRegister(FramePointer, 64), sp);
    MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);
  }
#else
  {
# if defined(JS_CODEGEN_ARM)
    AutoForbidPoolsAndNops afp(&masm,
                               /* number of instructions in scope = */ 7);

    *entry = masm.currentOffset();

    MOZ_ASSERT(BeforePushRetAddr == 0);
    masm.push(lr);
# else
    *entry = masm.currentOffset();
    // The x86/x64 call instruction pushes the return address.
# endif

    MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
    masm.push(WasmTlsReg);
    MOZ_ASSERT_IF(!masm.oom(), PushedTLS == masm.currentOffset() - *entry);
    masm.push(FramePointer);
    MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
    masm.moveStackPtrTo(FramePointer);
    MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);
  }
#endif
}

static void GenerateCallableEpilogue(MacroAssembler& masm, unsigned framePushed,
                                     ExitReason reason, uint32_t* ret) {
  if (framePushed) {
    masm.freeStack(framePushed);
  }

  if (!reason.isNone()) {
    ClearExitFP(masm, ABINonArgReturnVolatileReg);
  }

  DebugOnly<uint32_t> poppedFP;
  DebugOnly<uint32_t> poppedTlsReg;

#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)

  masm.loadPtr(Address(StackPointer, offsetof(Frame, callerFP)), FramePointer);
  poppedFP = masm.currentOffset();
  masm.loadPtr(Address(StackPointer, offsetof(Frame, tls)), WasmTlsReg);
  poppedTlsReg = masm.currentOffset();
  masm.loadPtr(Address(StackPointer, offsetof(Frame, returnAddress)), ra);

  *ret = masm.currentOffset();
  masm.as_jr(ra);
  masm.addToStackPtr(Imm32(sizeof(Frame)));

#elif defined(JS_CODEGEN_ARM64)

  // We do not use the PseudoStackPointer.
  MOZ_ASSERT(masm.GetStackPointer64().code() == sp.code());

  AutoForbidPoolsAndNops afp(&masm, /* number of instructions in scope = */ 5);

  masm.Ldr(ARMRegister(FramePointer, 64),
           MemOperand(sp, offsetof(Frame, callerFP)));
  poppedFP = masm.currentOffset();

  masm.Ldr(ARMRegister(WasmTlsReg, 64), MemOperand(sp, offsetof(Frame, tls)));
  poppedTlsReg = masm.currentOffset();

  masm.Ldr(ARMRegister(lr, 64), MemOperand(sp, offsetof(Frame, returnAddress)));
  *ret = masm.currentOffset();

  masm.Add(sp, sp, sizeof(Frame));
  masm.Ret(ARMRegister(lr, 64));

#else
  // Forbid pools for the same reason as described in GenerateCallablePrologue.
# if defined(JS_CODEGEN_ARM)
  AutoForbidPoolsAndNops afp(&masm, /* number of instructions in scope = */ 7);
# endif

  // There is an important ordering constraint here: fp must be repointed to
  // the caller's frame before any field of the frame currently pointed to by
  // fp is popped: asynchronous signal handlers (which use stack space
  // starting at sp) could otherwise clobber these fields while they are still
  // accessible via fp (fp fields are read during frame iteration which is
  // *also* done asynchronously).

  masm.pop(FramePointer);
  poppedFP = masm.currentOffset();

  masm.pop(WasmTlsReg);
  poppedTlsReg = masm.currentOffset();

  *ret = masm.currentOffset();
  masm.ret();

#endif

  MOZ_ASSERT_IF(!masm.oom(), PoppedFP == *ret - poppedFP);
  MOZ_ASSERT_IF(!masm.oom(), PoppedTLSReg == *ret - poppedTlsReg);
}

void wasm::GenerateFunctionPrologue(MacroAssembler& masm,
                                    const FuncTypeIdDesc& funcTypeId,
                                    const Maybe<uint32_t>& tier1FuncIndex,
                                    FuncOffsets* offsets) {
  // Flush pending pools so they do not get dumped between the 'begin' and
  // 'normalEntry' offsets since the difference must be less than UINT8_MAX
  // to be stored in CodeRange::funcBeginToNormalEntry_.
  masm.flushBuffer();
  masm.haltingAlign(CodeAlignment);

  // The table entry falls through into the normal entry after it has checked
  // the signature.
  Label normalEntry;

  // Generate table entry. The BytecodeOffset of the trap is fixed up to be
  // the bytecode offset of the callsite by JitActivation::startWasmTrap.
  offsets->begin = masm.currentOffset();
  switch (funcTypeId.kind()) {
    case FuncTypeIdDescKind::Global: {
      Register scratch = WasmTableCallScratchReg0;
      masm.loadWasmGlobalPtr(funcTypeId.globalDataOffset(), scratch);
      masm.branchPtr(Assembler::Condition::Equal, WasmTableCallSigReg, scratch,
                     &normalEntry);
      masm.wasmTrap(Trap::IndirectCallBadSig, BytecodeOffset(0));
      break;
    }
    case FuncTypeIdDescKind::Immediate: {
      masm.branch32(Assembler::Condition::Equal, WasmTableCallSigReg,
                    Imm32(funcTypeId.immediate()), &normalEntry);
      masm.wasmTrap(Trap::IndirectCallBadSig, BytecodeOffset(0));
      break;
    }
    case FuncTypeIdDescKind::None:
      break;
  }

  // The table entry might have generated a small constant pool in case of
  // immediate comparison.
  masm.flushBuffer();

  // Generate normal entry:
  masm.nopAlign(CodeAlignment);
  masm.bind(&normalEntry);
  GenerateCallablePrologue(masm, &offsets->normalEntry);

  // Tiering works as follows. The Code owns a jumpTable, which has one
  // pointer-sized element for each function up to the largest funcIndex in
  // the module. Each table element is an address into the Tier-1 or the
  // Tier-2 function at that index; the elements are updated when Tier-2 code
  // becomes available. The Tier-1 function will unconditionally jump to this
  // address. The table elements are written racily but without tearing when
  // Tier-2 compilation is finished.
  //
  // The address in the table is either to the instruction following the jump
  // in Tier-1 code, or into the function prologue after the standard setup in
  // Tier-2 code. Effectively, Tier-1 code performs standard frame setup on
  // behalf of whatever code it jumps to, and the target code allocates its
  // own frame in whatever way it wants.
  if (tier1FuncIndex) {
    Register scratch = ABINonArgReg0;
    masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, jumpTable)), scratch);
    masm.jump(Address(scratch, *tier1FuncIndex * sizeof(uintptr_t)));
  }

  offsets->tierEntry = masm.currentOffset();

  MOZ_ASSERT(masm.framePushed() == 0);
}
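
// Jump-table sketch (illustrative): the tiering jump above amounts to one
// indirect jump through a word per function. When Tier-2 compilation
// finishes, each entry is flipped from the Tier-1 address to the Tier-2 one
// with a single word-sized (hence untearable) store, roughly:
//
//   void** jumpTable = ...;             // one entry per function
//   jumpTable[funcIndex] = tier2Entry;  // racy but tear-free publish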

void wasm::GenerateFunctionEpilogue(MacroAssembler& masm, unsigned framePushed,
                                    FuncOffsets* offsets) {
  // Inverse of GenerateFunctionPrologue:
  MOZ_ASSERT(masm.framePushed() == framePushed);
  GenerateCallableEpilogue(masm, framePushed, ExitReason::None(),
                           &offsets->ret);
  MOZ_ASSERT(masm.framePushed() == 0);
}

void wasm::GenerateExitPrologue(MacroAssembler& masm, unsigned framePushed,
                                ExitReason reason, CallableOffsets* offsets) {
  masm.haltingAlign(CodeAlignment);

  GenerateCallablePrologue(masm, &offsets->begin);

  // This frame will be exiting compiled code to C++ so record the fp and
  // reason in the JitActivation so the frame iterators can unwind.
  SetExitFP(masm, reason, ABINonArgReturnVolatileReg);

  MOZ_ASSERT(masm.framePushed() == 0);
  masm.reserveStack(framePushed);
}

void wasm::GenerateExitEpilogue(MacroAssembler& masm, unsigned framePushed,
                                ExitReason reason, CallableOffsets* offsets) {
  // Inverse of GenerateExitPrologue:
  MOZ_ASSERT(masm.framePushed() == framePushed);
  GenerateCallableEpilogue(masm, framePushed, reason, &offsets->ret);
  MOZ_ASSERT(masm.framePushed() == 0);
}

static void AssertNoWasmExitFPInJitExit(MacroAssembler& masm) {
  // As a general stack invariant, if Activation::packedExitFP is tagged as
  // wasm, it must point to a valid wasm::Frame. The JIT exit stub calls into
  // JIT code and thus does not really exit; thus, when entering/leaving the
  // JIT exit stub from/to normal wasm code, packedExitFP is not tagged wasm.
#ifdef DEBUG
  Register scratch = ABINonArgReturnReg0;
  LoadActivation(masm, scratch);

  Label ok;
  masm.branchTestPtr(Assembler::Zero,
                     Address(scratch, JitActivation::offsetOfPackedExitFP()),
                     Imm32(uintptr_t(ExitOrJitEntryFPTag)), &ok);
  masm.breakpoint();
  masm.bind(&ok);
#endif
}

void wasm::GenerateJitExitPrologue(MacroAssembler& masm, unsigned framePushed,
                                   CallableOffsets* offsets) {
  masm.haltingAlign(CodeAlignment);

  GenerateCallablePrologue(masm, &offsets->begin);
  AssertNoWasmExitFPInJitExit(masm);

  MOZ_ASSERT(masm.framePushed() == 0);
  masm.reserveStack(framePushed);
}

void wasm::GenerateJitExitEpilogue(MacroAssembler& masm, unsigned framePushed,
                                   CallableOffsets* offsets) {
  // Inverse of GenerateJitExitPrologue:
  MOZ_ASSERT(masm.framePushed() == framePushed);
  AssertNoWasmExitFPInJitExit(masm);
  GenerateCallableEpilogue(masm, framePushed, ExitReason::None(),
                           &offsets->ret);
  MOZ_ASSERT(masm.framePushed() == 0);
}

void wasm::GenerateJitEntryPrologue(MacroAssembler& masm, Offsets* offsets) {
  masm.haltingAlign(CodeAlignment);

  {
#if defined(JS_CODEGEN_ARM)
    AutoForbidPoolsAndNops afp(&masm,
                               /* number of instructions in scope = */ 2);
    offsets->begin = masm.currentOffset();
    MOZ_ASSERT(BeforePushRetAddr == 0);
    masm.push(lr);
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    offsets->begin = masm.currentOffset();
    masm.push(ra);
#elif defined(JS_CODEGEN_ARM64)
    AutoForbidPoolsAndNops afp(&masm,
                               /* number of instructions in scope = */ 3);
    offsets->begin = masm.currentOffset();
    MOZ_ASSERT(BeforePushRetAddr == 0);
    // Subtract from SP first as SP must be aligned before offsetting.
    masm.Sub(sp, sp, 8);
    masm.storePtr(lr, Address(masm.getStackPointer(), 0));
    masm.adjustFrame(8);
#else
    // The x86/x64 call instruction pushes the return address.
    offsets->begin = masm.currentOffset();
#endif
    MOZ_ASSERT_IF(!masm.oom(),
                  PushedRetAddr == masm.currentOffset() - offsets->begin);

    // Save jit frame pointer, so unwinding from wasm to jit frames is trivial.
    masm.moveStackPtrTo(FramePointer);
    MOZ_ASSERT_IF(!masm.oom(),
                  SetJitEntryFP == masm.currentOffset() - offsets->begin);
  }

  masm.setFramePushed(0);
}

/*****************************************************************************/
// ProfilingFrameIterator

ProfilingFrameIterator::ProfilingFrameIterator()
    : code_(nullptr),
      codeRange_(nullptr),
      callerFP_(nullptr),
      callerPC_(nullptr),
      stackAddress_(nullptr),
      unwoundIonCallerFP_(nullptr),
      exitReason_(ExitReason::Fixed::None) {
  MOZ_ASSERT(done());
}

ProfilingFrameIterator::ProfilingFrameIterator(const JitActivation& activation)
    : code_(nullptr),
      codeRange_(nullptr),
      callerFP_(nullptr),
      callerPC_(nullptr),
      stackAddress_(nullptr),
      unwoundIonCallerFP_(nullptr),
      exitReason_(activation.wasmExitReason()) {
  initFromExitFP(activation.wasmExitFP());
}

ProfilingFrameIterator::ProfilingFrameIterator(const Frame* fp)
    : code_(nullptr),
      codeRange_(nullptr),
      callerFP_(nullptr),
      callerPC_(nullptr),
      stackAddress_(nullptr),
      unwoundIonCallerFP_(nullptr),
      exitReason_(ExitReason::Fixed::ImportJit) {
  MOZ_ASSERT(fp);
  initFromExitFP(fp);
}

static inline void AssertDirectJitCall(const void* fp) {
  // Called via an inlined fast JIT to wasm call: in this case, FP is
  // pointing in the middle of the exit frame, right before the exit
  // footer; ensure the exit frame type is the expected one.
#ifdef DEBUG
  auto* jitCaller = (ExitFrameLayout*)(uintptr_t(fp) & ~ExitOrJitEntryFPTag);
  MOZ_ASSERT(jitCaller->footer()->type() ==
             jit::ExitFrameType::DirectWasmJitCall);
#endif
}

static inline void AssertMatchesCallSite(void* callerPC, Frame* callerFP) {
#ifdef DEBUG
  const CodeRange* callerCodeRange;
  const Code* code = LookupCode(callerPC, &callerCodeRange);

  if (!code) {
    AssertDirectJitCall(callerFP);
    return;
  }

  MOZ_ASSERT(callerCodeRange);

  if (callerCodeRange->isInterpEntry()) {
    MOZ_ASSERT(callerFP == nullptr);
    return;
  }

  if (callerCodeRange->isJitEntry()) {
    MOZ_ASSERT(callerFP != nullptr);
    return;
  }

  const CallSite* callsite = code->lookupCallSite(callerPC);
  MOZ_ASSERT(callsite);
#endif
}

void ProfilingFrameIterator::initFromExitFP(const Frame* fp) {
  MOZ_ASSERT(fp);
  stackAddress_ = (void*)fp;

  void* pc = fp->returnAddress;

  code_ = LookupCode(pc, &codeRange_);

  if (!code_) {
    // This is a direct call from the JIT; the caller FP is pointing to a
    // tagged JIT caller's frame.
    MOZ_ASSERT(uintptr_t(fp->callerFP) & ExitOrJitEntryFPTag);
    AssertDirectJitCall(fp->callerFP);

    unwoundIonCallerFP_ =
        (uint8_t*)(uintptr_t(fp->callerFP) & ~ExitOrJitEntryFPTag);
    MOZ_ASSERT(done());
    return;
  }

  MOZ_ASSERT(codeRange_);

  // Since we don't have the pc for fp, start unwinding at the caller of fp.
  // This means that the innermost frame is skipped. This is fine because:
  //  - for import exit calls, the innermost frame is a thunk, so the first
  //    frame that shows up is the function calling the import;
  //  - for Math and other builtin calls, we note the absence of an exit
  //    reason and inject a fake "builtin" frame.
  switch (codeRange_->kind()) {
    case CodeRange::InterpEntry:
      callerPC_ = nullptr;
      callerFP_ = nullptr;
      codeRange_ = nullptr;
      exitReason_ = ExitReason(ExitReason::Fixed::FakeInterpEntry);
      break;
    case CodeRange::JitEntry:
      callerPC_ = nullptr;
      callerFP_ = nullptr;
      unwoundIonCallerFP_ = (uint8_t*)fp->callerFP;
      break;
    case CodeRange::Function:
      fp = fp->callerFP;
      callerPC_ = fp->returnAddress;
      callerFP_ = fp->callerFP;
      AssertMatchesCallSite(callerPC_, callerFP_);
      break;
    case CodeRange::ImportJitExit:
    case CodeRange::ImportInterpExit:
    case CodeRange::BuiltinThunk:
    case CodeRange::TrapExit:
    case CodeRange::DebugTrap:
    case CodeRange::Throw:
    case CodeRange::FarJumpIsland:
      MOZ_CRASH("Unexpected CodeRange kind");
  }

  MOZ_ASSERT(!done());
}

static void AssertCallerFP(DebugOnly<bool> fpWasTagged, Frame* const fp,
                           void** const sp) {
  MOZ_ASSERT_IF(!fpWasTagged.value,
                fp == reinterpret_cast<Frame*>(sp)->callerFP);
  MOZ_ASSERT_IF(fpWasTagged.value, (Frame*)(uintptr_t(fp) | 0x1) ==
                                       reinterpret_cast<Frame*>(sp)->callerFP);
}

bool js::wasm::StartUnwinding(const RegisterState& registers,
                              UnwindState* unwindState, bool* unwoundCaller) {
  // Shorthands.
  uint8_t* const pc = (uint8_t*)registers.pc;
  void** const sp = (void**)registers.sp;

  // The frame pointer might be:
  //  - in the process of tagging/untagging when calling into the JITs;
  //    make sure it's untagged.
  //  - tagged by a direct JIT call.
  //  - unreliable if it hasn't been set yet, in prologues.
  DebugOnly<bool> fpWasTagged = uintptr_t(registers.fp) & ExitOrJitEntryFPTag;
  Frame* const fp = (Frame*)(intptr_t(registers.fp) & ~ExitOrJitEntryFPTag);

  // Get the CodeRange describing pc and the base address to which the
  // CodeRange is relative. If the pc is not in a wasm module or a builtin
  // thunk, then execution must be entering from or leaving to the C++ caller
  // that pushed the JitActivation.
  const CodeRange* codeRange;
  uint8_t* codeBase;
  const Code* code = nullptr;

  const CodeSegment* codeSegment = LookupCodeSegment(pc, &codeRange);
  if (codeSegment) {
    code = &codeSegment->code();
    codeBase = codeSegment->base();
    MOZ_ASSERT(codeRange);
  } else if (!LookupBuiltinThunk(pc, &codeRange, &codeBase)) {
    return false;
  }

  // When the pc is inside the prologue/epilogue, the innermost call's Frame
  // is not complete and thus fp points to the second-to-innermost call's
  // Frame. Since fp can only tell you about its caller, naively unwinding
  // while pc is in the prologue/epilogue would skip the second-to-innermost
  // call. To avoid this problem, we use the static structure of the code in
  // the prologue and epilogue to do the Right Thing.
  uint32_t offsetInCode = pc - codeBase;
  MOZ_ASSERT(offsetInCode >= codeRange->begin());
  MOZ_ASSERT(offsetInCode < codeRange->end());

  // Compute the offset of the pc from the (normal) entry of the code range.
  // The stack state of the pc for the entire table-entry is equivalent to
  // that of the first pc of the normal-entry. Thus, we can simplify the below
  // case analysis by redirecting all pc-in-table-entry cases to the
  // pc-at-normal-entry case.
  uint32_t offsetFromEntry;
  if (codeRange->isFunction()) {
    if (offsetInCode < codeRange->funcNormalEntry()) {
      offsetFromEntry = 0;
    } else {
      offsetFromEntry = offsetInCode - codeRange->funcNormalEntry();
    }
  } else {
    offsetFromEntry = offsetInCode - codeRange->begin();
  }

  // Most cases end up unwinding to the caller state; not unwinding is the
  // exception here.
  *unwoundCaller = true;

  Frame* fixedFP = nullptr;
  void* fixedPC = nullptr;
  switch (codeRange->kind()) {
    case CodeRange::Function:
    case CodeRange::FarJumpIsland:
    case CodeRange::ImportJitExit:
    case CodeRange::ImportInterpExit:
    case CodeRange::BuiltinThunk:
    case CodeRange::DebugTrap:
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
      if (codeRange->isThunk()) {
        // The FarJumpIsland sequence temporarily scrambles ra.
        // Don't unwind to caller.
        fixedPC = pc;
        fixedFP = fp;
        *unwoundCaller = false;
        AssertMatchesCallSite(fp->returnAddress, fp->callerFP);
      } else if (offsetFromEntry < PushedFP) {
        // On MIPS we rely on register state instead of state saved on
        // stack until the wasm::Frame is completely built.
        // On entry the return address is in ra (registers.lr) and
        // fp holds the caller's fp.
        fixedPC = (uint8_t*)registers.lr;
        fixedFP = fp;
        AssertMatchesCallSite(fixedPC, fixedFP);
      } else
#elif defined(JS_CODEGEN_ARM64)
      if (offsetFromEntry < PushedFP || codeRange->isThunk()) {
        // Constraints above ensure that this covers BeforePushRetAddr,
        // PushedRetAddr, and PushedTLS.
        //
        // On ARM64 we subtract the size of the Frame from SP and then store
        // values into the stack. Execution can be interrupted at various
        // places in that sequence. We rely on the register state for our
        // values.
        fixedPC = (uint8_t*)registers.lr;
        fixedFP = fp;
        AssertMatchesCallSite(fixedPC, fixedFP);
      } else
#elif defined(JS_CODEGEN_ARM)
      if (offsetFromEntry == BeforePushRetAddr || codeRange->isThunk()) {
        // The return address is still in lr and fp holds the caller's fp.
        fixedPC = (uint8_t*)registers.lr;
        fixedFP = fp;
        AssertMatchesCallSite(fixedPC, fixedFP);
      } else
#endif
          if (offsetFromEntry == PushedRetAddr || codeRange->isThunk()) {
        // The return address has been pushed on the stack but fp still
        // points to the caller's fp.
        fixedPC = sp[0];
        fixedFP = fp;
        AssertMatchesCallSite(fixedPC, fixedFP);
      } else if (offsetFromEntry >= PushedTLS && offsetFromEntry < PushedFP) {
        // The return address and caller's TLS have been pushed on the
        // stack; fp is still the caller's fp.
        fixedPC = sp[1];
        fixedFP = fp;
        AssertMatchesCallSite(fixedPC, fixedFP);
      } else if (offsetFromEntry == PushedFP) {
        // The full Frame has been pushed; fp is still the caller's fp.
        AssertCallerFP(fpWasTagged, fp, sp);
        fixedPC = reinterpret_cast<Frame*>(sp)->returnAddress;
        fixedFP = fp;
        AssertMatchesCallSite(fixedPC, fixedFP);
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
      } else if (offsetInCode >= codeRange->ret() - PoppedFP &&
                 offsetInCode <= codeRange->ret()) {
        (void)PoppedTLSReg;
        // The callerFP field of the Frame has been loaded into fp.
        // The ra and TLS might also be loaded, but the Frame structure is
        // still on stack, so we can access the ra from there.
        AssertCallerFP(fpWasTagged, fp, sp);
        fixedPC = reinterpret_cast<Frame*>(sp)->returnAddress;
        fixedFP = fp;
        AssertMatchesCallSite(fixedPC, fixedFP);
#elif defined(JS_CODEGEN_ARM64)
      // The stack pointer does not move until all values have
      // been restored so several cases can be coalesced here.
      } else if (offsetInCode >= codeRange->ret() - PoppedFP &&
                 offsetInCode <= codeRange->ret()) {
        fixedPC = reinterpret_cast<Frame*>(sp)->returnAddress;
        fixedFP = fp;
        AssertMatchesCallSite(fixedPC, fixedFP);
#else
      } else if (offsetInCode >= codeRange->ret() - PoppedFP &&
                 offsetInCode < codeRange->ret() - PoppedTLSReg) {
        // The callerFP field of the Frame has been popped into fp.
        fixedPC = sp[1];
        fixedFP = fp;
        AssertMatchesCallSite(fixedPC, fixedFP);
      } else if (offsetInCode == codeRange->ret()) {
        // Both the TLS and callerFP fields have been popped and fp now
        // points to the caller's frame.
        fixedPC = sp[0];
        fixedFP = fp;
        AssertMatchesCallSite(fixedPC, fixedFP);
#endif
      } else {
        if (codeRange->kind() == CodeRange::ImportJitExit) {
          // The jit exit contains a range where the value of FP can't be
          // trusted. Technically, we could recover fp from sp, but since
          // the range is so short, for now just drop the stack.
          if (offsetInCode >= codeRange->jitExitUntrustedFPStart() &&
              offsetInCode < codeRange->jitExitUntrustedFPEnd()) {
            return false;
          }
        }
        // Not in the prologue/epilogue.
        fixedPC = pc;
        fixedFP = fp;
        *unwoundCaller = false;
        AssertMatchesCallSite(fp->returnAddress, fp->callerFP);
        break;
      }
      break;
    case CodeRange::TrapExit:
      // These code stubs execute after the prologue/epilogue have completed
      // so pc/fp contains the right values here.
      fixedPC = pc;
      fixedFP = fp;
      *unwoundCaller = false;
      AssertMatchesCallSite(fp->returnAddress, fp->callerFP);
      break;
    case CodeRange::InterpEntry:
      // The entry trampoline is the final frame in a wasm JitActivation. The
      // entry trampoline also doesn't GeneratePrologue/Epilogue so we can't
      // use the general unwinding logic above.
      break;
    case CodeRange::JitEntry:
      // There's a jit frame above the current one; we don't care about pc
      // since the Jit entry frame is a jit frame which can be considered as
      // an exit frame.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
      if (offsetFromEntry < PushedRetAddr) {
        // We haven't pushed the jit return address yet, thus the jit
        // frame is incomplete. During profiling frame iteration, it means
        // that the jit profiling frame iterator won't be able to unwind
        // this frame; drop it.
        return false;
      }
#endif
      fixedFP = offsetFromEntry < SetJitEntryFP ? (Frame*)sp : fp;
      fixedPC = nullptr;

      // On the error return path, FP might be set to FailFP. Ignore these
      // transient frames.
      if (intptr_t(fixedFP) == (FailFP & ~ExitOrJitEntryFPTag)) {
        return false;
      }
      break;
    case CodeRange::Throw:
      // The throw stub executes a small number of instructions before popping
      // the entire activation. To simplify testing, we simply pretend throw
      // stubs have already popped the entire stack.
      return false;
  }

  unwindState->code = code;
  unwindState->codeRange = codeRange;
  unwindState->fp = fixedFP;
  unwindState->pc = fixedPC;
  return true;
}

ProfilingFrameIterator::ProfilingFrameIterator(const JitActivation& activation,
                                               const RegisterState& state)
    : code_(nullptr),
      codeRange_(nullptr),
      callerFP_(nullptr),
      callerPC_(nullptr),
      stackAddress_(nullptr),
      unwoundIonCallerFP_(nullptr),
      exitReason_(ExitReason::Fixed::None) {
  // Let wasmExitFP take precedence over StartUnwinding when it is set, since
  // during the body of an exit stub the register state may not be valid,
  // causing StartUnwinding() to abandon unwinding this activation.
  if (activation.hasWasmExitFP()) {
    exitReason_ = activation.wasmExitReason();
    initFromExitFP(activation.wasmExitFP());
    return;
  }

  bool unwoundCaller;
  UnwindState unwindState;
  if (!StartUnwinding(state, &unwindState, &unwoundCaller)) {
    MOZ_ASSERT(done());
    return;
  }

  MOZ_ASSERT(unwindState.codeRange);

  if (unwoundCaller) {
    callerFP_ = unwindState.fp;
    callerPC_ = unwindState.pc;
    // In the case of a function call, if the original FP value is tagged,
    // then we're being called through a direct JIT call (the interpreter
    // and the jit entry don't set FP's low bit). We can't observe
    // transient tagged values of FP (during wasm::SetExitFP) here because
    // StartUnwinding would not have unwound then.
    if (unwindState.codeRange->isFunction() &&
        (uintptr_t(state.fp) & ExitOrJitEntryFPTag)) {
      unwoundIonCallerFP_ = (uint8_t*)callerFP_;
    }
  } else {
    callerFP_ = unwindState.fp->callerFP;
    callerPC_ = unwindState.fp->returnAddress;
    // See comment above: a tagged FP here means the caller is a fast JIT
    // caller which called into a wasm function.
    if ((uintptr_t(callerFP_) & ExitOrJitEntryFPTag)) {
      MOZ_ASSERT(unwindState.codeRange->isFunction());
      unwoundIonCallerFP_ =
          (uint8_t*)(uintptr_t(callerFP_) & ~ExitOrJitEntryFPTag);
    }
  }

  if (unwindState.codeRange->isJitEntry()) {
    MOZ_ASSERT(!unwoundIonCallerFP_);
    unwoundIonCallerFP_ = (uint8_t*)callerFP_;
  }

  if (unwindState.codeRange->isInterpEntry()) {
    unwindState.codeRange = nullptr;
    exitReason_ = ExitReason(ExitReason::Fixed::FakeInterpEntry);
  }

  code_ = unwindState.code;
  codeRange_ = unwindState.codeRange;
  stackAddress_ = state.sp;
  MOZ_ASSERT(!done());
}
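
// Sampling sketch (hypothetical profiler call site, not part of this file):
// given a register state captured asynchronously, label the wasm portion of
// the stack. recordLabel is a stand-in for whatever the sampler does.
//
//   ProfilingFrameIterator iter(*activation, registerState);
//   for (; !iter.done(); ++iter) {
//     recordLabel(iter.label());
//   }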

void ProfilingFrameIterator::operator++() {
  if (!exitReason_.isNone()) {
    DebugOnly<bool> wasInterpEntry = exitReason_.isInterpEntry();
    exitReason_ = ExitReason::None();
    MOZ_ASSERT((!codeRange_) == wasInterpEntry);
    MOZ_ASSERT(done() == wasInterpEntry);
    return;
  }

  if (unwoundIonCallerFP_) {
    MOZ_ASSERT(codeRange_->isFunction() || codeRange_->isJitEntry());
    callerPC_ = nullptr;
    callerFP_ = nullptr;
    codeRange_ = nullptr;
    MOZ_ASSERT(done());
    return;
  }

  if (!callerPC_) {
    MOZ_ASSERT(!callerFP_);
    codeRange_ = nullptr;
    MOZ_ASSERT(done());
    return;
  }

  if (!callerFP_) {
    MOZ_ASSERT(LookupCode(callerPC_, &codeRange_) == code_);
    MOZ_ASSERT(codeRange_->kind() == CodeRange::InterpEntry);
    exitReason_ = ExitReason(ExitReason::Fixed::FakeInterpEntry);
    codeRange_ = nullptr;
    callerPC_ = nullptr;
    MOZ_ASSERT(!done());
    return;
  }

  code_ = LookupCode(callerPC_, &codeRange_);

  if (!code_ && uintptr_t(callerFP_) & ExitOrJitEntryFPTag) {
    // The parent frame is an inlined wasm call; the tagged FP points to
    // the fake exit frame.
    MOZ_ASSERT(!codeRange_);
    AssertDirectJitCall(callerFP_);
    unwoundIonCallerFP_ =
        (uint8_t*)(uintptr_t(callerFP_) & ~uintptr_t(ExitOrJitEntryFPTag));
    MOZ_ASSERT(done());
    return;
  }

  MOZ_ASSERT(codeRange_);

  if (codeRange_->isJitEntry()) {
    unwoundIonCallerFP_ = (uint8_t*)callerFP_;
    MOZ_ASSERT(!done());
    return;
  }

  MOZ_ASSERT(code_ == &callerFP_->tls->instance->code());

  switch (codeRange_->kind()) {
    case CodeRange::Function:
    case CodeRange::ImportJitExit:
    case CodeRange::ImportInterpExit:
    case CodeRange::BuiltinThunk:
    case CodeRange::TrapExit:
    case CodeRange::DebugTrap:
    case CodeRange::FarJumpIsland:
      stackAddress_ = callerFP_;
      callerPC_ = callerFP_->returnAddress;
      AssertMatchesCallSite(callerPC_, callerFP_->callerFP);
      callerFP_ = callerFP_->callerFP;
      break;
    case CodeRange::InterpEntry:
      MOZ_CRASH("should have had null caller fp");
    case CodeRange::JitEntry:
      MOZ_CRASH("should have been guarded above");
    case CodeRange::Throw:
      MOZ_CRASH("code range doesn't have frame");
  }

  MOZ_ASSERT(!done());
}

static const char* ThunkedNativeToDescription(SymbolicAddress func) {
  MOZ_ASSERT(NeedsBuiltinThunk(func));
  switch (func) {
    case SymbolicAddress::HandleDebugTrap:
    case SymbolicAddress::HandleThrow:
    case SymbolicAddress::HandleTrap:
    case SymbolicAddress::CallImport_Void:
    case SymbolicAddress::CallImport_I32:
    case SymbolicAddress::CallImport_I64:
    case SymbolicAddress::CallImport_F64:
    case SymbolicAddress::CallImport_FuncRef:
    case SymbolicAddress::CallImport_AnyRef:
    case SymbolicAddress::CoerceInPlace_ToInt32:
    case SymbolicAddress::CoerceInPlace_ToNumber:
    case SymbolicAddress::BoxValue_Anyref:
      MOZ_ASSERT(!NeedsBuiltinThunk(func),
                 "not in sync with NeedsBuiltinThunk");
      break;
    case SymbolicAddress::ToInt32:
      return "call to asm.js native ToInt32 coercion (in wasm)";
    case SymbolicAddress::DivI64:
      return "call to native i64.div_s (in wasm)";
    case SymbolicAddress::UDivI64:
      return "call to native i64.div_u (in wasm)";
    case SymbolicAddress::ModI64:
      return "call to native i64.rem_s (in wasm)";
    case SymbolicAddress::UModI64:
      return "call to native i64.rem_u (in wasm)";
    case SymbolicAddress::TruncateDoubleToUint64:
      return "call to native i64.trunc_u/f64 (in wasm)";
    case SymbolicAddress::TruncateDoubleToInt64:
      return "call to native i64.trunc_s/f64 (in wasm)";
    case SymbolicAddress::SaturatingTruncateDoubleToUint64:
      return "call to native i64.trunc_u:sat/f64 (in wasm)";
    case SymbolicAddress::SaturatingTruncateDoubleToInt64:
      return "call to native i64.trunc_s:sat/f64 (in wasm)";
    case SymbolicAddress::Uint64ToDouble:
      return "call to native f64.convert_u/i64 (in wasm)";
    case SymbolicAddress::Uint64ToFloat32:
      return "call to native f32.convert_u/i64 (in wasm)";
    case SymbolicAddress::Int64ToDouble:
      return "call to native f64.convert_s/i64 (in wasm)";
    case SymbolicAddress::Int64ToFloat32:
      return "call to native f32.convert_s/i64 (in wasm)";
#if defined(JS_CODEGEN_ARM)
    case SymbolicAddress::aeabi_idivmod:
      return "call to native i32.div_s (in wasm)";
    case SymbolicAddress::aeabi_uidivmod:
      return "call to native i32.div_u (in wasm)";
#endif
    case SymbolicAddress::ModD:
      return "call to asm.js native f64 % (mod)";
    case SymbolicAddress::SinD:
      return "call to asm.js native f64 Math.sin";
    case SymbolicAddress::CosD:
      return "call to asm.js native f64 Math.cos";
    case SymbolicAddress::TanD:
      return "call to asm.js native f64 Math.tan";
    case SymbolicAddress::ASinD:
      return "call to asm.js native f64 Math.asin";
    case SymbolicAddress::ACosD:
      return "call to asm.js native f64 Math.acos";
    case SymbolicAddress::ATanD:
      return "call to asm.js native f64 Math.atan";
    case SymbolicAddress::CeilD:
      return "call to native f64.ceil (in wasm)";
    case SymbolicAddress::CeilF:
      return "call to native f32.ceil (in wasm)";
    case SymbolicAddress::FloorD:
      return "call to native f64.floor (in wasm)";
    case SymbolicAddress::FloorF:
      return "call to native f32.floor (in wasm)";
    case SymbolicAddress::TruncD:
      return "call to native f64.trunc (in wasm)";
    case SymbolicAddress::TruncF:
      return "call to native f32.trunc (in wasm)";
    case SymbolicAddress::NearbyIntD:
      return "call to native f64.nearest (in wasm)";
    case SymbolicAddress::NearbyIntF:
      return "call to native f32.nearest (in wasm)";
    case SymbolicAddress::ExpD:
      return "call to asm.js native f64 Math.exp";
    case SymbolicAddress::LogD:
      return "call to asm.js native f64 Math.log";
    case SymbolicAddress::PowD:
      return "call to asm.js native f64 Math.pow";
    case SymbolicAddress::ATan2D:
      return "call to asm.js native f64 Math.atan2";
    case SymbolicAddress::MemoryGrow:
      return "call to native memory.grow (in wasm)";
    case SymbolicAddress::MemorySize:
      return "call to native memory.size (in wasm)";
    case SymbolicAddress::WaitI32:
      return "call to native i32.wait (in wasm)";
    case SymbolicAddress::WaitI64:
      return "call to native i64.wait (in wasm)";
    case SymbolicAddress::Wake:
      return "call to native wake (in wasm)";
    case SymbolicAddress::CoerceInPlace_JitEntry:
      return "out-of-line coercion for jit entry arguments (in wasm)";
    case SymbolicAddress::ReportInt64JSCall:
      return "jit call to int64 wasm function";
    case SymbolicAddress::MemCopy:
    case SymbolicAddress::MemCopyShared:
      return "call to native memory.copy function";
    case SymbolicAddress::DataDrop:
      return "call to native data.drop function";
    case SymbolicAddress::MemFill:
    case SymbolicAddress::MemFillShared:
      return "call to native memory.fill function";
    case SymbolicAddress::MemInit:
      return "call to native memory.init function";
    case SymbolicAddress::TableCopy:
      return "call to native table.copy function";
    case SymbolicAddress::TableFill:
      return "call to native table.fill function";
    case SymbolicAddress::ElemDrop:
      return "call to native elem.drop function";
    case SymbolicAddress::TableGet:
      return "call to native table.get function";
    case SymbolicAddress::TableGrow:
      return "call to native table.grow function";
    case SymbolicAddress::TableInit:
      return "call to native table.init function";
    case SymbolicAddress::TableSet:
      return "call to native table.set function";
    case SymbolicAddress::TableSize:
      return "call to native table.size function";
    case SymbolicAddress::FuncRef:
      return "call to native func.ref function";
    case SymbolicAddress::PostBarrier:
      return "call to native GC postbarrier (in wasm)";
    case SymbolicAddress::PostBarrierFiltering:
      return "call to native filtering GC postbarrier (in wasm)";
    case SymbolicAddress::StructNew:
      return "call to native struct.new (in wasm)";
    case SymbolicAddress::StructNarrow:
      return "call to native struct.narrow (in wasm)";
#if defined(JS_CODEGEN_MIPS32)
    case SymbolicAddress::js_jit_gAtomic64Lock:
      MOZ_CRASH();
#endif
#ifdef WASM_CODEGEN_DEBUG
    case SymbolicAddress::PrintI32:
    case SymbolicAddress::PrintPtr:
    case SymbolicAddress::PrintF32:
    case SymbolicAddress::PrintF64:
    case SymbolicAddress::PrintText:
#endif
    case SymbolicAddress::Limit:
      break;
  }
  return "?";
}

const char* ProfilingFrameIterator::label() const {
  MOZ_ASSERT(!done());

  // Use the same string for both time inside and under so that the two
  // entries will be coalesced by the profiler.
  // Must be kept in sync with /tools/profiler/tests/test_asm.js
  static const char importJitDescription[] = "fast exit trampoline (in wasm)";
  static const char importInterpDescription[] =
      "slow exit trampoline (in wasm)";
  static const char builtinNativeDescription[] =
      "fast exit trampoline to native (in wasm)";
  static const char trapDescription[] = "trap handling (in wasm)";
  static const char debugTrapDescription[] = "debug trap handling (in wasm)";

  if (!exitReason_.isFixed()) {
    return ThunkedNativeToDescription(exitReason_.symbolic());
  }

  switch (exitReason_.fixed()) {
    case ExitReason::Fixed::None:
      break;
    case ExitReason::Fixed::ImportJit:
      return importJitDescription;
    case ExitReason::Fixed::ImportInterp:
      return importInterpDescription;
    case ExitReason::Fixed::BuiltinNative:
      return builtinNativeDescription;
    case ExitReason::Fixed::Trap:
      return trapDescription;
    case ExitReason::Fixed::DebugTrap:
      return debugTrapDescription;
    case ExitReason::Fixed::FakeInterpEntry:
      return "slow entry trampoline (in wasm)";
  }

  switch (codeRange_->kind()) {
    case CodeRange::Function:
      return code_->profilingLabel(codeRange_->funcIndex());
    case CodeRange::InterpEntry:
      MOZ_CRASH("should be an ExitReason");
    case CodeRange::JitEntry:
      return "fast entry trampoline (in wasm)";
    case CodeRange::ImportJitExit:
      return importJitDescription;
    case CodeRange::BuiltinThunk:
      return builtinNativeDescription;
    case CodeRange::ImportInterpExit:
      return importInterpDescription;
    case CodeRange::TrapExit:
      return trapDescription;
    case CodeRange::DebugTrap:
      return debugTrapDescription;
    case CodeRange::FarJumpIsland:
      return "interstitial (in wasm)";
    case CodeRange::Throw:
      MOZ_CRASH("does not have a frame");
  }

  MOZ_CRASH("bad code range kind");
}