Source code

Revision control

Other Tools

1
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2
* vim: set ts=8 sts=2 et sw=2 tw=80:
3
*
4
* Copyright 2015 Mozilla Foundation
5
*
6
* Licensed under the Apache License, Version 2.0 (the "License");
7
* you may not use this file except in compliance with the License.
8
* You may obtain a copy of the License at
9
*
11
*
12
* Unless required by applicable law or agreed to in writing, software
13
* distributed under the License is distributed on an "AS IS" BASIS,
14
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
* See the License for the specific language governing permissions and
16
* limitations under the License.
17
*/
18
19
#include "wasm/WasmStubs.h"
20
21
#include "mozilla/ArrayUtils.h"
22
23
#include <algorithm>
24
25
#include "jit/JitScript.h"
26
#include "jit/RegisterAllocator.h"
27
#include "js/Printf.h"
28
#include "util/Memory.h"
29
#include "wasm/WasmCode.h"
30
#include "wasm/WasmGenerator.h"
31
#include "wasm/WasmInstance.h"
32
33
#include "jit/MacroAssembler-inl.h"
34
35
using namespace js;
36
using namespace js::jit;
37
using namespace js::wasm;
38
39
using mozilla::ArrayLength;
40
41
typedef Vector<jit::MIRType, 8, SystemAllocPolicy> MIRTypeVector;
42
typedef jit::ABIArgIter<MIRTypeVector> ABIArgMIRTypeIter;
43
typedef jit::ABIArgIter<ValTypeVector> ABIArgValTypeIter;
44
45
/*****************************************************************************/
46
// ABIResultIter implementation
47
48
// Number of bytes a result of the given wasm type occupies in the
// stack-results area.
static uint32_t ResultStackSize(ValType type) {
  auto code = type.code();
  if (code == ValType::I32) {
    return ABIResult::StackSizeOfInt32;
  }
  if (code == ValType::I64) {
    return ABIResult::StackSizeOfInt64;
  }
  if (code == ValType::F32) {
    return ABIResult::StackSizeOfFloat;
  }
  if (code == ValType::F64) {
    return ABIResult::StackSizeOfDouble;
  }
  if (code == ValType::Ref || code == ValType::FuncRef ||
      code == ValType::AnyRef) {
    return ABIResult::StackSizeOfPtr;
  }
  // ValType::NullRef (and anything unknown) cannot appear as a result.
  MOZ_CRASH("Unexpected result type");
}
67
68
// Size in bytes of this result's stack slot (see ResultStackSize).
uint32_t ABIResult::size() const { return ResultStackSize(type()); }
69
70
// Settle the current result into its dedicated return register, chosen by
// the result's type. Only the single register result may land here.
void ABIResultIter::settleRegister(ValType type) {
  MOZ_ASSERT(!done());
  MOZ_ASSERT_IF(direction_ == Next, index() < RegisterResultCount);
  MOZ_ASSERT_IF(direction_ == Prev, index() >= count_ - RegisterResultCount);
  static_assert(RegisterResultCount == 1, "expected a single register result");

  switch (type.code()) {
    case ValType::I32:
    case ValType::Ref:
    case ValType::FuncRef:
    case ValType::AnyRef:
      // i32 and pointer-sized reference results share the primary GPR.
      cur_ = ABIResult(type, ReturnReg);
      break;
    case ValType::I64:
      cur_ = ABIResult(type, ReturnReg64);
      break;
    case ValType::F32:
      cur_ = ABIResult(type, ReturnFloat32Reg);
      break;
    case ValType::F64:
      cur_ = ABIResult(type, ReturnDoubleReg);
      break;
    case ValType::NullRef:
    default:
      MOZ_CRASH("Unexpected result type");
  }
}
99
100
void ABIResultIter::settleNext() {
101
MOZ_ASSERT(direction_ == Next);
102
MOZ_ASSERT(!done());
103
104
uint32_t typeIndex = count_ - index_ - 1;
105
ValType type = type_[typeIndex];
106
107
if (index_ < RegisterResultCount) {
108
settleRegister(type);
109
return;
110
}
111
112
cur_ = ABIResult(type, nextStackOffset_);
113
nextStackOffset_ += ResultStackSize(type);
114
}
115
116
void ABIResultIter::settlePrev() {
117
MOZ_ASSERT(direction_ == Prev);
118
MOZ_ASSERT(!done());
119
uint32_t typeIndex = index_;
120
ValType type = type_[typeIndex];
121
122
if (count_ - index_ - 1 < RegisterResultCount) {
123
settleRegister(type);
124
return;
125
}
126
127
uint32_t size = ResultStackSize(type);
128
MOZ_ASSERT(nextStackOffset_ >= size);
129
nextStackOffset_ -= size;
130
cur_ = ABIResult(type, nextStackOffset_);
131
}
132
133
#ifdef WASM_CODEGEN_DEBUG
// Shared scaffolding for the GenPrint* helpers below: when the channel is
// enabled, preserves all registers, picks a temporary GPR (avoiding |taken|),
// sets up an unaligned ABI call, and lets |passArgAndCall| marshal its
// argument and emit the actual call.
template <class Closure>
static void GenPrint(DebugChannel channel, MacroAssembler& masm,
                     const Maybe<Register>& taken, Closure passArgAndCall) {
  if (!IsCodegenDebugEnabled(channel)) {
    return;
  }

  AllocatableRegisterSet regs(RegisterSet::All());
  LiveRegisterSet save(regs.asLiveSet());
  masm.PushRegsInMask(save);

  if (taken) {
    // Don't hand out a register holding the caller's live argument.
    regs.take(taken.value());
  }
  Register temp = regs.takeAnyGeneral();

  {
    MOZ_ASSERT(MaybeGetJitContext(),
               "codegen debug checks require a jit context");
    masm.setupUnalignedABICall(temp);
    passArgAndCall(IsCompilingWasm(), temp);
  }

  masm.PopRegsInMask(save);
}

// Emit code that prints the formatted string (expanded at stub-generation
// time, not at run time).
static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
                      const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  UniqueChars str = JS_vsmprintf(fmt, ap);
  va_end(ap);

  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    // If we've gone this far, it means we're actually using the debugging
    // strings. In this case, we leak them! This is only for debugging, and
    // doing the right thing is cumbersome (in Ion, it'd mean add a vec of
    // strings to the IonScript; in wasm, it'd mean add it to the current
    // Module and serialize it properly).
    const char* text = str.release();

    masm.movePtr(ImmPtr((void*)text, ImmPtr::NoCheckToken()), temp);
    masm.passABIArg(temp);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintText);
    } else {
      masm.callWithABI((void*)PrintText, MoveOp::GENERAL,
                       CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

// Emit code that prints the 32-bit integer currently in |src|.
static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
                          const Register& src) {
  GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
    masm.passABIArg(src);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintI32);
    } else {
      masm.callWithABI((void*)PrintI32, MoveOp::GENERAL,
                       CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

// Emit code that prints the pointer currently in |src|.
static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
                        const Register& src) {
  GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
    masm.passABIArg(src);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintPtr);
    } else {
      masm.callWithABI((void*)PrintPtr, MoveOp::GENERAL,
                       CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

// Emit code that prints the 64-bit integer in |src|; on 32-bit targets the
// low and high halves are printed separately.
static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
                        const Register64& src) {
# if JS_BITS_PER_WORD == 64
  GenPrintf(channel, masm, "i64 ");
  GenPrintIsize(channel, masm, src.reg);
# else
  GenPrintf(channel, masm, "i64(");
  GenPrintIsize(channel, masm, src.low);
  GenPrintIsize(channel, masm, src.high);
  GenPrintf(channel, masm, ") ");
# endif
}

// Emit code that prints the float32 in |src|.
static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {
  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    masm.passABIArg(src, MoveOp::FLOAT32);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintF32);
    } else {
      masm.callWithABI((void*)PrintF32, MoveOp::GENERAL,
                       CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}

// Emit code that prints the double in |src|.
static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {
  GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
    masm.passABIArg(src, MoveOp::DOUBLE);
    if (inWasm) {
      masm.callDebugWithABI(SymbolicAddress::PrintF64);
    } else {
      masm.callWithABI((void*)PrintF64, MoveOp::GENERAL,
                       CheckUnsafeCallWithABI::DontCheckOther);
    }
  });
}
#else
// Non-debug builds: all GenPrint* helpers compile to nothing.
static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
                      const char* fmt, ...) {}
static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
                          const Register& src) {}
static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
                        const Register& src) {}
static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
                        const Register64& src) {}
static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {}
static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
                        const FloatRegister& src) {}
#endif
264
265
// Record the end offset of the just-generated stub into |offsets| and report
// whether assembly succeeded (no OOM).
static bool FinishOffsets(MacroAssembler& masm, Offsets* offsets) {
  // On old ARM hardware, constant pools could be inserted and they need to
  // be flushed before considering the size of the masm.
  masm.flushBuffer();
  offsets->end = masm.size();
  return !masm.oom();
}
272
273
// Debug-check (statically and with emitted code) that the stack is aligned
// to |alignment| once |addBeforeAssert| further bytes are pushed, counting
// the fixed wasm Frame.
static void AssertStackAlignment(MacroAssembler& masm, uint32_t alignment,
                                 uint32_t addBeforeAssert = 0) {
  MOZ_ASSERT(
      (sizeof(Frame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
  masm.assertStackAlignment(alignment, addBeforeAssert);
}
279
280
template <class VectorT>
281
static unsigned StackArgBytes(const VectorT& args) {
282
ABIArgIter<VectorT> iter(args);
283
while (!iter.done()) {
284
iter++;
285
}
286
return iter.stackBytesConsumedSoFar();
287
}
288
289
// Copy a 64-bit value from |src| to |dest| through |scratch|; on 32-bit
// targets this is done one 32-bit word at a time.
static void Move64(MacroAssembler& masm, const Address& src,
                   const Address& dest, Register scratch) {
#if JS_BITS_PER_WORD == 32
  masm.load32(LowWord(src), scratch);
  masm.store32(scratch, LowWord(dest));
  masm.load32(HighWord(src), scratch);
  masm.store32(scratch, HighWord(dest));
#else
  Register64 scratch64(scratch);
  masm.load64(src, scratch64);
  masm.store64(scratch64, dest);
#endif
}
302
303
// Move each of the export's arguments from the ExportArg array at |argv|
// into its assigned wasm ABI location (register or outgoing stack slot).
// |scratch| is a temporary GPR used for stack-to-stack moves.
static void SetupABIArguments(MacroAssembler& masm, const FuncExport& fe,
                              Register argv, Register scratch) {
  // Copy parameters out of argv and into the registers/stack-slots specified by
  // the system ABI.
  for (ABIArgValTypeIter iter(fe.funcType().args()); !iter.done(); iter++) {
    // Each argument occupies one fixed-size ExportArg slot in argv.
    unsigned argOffset = iter.index() * sizeof(ExportArg);
    Address src(argv, argOffset);
    MIRType type = iter.mirType();
    switch (iter->kind()) {
      case ABIArg::GPR:
        if (type == MIRType::Int32) {
          masm.load32(src, iter->gpr());
        } else if (type == MIRType::Int64) {
          masm.load64(src, iter->gpr64());
        } else if (type == MIRType::RefOrNull) {
          masm.loadPtr(src, iter->gpr());
        } else {
          MOZ_CRASH("unknown GPR type");
        }
        break;
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR:
        if (type == MIRType::Int64) {
          masm.load64(src, iter->gpr64());
        } else {
          MOZ_CRASH("wasm uses hardfp for function calls.");
        }
        break;
#endif
      case ABIArg::FPU: {
        static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
                      "ExportArg must be big enough to store SIMD values");
        switch (type) {
          case MIRType::Double:
            masm.loadDouble(src, iter->fpu());
            break;
          case MIRType::Float32:
            masm.loadFloat32(src, iter->fpu());
            break;
          default:
            MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected FPU type");
            break;
        }
        break;
      }
      case ABIArg::Stack:
        // Stack-passed arguments are copied via |scratch| (or an FPU
        // scratch) to their offset from the current stack pointer.
        switch (type) {
          case MIRType::Int32:
            masm.load32(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          case MIRType::Int64: {
            RegisterOrSP sp = masm.getStackPointer();
            Move64(masm, src, Address(sp, iter->offsetFromArgBase()), scratch);
            break;
          }
          case MIRType::RefOrNull:
            masm.loadPtr(src, scratch);
            masm.storePtr(scratch, Address(masm.getStackPointer(),
                                           iter->offsetFromArgBase()));
            break;
          case MIRType::Double: {
            ScratchDoubleScope fpscratch(masm);
            masm.loadDouble(src, fpscratch);
            masm.storeDouble(fpscratch, Address(masm.getStackPointer(),
                                                iter->offsetFromArgBase()));
            break;
          }
          case MIRType::Float32: {
            ScratchFloat32Scope fpscratch(masm);
            masm.loadFloat32(src, fpscratch);
            masm.storeFloat32(fpscratch, Address(masm.getStackPointer(),
                                                 iter->offsetFromArgBase()));
            break;
          }
          default:
            MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE(
                "unexpected stack arg type");
        }
        break;
      case ABIArg::Uninitialized:
        MOZ_CRASH("Uninitialized ABIArg kind");
    }
  }
}
389
390
// Store the export's (single) return value from its ABI return register
// into argv[0]; no-op for a void result.
static void StoreABIReturn(MacroAssembler& masm, const FuncExport& fe,
                           Register argv) {
  // Store the return value in argv[0].
  const ValTypeVector& results = fe.funcType().results();
  if (results.length() == 0) {
    return;
  }
  MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
  switch (results[0].code()) {
    case ValType::I32:
      masm.store32(ReturnReg, Address(argv, 0));
      break;
    case ValType::I64:
      masm.store64(ReturnReg64, Address(argv, 0));
      break;
    case ValType::F32:
      // Canonicalize before handing the float back to the embedder.
      masm.canonicalizeFloat(ReturnFloat32Reg);
      masm.storeFloat32(ReturnFloat32Reg, Address(argv, 0));
      break;
    case ValType::F64:
      masm.canonicalizeDouble(ReturnDoubleReg);
      masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
      break;
    case ValType::Ref:
    case ValType::FuncRef:
    case ValType::AnyRef:
      masm.storePtr(ReturnReg, Address(argv, 0));
      break;
    case ValType::NullRef:
      MOZ_CRASH("NullRef not expressible");
  }
}
422
423
#if defined(JS_CODEGEN_ARM)
// The ARM system ABI also includes d15 & s31 in the non volatile float
// registers. Also exclude lr (a.k.a. r14) as we preserve it manually.
static const LiveRegisterSet NonVolatileRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask &
                                       ~(uint32_t(1) << Registers::lr)),
                    FloatRegisterSet(FloatRegisters::NonVolatileMask |
                                     (1ULL << FloatRegisters::d15) |
                                     (1ULL << FloatRegisters::s31)));
#elif defined(JS_CODEGEN_ARM64)
// Exclude the Link Register (x30) because it is preserved manually.
//
// Include x16 (scratch) to make a 16-byte aligned amount of integer registers.
// Include d31 (scratch) to make a 16-byte aligned amount of floating registers.
static const LiveRegisterSet NonVolatileRegs =
    LiveRegisterSet(GeneralRegisterSet((Registers::NonVolatileMask &
                                        ~(uint32_t(1) << Registers::lr)) |
                                       (uint32_t(1) << Registers::x16)),
                    FloatRegisterSet(FloatRegisters::NonVolatileMask |
                                     FloatRegisters::NonAllocatableMask));
#else
static const LiveRegisterSet NonVolatileRegs =
    LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
                    FloatRegisterSet(FloatRegisters::NonVolatileMask));
#endif

#if defined(JS_CODEGEN_NONE)
static const unsigned NonVolatileRegsPushSize = 0;
#else
// Total number of bytes PushRegsInMask(NonVolatileRegs) pushes.
static const unsigned NonVolatileRegsPushSize =
    NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
    NonVolatileRegs.fpus().getPushSizeInBytes();
#endif

#ifdef ENABLE_WASM_REFTYPES
static const unsigned NumExtraPushed = 2;  // tls and argv
#else
static const unsigned NumExtraPushed = 1;  // argv
#endif

#ifdef JS_CODEGEN_ARM64
// Each WasmPush consumes 16 bytes on ARM64 (value plus pad word) so that SP
// stays 16-byte aligned; see WasmPush below.
static const unsigned WasmPushSize = 16;
#else
static const unsigned WasmPushSize = sizeof(void*);
#endif

// Bytes pushed by the interp entry stub before it dynamically re-aligns SP.
static const unsigned FramePushedBeforeAlign =
    NonVolatileRegsPushSize + NumExtraPushed * WasmPushSize;
471
472
// Debug-check that ARM64 code is currently using the real SP rather than
// the pseudo stack pointer; no-op on other targets.
static void AssertExpectedSP(const MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
  MOZ_ASSERT(sp.Is(masm.GetStackPointer64()));
#endif
}
477
478
// Push a single pointer-sized operand while keeping the platform's required
// SP alignment (pairs with WasmPop and WasmPushSize above).
template <class Operand>
static void WasmPush(MacroAssembler& masm, const Operand& op) {
#ifdef JS_CODEGEN_ARM64
  // Allocate a pad word so that SP can remain properly aligned. |op| will be
  // written at the lower-addressed of the two words pushed here.
  masm.reserveStack(WasmPushSize);
  masm.storePtr(op, Address(masm.getStackPointer(), 0));
#else
  masm.Push(op);
#endif
}
489
490
// Inverse of WasmPush: pop one pointer-sized value into |r|.
static void WasmPop(MacroAssembler& masm, Register r) {
#ifdef JS_CODEGEN_ARM64
  // Also pop the pad word allocated by WasmPush.
  masm.loadPtr(Address(masm.getStackPointer(), 0), r);
  masm.freeStack(WasmPushSize);
#else
  masm.Pop(r);
#endif
}
499
500
// On ARM64, copy the real SP into the PseudoStackPointer before entering
// code that follows the JIT ABI; no-op elsewhere.
static void MoveSPForJitABI(MacroAssembler& masm) {
#ifdef JS_CODEGEN_ARM64
  masm.moveStackPtrTo(PseudoStackPointer);
#endif
}
505
506
// Emit the call to the exported function: eager stubs call by function
// index, while lazy stubs are handed an absolute code pointer.
static void CallFuncExport(MacroAssembler& masm, const FuncExport& fe,
                           const Maybe<ImmPtr>& funcPtr) {
  MOZ_ASSERT(fe.hasEagerStubs() == !funcPtr);
  if (!funcPtr) {
    masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
  } else {
    masm.call(*funcPtr);
  }
}
515
516
STATIC_ASSERT_ANYREF_IS_JSOBJECT;  // Strings are currently boxed

// Unboxing is branchy and contorted because of Spectre mitigations - we don't
// have enough scratch registers. Were it not for the spectre mitigations in
// branchTestObjClass, the branch nest below would be restructured significantly
// by inverting branches and using fewer registers.

// Unbox an anyref in src (clobbering src in the process) and then re-box it as
// a Value in *dst. See the definition of AnyRef for a discussion of pointer
// representation.
static void UnboxAnyrefIntoValue(MacroAssembler& masm, Register tls,
                                 Register src, const Address& dst,
                                 Register scratch) {
  MOZ_ASSERT(src != scratch);

  // Not actually the value we're passing, but we've no way of
  // decoding anything better.
  GenPrintPtr(DebugChannel::Import, masm, src);

  Label notNull, mustUnbox, done;
  masm.branchTestPtr(Assembler::NonZero, src, src, &notNull);
  // A null anyref becomes the JS null value.
  masm.storeValue(NullValue(), dst);
  masm.jump(&done);

  masm.bind(&notNull);
  // The type test will clear src if the test fails, so store early.
  masm.storeValue(JSVAL_TYPE_OBJECT, src, dst);
  // Spectre mitigations: see comment above about efficiency.
  masm.branchTestObjClass(Assembler::Equal, src,
                          Address(tls, offsetof(TlsData, valueBoxClass)),
                          scratch, src, &mustUnbox);
  masm.jump(&done);

  masm.bind(&mustUnbox);
  // src is a WasmValueBox: copy the boxed Value out of the object instead.
  Move64(masm, Address(src, WasmValueBox::offsetOfValue()), dst, scratch);

  masm.bind(&done);
}
554
555
// Unbox an anyref in src and then re-box it as a Value in dst.
// See the definition of AnyRef for a discussion of pointer representation.
static void UnboxAnyrefIntoValueReg(MacroAssembler& masm, Register tls,
                                    Register src, ValueOperand dst,
                                    Register scratch) {
  MOZ_ASSERT(src != scratch);
#if JS_BITS_PER_WORD == 32
  MOZ_ASSERT(dst.typeReg() != scratch);
  MOZ_ASSERT(dst.payloadReg() != scratch);
#else
  MOZ_ASSERT(dst.valueReg() != scratch);
#endif

  // Not actually the value we're passing, but we've no way of
  // decoding anything better.
  GenPrintPtr(DebugChannel::Import, masm, src);

  Label notNull, mustUnbox, done;
  masm.branchTestPtr(Assembler::NonZero, src, src, &notNull);
  // A null anyref becomes the JS null value.
  masm.moveValue(NullValue(), dst);
  masm.jump(&done);

  masm.bind(&notNull);
  // The type test will clear src if the test fails, so store early.
  masm.moveValue(TypedOrValueRegister(MIRType::Object, AnyRegister(src)), dst);
  // Spectre mitigations: see comment above about efficiency.
  masm.branchTestObjClass(Assembler::Equal, src,
                          Address(tls, offsetof(TlsData, valueBoxClass)),
                          scratch, src, &mustUnbox);
  masm.jump(&done);

  masm.bind(&mustUnbox);
  // src is a WasmValueBox: load the boxed Value out of the object instead.
  masm.loadValue(Address(src, WasmValueBox::offsetOfValue()), dst);

  masm.bind(&done);
}
591
592
// Box the Value in src as an anyref in dest. src and dest must not overlap.
// See the definition of AnyRef for a discussion of pointer representation.
static void BoxValueIntoAnyref(MacroAssembler& masm, ValueOperand src,
                               Register dest, Label* oolConvert) {
  Label nullValue, objectValue, done;
  {
    ScratchTagScope tag(masm, src);
    masm.splitTagForTest(src, tag);
    masm.branchTestObject(Assembler::Equal, tag, &objectValue);
    masm.branchTestNull(Assembler::Equal, tag, &nullValue);
    // Any other tag is handled by the out-of-line conversion path.
    masm.jump(oolConvert);
  }

  masm.bind(&nullValue);
  // Null is represented as a null pointer.
  masm.xorPtr(dest, dest);
  masm.jump(&done);

  masm.bind(&objectValue);
  masm.unboxObject(src, dest);

  masm.bind(&done);
}
614
615
// Generate a stub that enters wasm from a C++ caller via the native ABI. The
// signature of the entry point is Module::ExportFuncPtr. The exported wasm
// function has an ABI derived from its specific signature, so this function
// must map from the ABI of ExportFuncPtr to the export's signature's ABI.
//
// Returns false on assembler OOM. |funcPtr|, when present, is the absolute
// address to call (lazy stubs); otherwise an eager per-function call is
// emitted (see CallFuncExport).
static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe,
                                const Maybe<ImmPtr>& funcPtr,
                                Offsets* offsets) {
  AssertExpectedSP(masm);
  masm.haltingAlign(CodeAlignment);

  offsets->begin = masm.currentOffset();

  // Save the return address if it wasn't already saved by the call insn.
#ifdef JS_USE_LINK_REGISTER
# if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || \
     defined(JS_CODEGEN_MIPS64)
  masm.pushReturnAddress();
# elif defined(JS_CODEGEN_ARM64)
  // WasmPush updates framePushed() unlike pushReturnAddress(), but that's
  // cancelled by the setFramePushed() below.
  WasmPush(masm, lr);
# else
  MOZ_CRASH("Implement this");
# endif
#endif

  // Save all caller non-volatile registers before we clobber them here and in
  // the wasm callee (which does not preserve non-volatile registers).
  masm.setFramePushed(0);
  masm.PushRegsInMask(NonVolatileRegs);
  MOZ_ASSERT(masm.framePushed() == NonVolatileRegsPushSize);

  // Put the 'argv' argument into a non-argument/return/TLS register so that
  // we can use 'argv' while we fill in the arguments for the wasm callee.
  // Use a second non-argument/return register as temporary scratch.
  Register argv = ABINonArgReturnReg0;
  Register scratch = ABINonArgReturnReg1;

  // Read the arguments of wasm::ExportFuncPtr according to the native ABI.
  // The entry stub's frame is 1 word.
  const unsigned argBase = sizeof(void*) + masm.framePushed();
  ABIArgGenerator abi;
  ABIArg arg;

  // arg 1: ExportArg*
  arg = abi.next(MIRType::Pointer);
  if (arg.kind() == ABIArg::GPR) {
    masm.movePtr(arg.gpr(), argv);
  } else {
    masm.loadPtr(
        Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
        argv);
  }

  // Arg 2: TlsData*
  arg = abi.next(MIRType::Pointer);
  if (arg.kind() == ABIArg::GPR) {
    masm.movePtr(arg.gpr(), WasmTlsReg);
  } else {
    masm.loadPtr(
        Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
        WasmTlsReg);
  }

#ifdef ENABLE_WASM_REFTYPES
  // Save the TLS pointer so it can be restored after the call.
  WasmPush(masm, WasmTlsReg);
#endif

  // Save 'argv' on the stack so that we can recover it after the call.
  WasmPush(masm, argv);

  // Since we're about to dynamically align the stack, reset the frame depth
  // so we can still assert static stack depth balancing.
  MOZ_ASSERT(masm.framePushed() == FramePushedBeforeAlign);
  masm.setFramePushed(0);

  // Dynamically align the stack since ABIStackAlignment is not necessarily
  // WasmStackAlignment. Preserve SP so it can be restored after the call.
#ifdef JS_CODEGEN_ARM64
  static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
#else
  masm.moveStackPtrTo(scratch);
  masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
  masm.Push(scratch);
#endif

  // Reserve stack space for the call.
  unsigned argDecrement =
      StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
                            StackArgBytes(fe.funcType().args()));
  masm.reserveStack(argDecrement);

  // Copy parameters out of argv and into the wasm ABI registers/stack-slots.
  SetupABIArguments(masm, fe, argv, scratch);

  // Setup wasm register state. The nullness of the frame pointer is used to
  // determine whether the call ended in success or failure.
  masm.movePtr(ImmWord(0), FramePointer);
  masm.loadWasmPinnedRegsFromTls();

  // Call into the real function. Note that, due to the throw stub, fp, tls
  // and pinned registers may be clobbered.
  masm.assertStackAlignment(WasmStackAlignment);
  CallFuncExport(masm, fe, funcPtr);
  masm.assertStackAlignment(WasmStackAlignment);

  // Pop the arguments pushed after the dynamic alignment.
  masm.freeStack(argDecrement);

  // Pop the stack pointer to its value right before dynamic alignment.
#ifdef JS_CODEGEN_ARM64
  static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
#else
  masm.PopStackPtr();
#endif
  MOZ_ASSERT(masm.framePushed() == 0);
  masm.setFramePushed(FramePushedBeforeAlign);

  // Recover the 'argv' pointer which was saved before aligning the stack.
  WasmPop(masm, argv);

#ifdef ENABLE_WASM_REFTYPES
  WasmPop(masm, WasmTlsReg);
#endif

  // Store the return value in argv[0].
  StoreABIReturn(masm, fe, argv);

  // After the ReturnReg is stored into argv[0] but before fp is clobbered by
  // the PopRegsInMask(NonVolatileRegs) below, set the return value based on
  // whether fp is null (which is the case for successful returns) or the
  // FailFP magic value (set by the throw stub);
  Label success, join;
  masm.branchTestPtr(Assembler::Zero, FramePointer, FramePointer, &success);
#ifdef DEBUG
  // In debug builds, require that a failing call left exactly FailFP in fp.
  Label ok;
  masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &ok);
  masm.breakpoint();
  masm.bind(&ok);
#endif
  masm.move32(Imm32(false), ReturnReg);
  masm.jump(&join);
  masm.bind(&success);
  masm.move32(Imm32(true), ReturnReg);
  masm.bind(&join);

  // Restore clobbered non-volatile registers of the caller.
  masm.PopRegsInMask(NonVolatileRegs);
  MOZ_ASSERT(masm.framePushed() == 0);

#if defined(JS_CODEGEN_ARM64)
  // The return address was pushed manually with WasmPush above; pop it back
  // into lr and return through it.
  masm.setFramePushed(WasmPushSize);
  WasmPop(masm, lr);
  masm.abiret();
#else
  masm.ret();
#endif

  return FinishOffsets(masm, offsets);
}
775
776
// Scratch operands used by the JIT entry stubs below. On 32-bit (nunbox)
// targets a Value requires two GPRs.
#ifdef JS_PUNBOX64
static const ValueOperand ScratchValIonEntry = ValueOperand(ABINonArgReg0);
#else
static const ValueOperand ScratchValIonEntry =
    ValueOperand(ABINonArgReg0, ABINonArgReg1);
#endif
static const Register ScratchIonEntry = ABINonArgReg2;
783
784
static void CallSymbolicAddress(MacroAssembler& masm, bool isAbsolute,
785
SymbolicAddress sym) {
786
if (isAbsolute) {
787
masm.call(ImmPtr(SymbolicAddressTarget(sym), ImmPtr::NoCheckToken()));
788
} else {
789
masm.call(sym);
790
}
791
}
792
793
// Load instance's TLS from the callee.
// |frameSize| is the number of bytes currently pushed below the JitFrame, so
// the callee token can be found relative to SP.
static void GenerateJitEntryLoadTls(MacroAssembler& masm, unsigned frameSize) {
  AssertExpectedSP(masm);

  // ScratchIonEntry := callee => JSFunction*
  unsigned offset = frameSize + JitFrameLayout::offsetOfCalleeToken();
  masm.loadFunctionFromCalleeToken(Address(masm.getStackPointer(), offset),
                                   ScratchIonEntry);

  // ScratchIonEntry := callee->getExtendedSlot(WASM_TLSDATA_SLOT)->toPrivate()
  //                 => TlsData*
  offset = FunctionExtended::offsetOfExtendedSlot(
      FunctionExtended::WASM_TLSDATA_SLOT);
  masm.loadPrivate(Address(ScratchIonEntry, offset), WasmTlsReg);
}
808
809
// Creates a JS fake exit frame for wasm, so the frame iterators just use
// JSJit frame iteration, then jumps to the instance's JSJit exception
// handler tail.
static void GenerateJitEntryThrow(MacroAssembler& masm, unsigned frameSize) {
  AssertExpectedSP(masm);

  MOZ_ASSERT(masm.framePushed() == frameSize);

  GenerateJitEntryLoadTls(masm, frameSize);

  // Unwind this stub's frame before building the exit frame.
  masm.freeStack(frameSize);
  MoveSPForJitABI(masm);

  // Fetch the JSContext from TLS and enter the fake exit frame.
  masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, cx)), ScratchIonEntry);
  masm.enterFakeExitFrameForWasm(ScratchIonEntry, ScratchIonEntry,
                                 ExitFrameType::WasmGenericJitEntry);

  // Tail-jump to the instance's stored JSJit exception handler.
  masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)),
               ScratchIonEntry);
  masm.loadPtr(
      Address(ScratchIonEntry, Instance::offsetOfJSJitExceptionHandler()),
      ScratchIonEntry);
  masm.jump(ScratchIonEntry);
}
832
833
// Helper function for allocating a BigInt and initializing it from an I64
// in GenerateJitEntry and GenerateImportInterpExit. The return result is
// written to scratch.
//
// |offset| is extra bytes already pushed beyond framePushed() that must be
// accounted for when re-aligning the stack; jumps to |fail| if allocation
// returns null. |fe| selects the call flavor (see CallSymbolicAddress).
#ifdef ENABLE_WASM_BIGINT
static void GenerateBigIntInitialization(MacroAssembler& masm, unsigned offset,
                                         Register64 input, Register scratch,
                                         const FuncExport* fe, Label* fail) {
# if JS_BITS_PER_WORD == 32
  MOZ_ASSERT(input.low != scratch);
  MOZ_ASSERT(input.high != scratch);
# else
  MOZ_ASSERT(input.reg != scratch);
# endif

  // We need to avoid clobbering other argument registers and the input.
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  masm.PushRegsInMask(save);

  unsigned frameSize =
      StackDecrementForCall(ABIStackAlignment, masm.framePushed() + offset, 0);
  masm.reserveStack(frameSize);
  masm.assertStackAlignment(ABIStackAlignment);

  // Needs to use a different call type depending on stub it's used from.
  if (fe) {
    CallSymbolicAddress(masm, !fe->hasEagerStubs(),
                        SymbolicAddress::AllocateBigInt);
  } else {
    masm.call(SymbolicAddress::AllocateBigInt);
  }
  masm.storeCallPointerResult(scratch);
  // A null result means the allocation failed.
  masm.branchTest32(Assembler::Zero, scratch, scratch, fail);

  masm.assertStackAlignment(ABIStackAlignment);
  masm.freeStack(frameSize);

  // Restore volatile registers, but keep the BigInt result in scratch.
  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(save, ignore);

  masm.initializeBigInt64(Scalar::BigInt64, scratch, input);
}
#endif
877
878
// Generate a stub that enters wasm from a jit code caller via the jit ABI.
879
//
880
// ARM64 note: This does not save the PseudoStackPointer so we must be sure to
881
// recompute it on every return path, be it normal return or exception return.
882
// The JIT code we return to assumes it is correct.
883
884
static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
885
const FuncExport& fe, const Maybe<ImmPtr>& funcPtr,
886
bool bigIntEnabled, Offsets* offsets) {
887
AssertExpectedSP(masm);
888
889
RegisterOrSP sp = masm.getStackPointer();
890
891
GenerateJitEntryPrologue(masm, offsets);
892
893
// The jit caller has set up the following stack layout (sp grows to the
894
// left):
895
// <-- retAddr | descriptor | callee | argc | this | arg1..N
896
897
unsigned normalBytesNeeded = StackArgBytes(fe.funcType().args());
898
899
MIRTypeVector coerceArgTypes;
900
MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Int32));
901
MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
902
MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
903
unsigned oolBytesNeeded = StackArgBytes(coerceArgTypes);
904
905
unsigned bytesNeeded = std::max(normalBytesNeeded, oolBytesNeeded);
906
907
// Note the jit caller ensures the stack is aligned *after* the call
908
// instruction.
909
unsigned frameSize = StackDecrementForCall(WasmStackAlignment,
910
masm.framePushed(), bytesNeeded);
911
912
// Reserve stack space for wasm ABI arguments, set up like this:
913
// <-- ABI args | padding
914
masm.reserveStack(frameSize);
915
916
GenerateJitEntryLoadTls(masm, frameSize);
917
918
if (fe.funcType().hasI64ArgOrRet() && !bigIntEnabled) {
919
CallSymbolicAddress(masm, !fe.hasEagerStubs(),
920
SymbolicAddress::ReportInt64JSCall);
921
GenerateJitEntryThrow(masm, frameSize);
922
return FinishOffsets(masm, offsets);
923
}
924
925
FloatRegister scratchF = ABINonArgDoubleReg;
926
Register scratchG = ScratchIonEntry;
927
ValueOperand scratchV = ScratchValIonEntry;
928
929
GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
930
fe.funcIndex());
931
932
// We do two loops:
933
// - one loop up-front will make sure that all the Value tags fit the
934
// expected signature argument types. If at least one inline conversion
935
// fails, we just jump to the OOL path which will call into C++. Inline
936
// conversions are ordered in the way we expect them to happen the most.
937
// - the second loop will unbox the arguments into the right registers.
938
Label oolCall;
939
for (size_t i = 0; i < fe.funcType().args().length(); i++) {
940
unsigned jitArgOffset = frameSize + JitFrameLayout::offsetOfActualArg(i);
941
Address jitArgAddr(sp, jitArgOffset);
942
masm.loadValue(jitArgAddr, scratchV);
943
944
Label next;
945
switch (fe.funcType().args()[i].code()) {
946
case ValType::I32: {
947
ScratchTagScope tag(masm, scratchV);
948
masm.splitTagForTest(scratchV, tag);
949
950
// For int32 inputs, just skip.
951
masm.branchTestInt32(Assembler::Equal, tag, &next);
952
953
// For double inputs, unbox, truncate and store back.
954
Label storeBack, notDouble;
955
masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
956
{
957
ScratchTagScopeRelease _(&tag);
958
masm.unboxDouble(scratchV, scratchF);
959
masm.branchTruncateDoubleMaybeModUint32(scratchF, scratchG, &oolCall);
960
masm.jump(&storeBack);
961
}
962
masm.bind(&notDouble);
963
964
// For null or undefined, store 0.
965
Label nullOrUndefined, notNullOrUndefined;
966
masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);
967
masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
968
masm.bind(&nullOrUndefined);
969
{
970
ScratchTagScopeRelease _(&tag);
971
masm.storeValue(Int32Value(0), jitArgAddr);
972
}
973
masm.jump(&next);
974
masm.bind(&notNullOrUndefined);
975
976
// For booleans, store the number value back. Other types (symbol,
977
// object, strings) go to the C++ call.
978
masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
979
masm.unboxBoolean(scratchV, scratchG);
980
// fallthrough:
981
982
masm.bind(&storeBack);
983
{
984
ScratchTagScopeRelease _(&tag);
985
masm.storeValue(JSVAL_TYPE_INT32, scratchG, jitArgAddr);
986
}
987
break;
988
}
989
#ifdef ENABLE_WASM_BIGINT
990
case ValType::I64: {
991
ScratchTagScope tag(masm, scratchV);
992
masm.splitTagForTest(scratchV, tag);
993
994
// For BigInt inputs, just skip. Otherwise go to C++ for other
995
// types that require creating a new BigInt or erroring.
996
masm.branchTestBigInt(Assembler::NotEqual, tag, &oolCall);
997
masm.jump(&next);
998
break;
999
}
1000
#endif
1001
case ValType::F32:
1002
case ValType::F64: {
1003
// Note we can reuse the same code for f32/f64 here, since for the
1004
// case of f32, the conversion of f64 to f32 will happen in the
1005
// second loop.
1006
ScratchTagScope tag(masm, scratchV);
1007
masm.splitTagForTest(scratchV, tag);
1008
1009
// For double inputs, just skip.
1010
masm.branchTestDouble(Assembler::Equal, tag, &next);
1011
1012
// For int32 inputs, convert and rebox.
1013
Label storeBack, notInt32;
1014
{
1015
ScratchTagScopeRelease _(&tag);
1016
masm.branchTestInt32(Assembler::NotEqual, scratchV, &notInt32);
1017
masm.int32ValueToDouble(scratchV, scratchF);
1018
masm.jump(&storeBack);
1019
}
1020
masm.bind(&notInt32);
1021
1022
// For undefined (missing argument), store NaN.
1023
Label notUndefined;
1024
masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
1025
{
1026
ScratchTagScopeRelease _(&tag);
1027
masm.storeValue(DoubleValue(JS::GenericNaN()), jitArgAddr);
1028
masm.jump(&next);
1029
}
1030
masm.bind(&notUndefined);
1031
1032
// +null is 0.
1033
Label notNull;
1034
masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
1035
{
1036
ScratchTagScopeRelease _(&tag);
1037
masm.storeValue(DoubleValue(0.), jitArgAddr);
1038
}
1039
masm.jump(&next);
1040
masm.bind(&notNull);
1041
1042
// For booleans, store the number value back. Other types (symbol,
1043
// object, strings) go to the C++ call.
1044
masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
1045
masm.boolValueToDouble(scratchV, scratchF);
1046
// fallthrough:
1047
1048
masm.bind(&storeBack);
1049
{
1050
ScratchTagScopeRelease _(&tag);
1051
masm.boxDouble(scratchF, jitArgAddr);
1052
}
1053
break;
1054
}
1055
case ValType::AnyRef: {
1056
ScratchTagScope tag(masm, scratchV);
1057
masm.splitTagForTest(scratchV, tag);
1058
1059
// For object inputs, we handle object and null inline, everything else
1060
// requires an actual box and we go out of line to allocate that.
1061
masm.branchTestObject(Assembler::Equal, tag, &next);
1062
masm.branchTestNull(Assembler::Equal, tag, &next);
1063
masm.jump(&oolCall);
1064
break;
1065
}
1066
default: {
1067
MOZ_CRASH("unexpected argument type when calling from the jit");
1068
}
1069
}
1070
masm.nopAlign(CodeAlignment);
1071
masm.bind(&next);
1072
}
1073
1074
Label rejoinBeforeCall;
1075
masm.bind(&rejoinBeforeCall);
1076
1077
// Convert all the expected values to unboxed values on the stack.
1078
for (ABIArgValTypeIter iter(fe.funcType().args()); !iter.done(); iter++) {
1079
unsigned jitArgOffset =
1080
frameSize + JitFrameLayout::offsetOfActualArg(iter.index());
1081
Address argv(sp, jitArgOffset);
1082
bool isStackArg = iter->kind() == ABIArg::Stack;
1083
switch (iter.mirType()) {
1084
case MIRType::Int32: {
1085
Register target = isStackArg ? ScratchIonEntry : iter->gpr();
1086
masm.unboxInt32(argv, target);
1087
GenPrintIsize(DebugChannel::Function, masm, target);
1088
if (isStackArg) {
1089
masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
1090
}
1091
break;
1092
}
1093
#ifdef ENABLE_WASM_BIGINT
1094
case MIRType::Int64: {
1095
// The coercion has provided a BigInt value by this point, which
1096
// we need to convert to an I64 here.
1097
if (isStackArg) {
1098
Address dst(sp, iter->offsetFromArgBase());
1099
Register src = scratchV.payloadOrValueReg();
1100
# if JS_BITS_PER_WORD == 64
1101
Register64 scratch64(scratchG);
1102
# else
1103
Register64 scratch64(scratchG, ABINonArgReg3);
1104
# endif
1105
masm.unboxBigInt(argv, src);
1106
masm.loadBigInt64(src, scratch64);
1107
GenPrintI64(DebugChannel::Function, masm, scratch64);
1108
masm.store64(scratch64, dst);
1109
} else {
1110
Register src = scratchG;
1111
Register64 target = iter->gpr64();
1112
masm.unboxBigInt(argv, src);
1113
masm.loadBigInt64(src, target);
1114
GenPrintI64(DebugChannel::Function, masm, target);
1115
}
1116
break;
1117
}
1118
#endif
1119
case MIRType::Float32: {
1120
FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
1121
masm.unboxDouble(argv, ABINonArgDoubleReg);
1122
masm.convertDoubleToFloat32(ABINonArgDoubleReg, target);
1123
GenPrintF32(DebugChannel::Function, masm, target.asSingle());
1124
if (isStackArg) {
1125
masm.storeFloat32(target, Address(sp, iter->offsetFromArgBase()));
1126
}
1127
break;
1128
}
1129
case MIRType::Double: {
1130
FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
1131
masm.unboxDouble(argv, target);
1132
GenPrintF64(DebugChannel::Function, masm, target);
1133
if (isStackArg) {
1134
masm.storeDouble(target, Address(sp, iter->offsetFromArgBase()));
1135
}
1136
break;
1137
}
1138
case MIRType::RefOrNull: {
1139
Register target = isStackArg ? ScratchIonEntry : iter->gpr();
1140
masm.unboxObjectOrNull(argv, target);
1141
GenPrintPtr(DebugChannel::Function, masm, target);
1142
if (isStackArg) {
1143
masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
1144
}
1145
break;
1146
}
1147
default: {
1148
MOZ_CRASH("unexpected input argument when calling from jit");
1149
}
1150
}
1151
}
1152
1153
GenPrintf(DebugChannel::Function, masm, "\n");
1154
1155
// Setup wasm register state.
1156
masm.loadWasmPinnedRegsFromTls();
1157
1158
// Call into the real function. Note that, due to the throw stub, fp, tls
1159
// and pinned registers may be clobbered.
1160
masm.assertStackAlignment(WasmStackAlignment);
1161
CallFuncExport(masm, fe, funcPtr);
1162
masm.assertStackAlignment(WasmStackAlignment);
1163
1164
// If fp is equal to the FailFP magic value (set by the throw stub), then
1165
// report the exception to the JIT caller by jumping into the exception
1166
// stub; otherwise the FP value is still set to the parent ion frame value.
1167
Label exception;
1168
masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &exception);
1169
1170
// Pop arguments.
1171
masm.freeStack(frameSize);
1172
1173
GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
1174
fe.funcIndex());
1175
1176
// Store the return value in the JSReturnOperand.
1177
const ValTypeVector& results = fe.funcType().results();
1178
if (results.length() == 0) {
1179
GenPrintf(DebugChannel::Function, masm, "void");
1180
masm.moveValue(UndefinedValue(), JSReturnOperand);
1181
} else {
1182
MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
1183
switch (results[0].code()) {
1184
case ValType::I32:
1185
GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
1186
masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand);
1187
break;
1188
case ValType::F32: {
1189
masm.canonicalizeFloat(ReturnFloat32Reg);
1190
masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
1191
GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
1192
ScratchDoubleScope fpscratch(masm);
1193
masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
1194
break;
1195
}
1196
case ValType::F64: {
1197
masm.canonicalizeDouble(ReturnDoubleReg);
1198
GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
1199
ScratchDoubleScope fpscratch(masm);
1200
masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
1201
break;
1202
}
1203
case ValType::I64: {
1204
#ifdef ENABLE_WASM_BIGINT
1205
GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
1206
GenerateBigIntInitialization(masm, 0, ReturnReg64, scratchG, &fe,
1207
&exception);
1208
masm.boxNonDouble(JSVAL_TYPE_BIGINT, scratchG, JSReturnOperand);
1209
break;
1210
#else
1211
MOZ_CRASH("unexpected return type when calling from ion to wasm");
1212
#endif
1213
}
1214
case ValType::FuncRef:
1215
// For FuncRef use the AnyRef path for now, since that will work.
1216
case ValType::AnyRef: {
1217
// Per comment above, the call may have clobbered the Tls register, so
1218
// reload since unboxing will need it.
1219
GenerateJitEntryLoadTls(masm, /* frameSize */ 0);
1220
UnboxAnyrefIntoValueReg(masm, WasmTlsReg, ReturnReg, JSReturnOperand,
1221
WasmJitEntryReturnScratch);
1222
break;
1223
}
1224
case ValType::Ref:
1225
MOZ_CRASH("returning reference in jitentry NYI");
1226
break;
1227
case ValType::NullRef:
1228
MOZ_CRASH("NullRef not expressible");
1229
}
1230
}
1231
1232
GenPrintf(DebugChannel::Function, masm, "\n");
1233
1234
MOZ_ASSERT(masm.framePushed() == 0);
1235
#ifdef JS_CODEGEN_ARM64
1236
masm.loadPtr(Address(sp, 0), lr);
1237
masm.addToStackPtr(Imm32(8));
1238
masm.moveStackPtrTo(PseudoStackPointer);
1239
masm.abiret();
1240
#else
1241
masm.ret();
1242
#endif
1243
1244
// Generate an OOL call to the C++ conversion path.
1245
if (fe.funcType().args().length()) {
1246
masm.bind(&oolCall);
1247
masm.setFramePushed(frameSize);
1248
1249
ABIArgMIRTypeIter argsIter(coerceArgTypes);
1250
1251
// argument 0: function export index.
1252
if (argsIter->kind() == ABIArg::GPR) {
1253
masm.movePtr(ImmWord(funcExportIndex), argsIter->gpr());
1254
} else {
1255
masm.storePtr(ImmWord(funcExportIndex),
1256
Address(sp, argsIter->offsetFromArgBase()));
1257
}
1258
argsIter++;
1259
1260
// argument 1: tlsData
1261
if (argsIter->kind() == ABIArg::GPR) {
1262
masm.movePtr(WasmTlsReg, argsIter->gpr());
1263
} else {
1264
masm.storePtr(WasmTlsReg, Address(sp, argsIter->offsetFromArgBase()));
1265
}
1266
argsIter++;
1267
1268
// argument 2: effective address of start of argv
1269
Address argv(sp, masm.framePushed() + JitFrameLayout::offsetOfActualArg(0));
1270
if (argsIter->kind() == ABIArg::GPR) {
1271
masm.computeEffectiveAddress(argv, argsIter->gpr());
1272
} else {
1273
masm.computeEffectiveAddress(argv, ScratchIonEntry);
1274
masm.storePtr(ScratchIonEntry,
1275
Address(sp, argsIter->offsetFromArgBase()));
1276
}
1277
argsIter++;
1278
MOZ_ASSERT(argsIter.done());
1279
1280
masm.assertStackAlignment(ABIStackAlignment);
1281
CallSymbolicAddress(masm, !fe.hasEagerStubs(),
1282
SymbolicAddress::CoerceInPlace_JitEntry);
1283
masm.assertStackAlignment(ABIStackAlignment);
1284
1285
masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg,
1286
&rejoinBeforeCall);
1287
}
1288
1289
// Prepare to throw: reload WasmTlsReg from the frame.
1290
masm.bind(&exception);
1291
masm.setFramePushed(frameSize);
1292
GenerateJitEntryThrow(masm, frameSize);
1293
1294
return FinishOffsets(masm, offsets);
1295
}
1296
1297
void wasm::GenerateDirectCallFromJit(MacroAssembler& masm, const FuncExport& fe,
1298
const Instance& inst,
1299
const JitCallStackArgVector& stackArgs,
1300
bool profilingEnabled, Register scratch,
1301
uint32_t* callOffset) {
1302
MOZ_ASSERT(!IsCompilingWasm());
1303
1304
size_t framePushedAtStart = masm.framePushed();
1305
1306
if (profilingEnabled) {
1307
// FramePointer isn't volatile, manually preserve it because it will be
1308
// clobbered below.
1309
masm.Push(FramePointer);
1310
} else {
1311
#ifdef DEBUG
1312
// Ensure that the FramePointer is actually Ion-volatile. This might
1313
// assert when bug 1426134 lands.
1314
AllocatableRegisterSet set(RegisterSet::All());
1315
TakeJitRegisters(/* profiling */ false, &set);
1316
MOZ_ASSERT(set.has(FramePointer),
1317
"replace the whole if branch by the then body when this fails");
1318
#endif
1319
}
1320
1321
// Note, if code here pushes a reference value into the frame for its own
1322
// purposes (and not just as an argument to the callee) then the frame must be
1323
// traced in TraceJitExitFrame, see the case there for DirectWasmJitCall. The
1324
// callee will trace values that are pushed as arguments, however.
1325
1326
// Push a special frame descriptor that indicates the frame size so we can
1327
// directly iterate from the current JIT frame without an extra call.
1328
*callOffset = masm.buildFakeExitFrame(scratch);
1329
masm.loadJSContext(scratch);
1330
1331
masm.moveStackPtrTo(FramePointer);
1332
masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::DirectWasmJitCall);
1333
masm.orPtr(Imm32(ExitOrJitEntryFPTag), FramePointer);
1334
1335
// Move stack arguments to their final locations.
1336
unsigned bytesNeeded = StackArgBytes(fe.funcType().args());
1337
bytesNeeded = StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
1338
bytesNeeded);
1339
if (bytesNeeded) {
1340
masm.reserveStack(bytesNeeded);
1341
}
1342
1343
GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
1344
fe.funcIndex());
1345
1346
for (ABIArgValTypeIter iter(fe.funcType().args()); !iter.done(); iter++) {
1347
MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != scratch);
1348
MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != FramePointer);
1349
if (iter->kind() != ABIArg::Stack) {
1350
switch (iter.mirType()) {
1351
case MIRType::Int32:
1352
GenPrintIsize(DebugChannel::Function, masm, iter->gpr());
1353
break;
1354
case MIRType::Float32:
1355
GenPrintF32(DebugChannel::Function, masm, iter->fpu());
1356
break;
1357
case MIRType::Double:
1358
GenPrintF64(DebugChannel::Function, masm, iter->fpu());
1359
break;
1360
case MIRType::RefOrNull:
1361
GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
1362
break;
1363
default:
1364
MOZ_CRASH("ion to wasm fast path can only handle i32/f32/f64");
1365
}
1366
continue;
1367
}
1368
1369
Address dst(masm.getStackPointer(), iter->offsetFromArgBase());
1370
1371
const JitCallStackArg& stackArg = stackArgs[iter.index()];
1372
switch (stackArg.tag()) {
1373
case JitCallStackArg::Tag::Imm32:
1374
GenPrintf(DebugChannel::Function, masm, "%d ", stackArg.imm32());
1375
masm.storePtr(ImmWord(stackArg.imm32()), dst);
1376
break;
1377
case JitCallStackArg::Tag::GPR:
1378
MOZ_ASSERT(stackArg.gpr() != scratch);
1379
MOZ_ASSERT(stackArg.gpr() != FramePointer);
1380
GenPrintIsize(DebugChannel::Function, masm, stackArg.gpr());
1381
masm.storePtr(stackArg.gpr(), dst);
1382
break;
1383
case JitCallStackArg::Tag::FPU:
1384
switch (iter.mirType()) {
1385
case MIRType::Double:
1386
GenPrintF64(DebugChannel::Function, masm, stackArg.fpu());
1387
masm.storeDouble(stackArg.fpu(), dst);
1388
break;
1389
case MIRType::Float32:
1390
GenPrintF32(DebugChannel::Function, masm, stackArg.fpu());
1391
masm.storeFloat32(stackArg.fpu(), dst);
1392
break;
1393
default:
1394
MOZ_CRASH(
1395
"unexpected MIR type for a float register in wasm fast call");
1396
}
1397
break;
1398
case JitCallStackArg::Tag::Address: {
1399
// The address offsets were valid *before* we pushed our frame.
1400
Address src = stackArg.addr();
1401
src.offset += masm.framePushed() - framePushedAtStart;
1402
switch (iter.mirType()) {
1403
case MIRType::Double: {
1404
ScratchDoubleScope fpscratch(masm);
1405
GenPrintF64(DebugChannel::Function, masm, fpscratch);
1406
masm.loadDouble(src, fpscratch);
1407
masm.storeDouble(fpscratch, dst);
1408
break;
1409
}
1410
case MIRType::Float32: {
1411
ScratchFloat32Scope fpscratch(masm);
1412
masm.loadFloat32(src, fpscratch);
1413
GenPrintF32(DebugChannel::Function, masm, fpscratch);
1414
masm.storeFloat32(fpscratch, dst);
1415
break;
1416
}
1417
case MIRType::Int32: {
1418
masm.loadPtr(src, scratch);
1419
GenPrintIsize(DebugChannel::Function, masm, scratch);
1420
masm.storePtr(scratch, dst);
1421
break;
1422
}
1423
case MIRType::RefOrNull: {
1424
masm.loadPtr(src, scratch);
1425
GenPrintPtr(DebugChannel::Function, masm, scratch);
1426
masm.storePtr(scratch, dst);
1427
break;
1428
}
1429
default: {
1430
MOZ_CRASH("unexpected MIR type for a stack slot in wasm fast call");
1431
}
1432
}
1433
break;
1434
}
1435
case JitCallStackArg::Tag::Undefined: {
1436
MOZ_CRASH("can't happen because of arg.kind() check");
1437
}
1438
}
1439
}
1440
1441
GenPrintf(DebugChannel::Function, masm, "\n");
1442
1443
// Load tls; from now on, WasmTlsReg is live.
1444
masm.movePtr(ImmPtr(inst.tlsData()), WasmTlsReg);
1445
masm.loadWasmPinnedRegsFromTls();
1446
1447
// Actual call.
1448
const CodeTier& codeTier = inst.code().codeTier(inst.code().bestTier());
1449
const MetadataTier& metadata = codeTier.metadata();
1450
const CodeRange& codeRange = metadata.codeRange(fe);
1451
void* callee = codeTier.segment().base() + codeRange.funcNormalEntry();
1452
1453
masm.assertStackAlignment(WasmStackAlignment);
1454
masm.callJit(ImmPtr(callee));
1455
#ifdef JS_CODEGEN_ARM64
1456
// WASM does not use the emulated stack pointer, so reinitialize it as it
1457
// might be clobbered either by WASM or by any C++ calls within.
1458
masm.initPseudoStackPtr();
1459
#endif
1460
masm.assertStackAlignment(WasmStackAlignment);
1461
1462
masm.branchPtr(Assembler::Equal, FramePointer, Imm32(wasm::FailFP),
1463
masm.exceptionLabel());
1464
1465
// Store the return value in the appropriate place.
1466
GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
1467
fe.funcIndex());
1468
const ValTypeVector& results = fe.funcType().results();
1469
if (results.length() == 0) {
1470
masm.moveValue(UndefinedValue(), JSReturnOperand);
1471
GenPrintf(DebugChannel::Function, masm, "void");
1472
} else {
1473
MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
1474
switch (results[0].code()) {
1475
case wasm::ValType::I32:
1476
// The return value is in ReturnReg, which is what Ion expects.
1477
GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
1478
break;
1479
case wasm::ValType::F32:
1480
masm.canonicalizeFloat(ReturnFloat32Reg);
1481
GenPrintF32(DebugChannel::Function, masm, ReturnFloat32Reg);
1482
break;
1483
case wasm::ValType::F64:
1484
masm.canonicalizeDouble(ReturnDoubleReg);
1485
GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
1486
break;
1487
case wasm::ValType::FuncRef:
1488
// For FuncRef, use the AnyRef path for now, since that will work.
1489
case wasm::ValType::AnyRef:
1490
// The call to wasm above preserves the WasmTlsReg, we don't need to
1491
// reload it here.
1492
UnboxAnyrefIntoValueReg(masm, WasmTlsReg, ReturnReg, JSReturnOperand,
1493
WasmJitEntryReturnScratch);
1494
break;
1495
case wasm::ValType::Ref:
1496
case wasm::ValType::I64:
1497
MOZ_CRASH("unexpected return type when calling from ion to wasm");
1498
case wasm::ValType::NullRef:
1499
MOZ_CRASH("NullRef not expressible");
1500
}
1501
}
1502
1503
GenPrintf(DebugChannel::Function, masm, "\n");
1504
1505
// Free args + frame descriptor.
1506
masm.leaveExitFrame(bytesNeeded + ExitFrameLayout::Size());
1507
1508
// If we pushed it, free FramePointer.
1509
if (profilingEnabled) {
1510
masm.Pop(FramePointer);
1511
}
1512
1513
MOZ_ASSERT(framePushedAtStart == masm.framePushed());
1514
}
1515
1516
// Copy one value of MIR type `type` from stack slot `src` to stack slot
// `dst`, using `scratch` (or a scoped FP scratch register) as the transfer
// register. Each copied value is also echoed to the Import debug channel.
static void StackCopy(MacroAssembler& masm, MIRType type, Register scratch,
                      Address src, Address dst) {
  switch (type) {
    case MIRType::Int32:
      masm.load32(src, scratch);
      GenPrintIsize(DebugChannel::Import, masm, scratch);
      masm.store32(scratch, dst);
      break;
    case MIRType::Int64:
#if JS_BITS_PER_WORD == 32
      // 32-bit target: move the 64-bit value as two 32-bit halves.
      GenPrintf(DebugChannel::Import, masm, "i64(");
      masm.load32(LowWord(src), scratch);
      GenPrintIsize(DebugChannel::Import, masm, scratch);
      masm.store32(scratch, LowWord(dst));
      masm.load32(HighWord(src), scratch);
      GenPrintIsize(DebugChannel::Import, masm, scratch);
      masm.store32(scratch, HighWord(dst));
      GenPrintf(DebugChannel::Import, masm, ") ");
#else
      {
        Register64 wide(scratch);
        masm.load64(src, wide);
        GenPrintIsize(DebugChannel::Import, masm, scratch);
        masm.store64(wide, dst);
      }
#endif
      break;
    case MIRType::RefOrNull:
    case MIRType::Pointer:
      masm.loadPtr(src, scratch);
      GenPrintPtr(DebugChannel::Import, masm, scratch);
      masm.storePtr(scratch, dst);
      break;
    case MIRType::Float32: {
      ScratchFloat32Scope fpscratch(masm);
      masm.loadFloat32(src, fpscratch);
      GenPrintF32(DebugChannel::Import, masm, fpscratch);
      masm.storeFloat32(fpscratch, dst);
      break;
    }
    case MIRType::Double: {
      ScratchDoubleScope fpscratch(masm);
      masm.loadDouble(src, fpscratch);
      GenPrintF64(DebugChannel::Import, masm, fpscratch);
      masm.storeDouble(fpscratch, dst);
      break;
    }
    default:
      MOZ_CRASH("StackCopy: unexpected type");
  }
}
1556
1557
// Flag type for FillArgumentArrayForExit: when true, arguments are stored as
// boxed JS Values (see the storeValue/UnboxAnyrefIntoValue branches there);
// when false, they are stored as raw machine values (store32/store64/storePtr).
typedef bool ToValue;
1558
1559
// Note, when toValue is true then this may destroy the values in incoming
1560
// argument registers as a result of Spectre mitigation.
1561
static void FillArgumentArrayForExit(
1562
MacroAssembler& masm, Register tls, unsigned funcImportIndex,
1563
const ValTypeVector& args, unsigned argOffset,
1564
unsigned offsetToCallerStackArgs, Register scratch, Register scratch2,
1565
Register scratch3, ToValue toValue, Label* throwLabel) {
1566
MOZ_ASSERT(scratch != scratch2);
1567
MOZ_ASSERT(scratch != scratch3);
1568
MOZ_ASSERT(scratch2 != scratch3);
1569
1570
// This loop does not root the values that are being constructed in
1571
// for the arguments. Allocations that are generated by code either
1572
// in the loop or called from it should be NoGC allocations.
1573
GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; arguments ",
1574
funcImportIndex);
1575
1576
for (ABIArgValTypeIter i(args); !i.done(); i++) {
1577
Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));
1578
1579
MIRType type = i.mirType();
1580
switch (i->kind()) {
1581
case ABIArg::GPR:
1582
if (type == MIRType::Int32) {
1583
GenPrintIsize(DebugChannel::Import, masm, i->gpr());
1584
if (toValue) {
1585
masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dst);
1586
} else {
1587
masm.store32(i->gpr(), dst);
1588
}
1589
} else if (type == MIRType::Int64) {
1590
GenPrintI64(DebugChannel::Import, masm, i->gpr64());
1591
1592
if (toValue) {
1593
#ifdef ENABLE_WASM_BIGINT
1594
GenerateBigIntInitialization(masm, offsetToCallerStackArgs,
1595
i->gpr64(), scratch, nullptr,
1596
throwLabel);
1597
masm.storeValue(JSVAL_TYPE_BIGINT, scratch, dst);
1598
#else
1599
// Should be unreachable as I64 cases should error earlier.
1600
masm.breakpoint();
1601
#endif
1602
} else {
1603
masm.store64(i->gpr64(), dst);
1604
}
1605
} else if (type == MIRType::RefOrNull) {
1606
if (toValue) {
1607
// This works also for FuncRef because it is distinguishable from
1608
// a boxed AnyRef.
1609
masm.movePtr(i->gpr(), scratch2);
1610
UnboxAnyrefIntoValue(masm, tls, scratch2, dst, scratch);
1611
} else {
1612
GenPrintPtr(DebugChannel::Import, masm, i->gpr());
1613
masm.storePtr(i->gpr(), dst);
1614
}
1615
} else {
1616
MOZ_CRASH("FillArgumentArrayForExit, ABIArg::GPR: unexpected type");
1617
}
1618
break;
1619
#ifdef JS_CODEGEN_REGISTER_PAIR
1620
case ABIArg::GPR_PAIR:
1621
if (type == MIRType::Int64) {
1622
GenPrintI64(DebugChannel::Import, masm, i->gpr64());
1623
1624
if (toValue) {
1625
# ifdef ENABLE_WASM_BIGINT
1626
GenerateBigIntInitialization(masm, offsetToCallerStackArgs,
1627
i->gpr64(), scratch, nullptr,
1628
throwLabel);
1629
masm.storeValue(JSVAL_TYPE_BIGINT, scratch, dst);
1630
# else
1631
masm.breakpoint();
1632
# endif
1633
} else {
1634
masm.store64(i->gpr64(), dst);
1635
}
1636
} else {
1637
MOZ_CRASH("wasm uses hardfp for function calls.");
1638
}
1639
break;
1640
#endif
1641
case ABIArg::FPU: {
1642
MOZ_ASSERT(IsFloatingPointType(type));
1643
FloatRegister srcReg = i->fpu();
1644
if (type == MIRType::Double) {
1645
if (toValue) {
1646
// Preserve the NaN pattern in the input.
1647
ScratchDoubleScope fpscratch(masm);