/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright 2015 Mozilla Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "wasm/WasmIonCompile.h"

#include "mozilla/MathAlgorithms.h"

#include <algorithm>

#include "jit/CodeGenerator.h"

#include "wasm/WasmBaselineCompile.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmOpIter.h"
#include "wasm/WasmSignalHandlers.h"
#include "wasm/WasmStubs.h"
#include "wasm/WasmValidate.h"

using namespace js;
using namespace js::jit;
using namespace js::wasm;

using mozilla::IsPowerOfTwo;
using mozilla::Maybe;
using mozilla::Nothing;
using mozilla::Some;

namespace {

typedef Vector<MBasicBlock*, 8, SystemAllocPolicy> BlockVector;
typedef Vector<MDefinition*, 8, SystemAllocPolicy> DefVector;

struct IonCompilePolicy {
  // We store SSA definitions in the value stack.
  typedef MDefinition* Value;
  typedef DefVector ValueVector;

  // We store loop headers and then/else blocks in the control flow stack.
  typedef MBasicBlock* ControlItem;
};

typedef OpIter<IonCompilePolicy> IonOpIter;

class FunctionCompiler;

// CallCompileState describes a call that is being compiled.

class CallCompileState {
  // A generator object that is passed each argument as it is compiled.
  ABIArgGenerator abi_;

  // Accumulates the register arguments while compiling arguments.
  MWasmCall::Args regArgs_;

  // Reserved argument for passing Instance* to builtin instance method calls.
  ABIArg instanceArg_;

  // Only FunctionCompiler should be directly manipulating CallCompileState.
  friend class FunctionCompiler;
};

// Encapsulates the compilation of a single function in a wasm (or asm.js)
// module. The function compiler handles the creation and final backend
// compilation of the MIR graph.
class FunctionCompiler {
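  // A ControlFlowPatch records a control instruction whose successor at
  // |index| cannot yet be bound because the target block for the
  // corresponding wasm label does not exist yet; bindBranches() resolves
  // these patches once the label's join block is created.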
  struct ControlFlowPatch {
    MControlInstruction* ins;
    uint32_t index;
    ControlFlowPatch(MControlInstruction* ins, uint32_t index)
        : ins(ins), index(index) {}
  };

  typedef Vector<ControlFlowPatch, 0, SystemAllocPolicy> ControlFlowPatchVector;
  typedef Vector<ControlFlowPatchVector, 0, SystemAllocPolicy>
      ControlFlowPatchsVector;

  const ModuleEnvironment& env_;
  IonOpIter iter_;
  const FuncCompileInput& func_;
  const ValTypeVector& locals_;
  size_t lastReadCallSite_;

  TempAllocator& alloc_;
  MIRGraph& graph_;
  const CompileInfo& info_;
  MIRGenerator& mirGen_;

  MBasicBlock* curBlock_;
  uint32_t maxStackArgBytes_;

  uint32_t loopDepth_;
  uint32_t blockDepth_;
  ControlFlowPatchsVector blockPatches_;

  // TLS pointer argument to the current function.
  MWasmParameter* tlsPointer_;

 public:
  FunctionCompiler(const ModuleEnvironment& env, Decoder& decoder,
                   const FuncCompileInput& func, const ValTypeVector& locals,
                   MIRGenerator& mirGen)
      : env_(env),
        iter_(env, decoder),
        func_(func),
        locals_(locals),
        lastReadCallSite_(0),
        alloc_(mirGen.alloc()),
        graph_(mirGen.graph()),
        info_(mirGen.info()),
        mirGen_(mirGen),
        curBlock_(nullptr),
        maxStackArgBytes_(0),
        loopDepth_(0),
        blockDepth_(0),
        tlsPointer_(nullptr) {}

  const ModuleEnvironment& env() const { return env_; }
  IonOpIter& iter() { return iter_; }
  TempAllocator& alloc() const { return alloc_; }
  // FIXME(1401675): Replace with BlockType.
  uint32_t funcIndex() const { return func_.index; }
  const FuncType& funcType() const { return *env_.funcTypes[func_.index]; }

  BytecodeOffset bytecodeOffset() const { return iter_.bytecodeOffset(); }
  BytecodeOffset bytecodeIfNotAsmJS() const {
    return env_.isAsmJS() ? BytecodeOffset() : iter_.bytecodeOffset();
  }

  bool init() {
    // Prepare the entry block for MIR generation:

    const ValTypeVector& args = funcType().args();

    if (!mirGen_.ensureBallast()) {
      return false;
    }
    if (!newBlock(/* prev */ nullptr, &curBlock_)) {
      return false;
    }

    for (ABIArgIter<ValTypeVector> i(args); !i.done(); i++) {
      MOZ_ASSERT(i.mirType() != MIRType::Pointer);
      MWasmParameter* ins = MWasmParameter::New(alloc(), *i, i.mirType());
      curBlock_->add(ins);
      curBlock_->initSlot(info().localSlot(i.index()), ins);
      if (!mirGen_.ensureBallast()) {
        return false;
      }
    }

    // Set up a parameter that receives the hidden TLS pointer argument.
    tlsPointer_ =
        MWasmParameter::New(alloc(), ABIArg(WasmTlsReg), MIRType::Pointer);
    curBlock_->add(tlsPointer_);
    if (!mirGen_.ensureBallast()) {
      return false;
    }

    for (size_t i = args.length(); i < locals_.length(); i++) {
      MInstruction* ins = nullptr;
      switch (locals_[i].code()) {
        case ValType::I32:
          ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
          break;
        case ValType::I64:
          ins = MConstant::NewInt64(alloc(), 0);
          break;
        case ValType::F32:
          ins = MConstant::New(alloc(), Float32Value(0.f), MIRType::Float32);
          break;
        case ValType::F64:
          ins = MConstant::New(alloc(), DoubleValue(0.0), MIRType::Double);
          break;
        case ValType::Ref:
        case ValType::FuncRef:
        case ValType::AnyRef:
          ins = MWasmNullConstant::New(alloc());
          break;
        case ValType::NullRef:
          MOZ_CRASH("NullRef not expressible");
      }

      curBlock_->add(ins);
      curBlock_->initSlot(info().localSlot(i), ins);
      if (!mirGen_.ensureBallast()) {
        return false;
      }
    }

    return true;
  }

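  // Called once the function body has been fully decoded: reports the
  // maximum outgoing stack-argument size to the MIRGenerator and asserts
  // that all control flow and call sites have been resolved.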
  void finish() {
    mirGen().initWasmMaxStackArgBytes(maxStackArgBytes_);

    MOZ_ASSERT(loopDepth_ == 0);
    MOZ_ASSERT(blockDepth_ == 0);
#ifdef DEBUG
    for (ControlFlowPatchVector& patches : blockPatches_) {
      MOZ_ASSERT(patches.empty());
    }
#endif
    MOZ_ASSERT(inDeadCode());
    MOZ_ASSERT(done(), "all bytes must be consumed");
    MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
  }

  /************************* Read-only interface (after local scope setup) */

  MIRGenerator& mirGen() const { return mirGen_; }
  MIRGraph& mirGraph() const { return graph_; }
  const CompileInfo& info() const { return info_; }

  MDefinition* getLocalDef(unsigned slot) {
    if (inDeadCode()) {
      return nullptr;
    }
    return curBlock_->getSlot(info().localSlot(slot));
  }

  const ValTypeVector& locals() const { return locals_; }

  /***************************** Code generation (after local scope setup) */

  MDefinition* constant(const Value& v, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    MConstant* constant = MConstant::New(alloc(), v, type);
    curBlock_->add(constant);
    return constant;
  }

  MDefinition* constant(float f) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* cst = MWasmFloatConstant::NewFloat32(alloc(), f);
    curBlock_->add(cst);
    return cst;
  }

  MDefinition* constant(double d) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* cst = MWasmFloatConstant::NewDouble(alloc(), d);
    curBlock_->add(cst);
    return cst;
  }

  MDefinition* constant(int64_t i) {
    if (inDeadCode()) {
      return nullptr;
    }
    MConstant* constant = MConstant::NewInt64(alloc(), i);
    curBlock_->add(constant);
    return constant;
  }

  MDefinition* nullRefConstant() {
    if (inDeadCode()) {
      return nullptr;
    }
    // MConstant has a lot of baggage so we don't use that here.
    MWasmNullConstant* constant = MWasmNullConstant::New(alloc());
    curBlock_->add(constant);
    return constant;
  }

  void fence() {
    if (inDeadCode()) {
      return;
    }
    MWasmFence* ins = MWasmFence::New(alloc());
    curBlock_->add(ins);
  }

  template <class T>
  MDefinition* unary(MDefinition* op) {
    if (inDeadCode()) {
      return nullptr;
    }
    T* ins = T::New(alloc(), op);
    curBlock_->add(ins);
    return ins;
  }

  template <class T>
  MDefinition* unary(MDefinition* op, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    T* ins = T::New(alloc(), op, type);
    curBlock_->add(ins);
    return ins;
  }

  template <class T>
  MDefinition* binary(MDefinition* lhs, MDefinition* rhs) {
    if (inDeadCode()) {
      return nullptr;
    }
    T* ins = T::New(alloc(), lhs, rhs);
    curBlock_->add(ins);
    return ins;
  }

  template <class T>
  MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    T* ins = T::New(alloc(), lhs, rhs, type);
    curBlock_->add(ins);
    return ins;
  }

  bool mustPreserveNaN(MIRType type) {
    return IsFloatingPointType(type) && !env().isAsmJS();
  }

  MDefinition* sub(MDefinition* lhs, MDefinition* rhs, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }

    // wasm can't fold x - 0.0 because of NaN with custom payloads.
    MSub* ins = MSub::New(alloc(), lhs, rhs, type, mustPreserveNaN(type));
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* nearbyInt(MDefinition* input, RoundingMode roundingMode) {
    if (inDeadCode()) {
      return nullptr;
    }

    auto* ins = MNearbyInt::New(alloc(), input, input->type(), roundingMode);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type,
                      bool isMax) {
    if (inDeadCode()) {
      return nullptr;
    }

    if (mustPreserveNaN(type)) {
      // Convert signaling NaN to quiet NaNs.
      MDefinition* zero = constant(DoubleValue(0.0), type);
      lhs = sub(lhs, zero, type);
      rhs = sub(rhs, zero, type);
    }

    MMinMax* ins = MMinMax::NewWasm(alloc(), lhs, rhs, type, isMax);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* mul(MDefinition* lhs, MDefinition* rhs, MIRType type,
                   MMul::Mode mode) {
    if (inDeadCode()) {
      return nullptr;
    }

    // wasm can't fold x * 1.0 because of NaN with custom payloads.
    auto* ins =
        MMul::NewWasm(alloc(), lhs, rhs, type, mode, mustPreserveNaN(type));
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* div(MDefinition* lhs, MDefinition* rhs, MIRType type,
                   bool unsignd) {
    if (inDeadCode()) {
      return nullptr;
    }
    bool trapOnError = !env().isAsmJS();
    if (!unsignd && type == MIRType::Int32) {
      // Enforce the signedness of the operation by coercing the operands
      // to signed. Otherwise, operands that "look" unsigned to Ion but
      // are not unsigned to Baldr (eg, unsigned right shifts) may lead to
      // the operation being executed unsigned. Applies to mod() as well.
      //
      // Do this for Int32 only since Int64 is not subject to the same
      // issues.
      //
      // Note the offsets passed to MTruncateToInt32 are wrong here, but
      // it doesn't matter: they're not codegen'd to calls since inputs
      // already are int32.
      auto* lhs2 = MTruncateToInt32::New(alloc(), lhs);
      curBlock_->add(lhs2);
      lhs = lhs2;
      auto* rhs2 = MTruncateToInt32::New(alloc(), rhs);
      curBlock_->add(rhs2);
      rhs = rhs2;
    }
    auto* ins = MDiv::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
                          bytecodeOffset(), mustPreserveNaN(type));
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* mod(MDefinition* lhs, MDefinition* rhs, MIRType type,
                   bool unsignd) {
    if (inDeadCode()) {
      return nullptr;
    }
    bool trapOnError = !env().isAsmJS();
    if (!unsignd && type == MIRType::Int32) {
      // See block comment in div().
      auto* lhs2 = MTruncateToInt32::New(alloc(), lhs);
      curBlock_->add(lhs2);
      lhs = lhs2;
      auto* rhs2 = MTruncateToInt32::New(alloc(), rhs);
      curBlock_->add(rhs2);
      rhs = rhs2;
    }
    auto* ins = MMod::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
                          bytecodeOffset());
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* bitnot(MDefinition* op) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MBitNot::NewInt32(alloc(), op);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* select(MDefinition* trueExpr, MDefinition* falseExpr,
                      MDefinition* condExpr) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MWasmSelect::New(alloc(), trueExpr, falseExpr, condExpr);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* extendI32(MDefinition* op, bool isUnsigned) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MExtendInt32ToInt64::New(alloc(), op, isUnsigned);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* signExtend(MDefinition* op, uint32_t srcSize,
                          uint32_t targetSize) {
    if (inDeadCode()) {
      return nullptr;
    }
    MInstruction* ins;
    switch (targetSize) {
      case 4: {
        MSignExtendInt32::Mode mode;
        switch (srcSize) {
          case 1:
            mode = MSignExtendInt32::Byte;
            break;
          case 2:
            mode = MSignExtendInt32::Half;
            break;
          default:
            MOZ_CRASH("Bad sign extension");
        }
        ins = MSignExtendInt32::New(alloc(), op, mode);
        break;
      }
      case 8: {
        MSignExtendInt64::Mode mode;
        switch (srcSize) {
          case 1:
            mode = MSignExtendInt64::Byte;
            break;
          case 2:
            mode = MSignExtendInt64::Half;
            break;
          case 4:
            mode = MSignExtendInt64::Word;
            break;
          default:
            MOZ_CRASH("Bad sign extension");
        }
        ins = MSignExtendInt64::New(alloc(), op, mode);
        break;
      }
      default: {
        MOZ_CRASH("Bad sign extension");
      }
    }
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* convertI64ToFloatingPoint(MDefinition* op, MIRType type,
                                         bool isUnsigned) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MInt64ToFloatingPoint::New(alloc(), op, type, bytecodeOffset(),
                                           isUnsigned);
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* rotate(MDefinition* input, MDefinition* count, MIRType type,
                      bool left) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MRotate::New(alloc(), input, count, type, left);
    curBlock_->add(ins);
    return ins;
  }

  template <class T>
  MDefinition* truncate(MDefinition* op, TruncFlags flags) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = T::New(alloc(), op, flags, bytecodeOffset());
    curBlock_->add(ins);
    return ins;
  }

  MDefinition* compare(MDefinition* lhs, MDefinition* rhs, JSOp op,
                       MCompare::CompareType type) {
    if (inDeadCode()) {
      return nullptr;
    }
    auto* ins = MCompare::New(alloc(), lhs, rhs, op, type);
    curBlock_->add(ins);
    return ins;
  }

  void assign(unsigned slot, MDefinition* def) {
    if (inDeadCode()) {
      return;
    }
    curBlock_->setSlot(info().localSlot(slot), def);
  }

 private:
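  // On x86 the wasm memory base is not kept in a pinned register, so it is
  // loaded from TlsData here; on other targets no load is emitted and
  // nullptr is returned (the dedicated heap base register is used instead).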
  MWasmLoadTls* maybeLoadMemoryBase() {
    MWasmLoadTls* load = nullptr;
#ifdef JS_CODEGEN_X86
    AliasSet aliases = env_.maxMemoryLength.isSome()
                           ? AliasSet::None()
                           : AliasSet::Load(AliasSet::WasmHeapMeta);
    load = MWasmLoadTls::New(alloc(), tlsPointer_,
                             offsetof(wasm::TlsData, memoryBase),
                             MIRType::Pointer, aliases);
    curBlock_->add(load);
#endif
    return load;
  }

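  // Loads the bounds-check limit from TlsData, or returns nullptr when huge
  // memory is enabled and no explicit bounds check is needed.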
  MWasmLoadTls* maybeLoadBoundsCheckLimit() {
    if (env_.hugeMemoryEnabled()) {
      return nullptr;
    }
    AliasSet aliases = env_.maxMemoryLength.isSome()
                           ? AliasSet::None()
                           : AliasSet::Load(AliasSet::WasmHeapMeta);
    auto load = MWasmLoadTls::New(alloc(), tlsPointer_,
                                  offsetof(wasm::TlsData, boundsCheckLimit),
                                  MIRType::Int32, aliases);
    curBlock_->add(load);
    return load;
  }

 public:
  MWasmHeapBase* memoryBase() {
    MWasmHeapBase* base = nullptr;
    AliasSet aliases = env_.maxMemoryLength.isSome()
                           ? AliasSet::None()
                           : AliasSet::Load(AliasSet::WasmHeapMeta);
    base = MWasmHeapBase::New(alloc(), tlsPointer_, aliases);
    curBlock_->add(base);
    return base;
  }

 private:
  // Only sets *mustAdd if it also returns true.
  bool needAlignmentCheck(MemoryAccessDesc* access, MDefinition* base,
                          bool* mustAdd) {
    MOZ_ASSERT(!*mustAdd);

    // asm.js accesses are always aligned and need no checks.
    if (env_.isAsmJS() || !access->isAtomic()) {
      return false;
    }

    if (base->isConstant()) {
      int32_t ptr = base->toConstant()->toInt32();
      // OK to wrap around the address computation here.
      if (((ptr + access->offset()) & (access->byteSize() - 1)) == 0) {
        return false;
      }
    }

    *mustAdd = (access->offset() & (access->byteSize() - 1)) != 0;
    return true;
  }

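  // Fold a constant base pointer into the access offset where possible, add
  // an alignment check for atomic accesses that need one, and emit an
  // explicit bounds check unless huge memory makes it unnecessary. May
  // replace *base with a new definition.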
  void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access,
                                        MDefinition** base) {
    MOZ_ASSERT(!inDeadCode());

    uint32_t offsetGuardLimit = GetOffsetGuardLimit(env_.hugeMemoryEnabled());

    // Fold a constant base into the offset (so the base is 0 in which case
    // the codegen is optimized), if it doesn't wrap or trigger an
    // MWasmAddOffset.
    if ((*base)->isConstant()) {
      uint32_t basePtr = (*base)->toConstant()->toInt32();
      uint32_t offset = access->offset();

      if (offset < offsetGuardLimit && basePtr < offsetGuardLimit - offset) {
        auto* ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
        curBlock_->add(ins);
        *base = ins;
        access->setOffset(access->offset() + basePtr);
      }
    }

    bool mustAdd = false;
    bool alignmentCheck = needAlignmentCheck(access, *base, &mustAdd);

    // If the offset is bigger than the guard region, a separate instruction
    // is necessary to add the offset to the base and check for overflow.
    //
    // Also add the offset if we have a Wasm atomic access that needs
    // alignment checking and the offset affects alignment.
    if (access->offset() >= offsetGuardLimit || mustAdd ||
        !JitOptions.wasmFoldOffsets) {
      *base = computeEffectiveAddress(*base, access);
    }

    if (alignmentCheck) {
      curBlock_->add(MWasmAlignmentCheck::New(
          alloc(), *base, access->byteSize(), bytecodeOffset()));
    }

    MWasmLoadTls* boundsCheckLimit = maybeLoadBoundsCheckLimit();
    if (boundsCheckLimit) {
      auto* ins = MWasmBoundsCheck::New(alloc(), *base, boundsCheckLimit,
                                        bytecodeOffset());
      curBlock_->add(ins);
      if (JitOptions.spectreIndexMasking) {
        *base = ins;
      }
    }
  }

  bool isSmallerAccessForI64(ValType result, const MemoryAccessDesc* access) {
    if (result == ValType::I64 && access->byteSize() <= 4) {
      // These smaller accesses should all be zero-extending.
      MOZ_ASSERT(!isSignedIntType(access->type()));
      return true;
    }
    return false;
  }

 public:
  MDefinition* computeEffectiveAddress(MDefinition* base,
                                       MemoryAccessDesc* access) {
    if (inDeadCode()) {
      return nullptr;
    }
    if (!access->offset()) {
      return base;
    }
    auto* ins =
        MWasmAddOffset::New(alloc(), base, access->offset(), bytecodeOffset());
    curBlock_->add(ins);
    access->clearOffset();
    return ins;
  }

  MDefinition* load(MDefinition* base, MemoryAccessDesc* access,
                    ValType result) {
    if (inDeadCode()) {
      return nullptr;
    }

    MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
    MInstruction* load = nullptr;
    if (env_.isAsmJS()) {
      MOZ_ASSERT(access->offset() == 0);
      MWasmLoadTls* boundsCheckLimit = maybeLoadBoundsCheckLimit();
      load = MAsmJSLoadHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
                                 access->type());
    } else {
      checkOffsetAndAlignmentAndBounds(access, &base);
      load =
          MWasmLoad::New(alloc(), memoryBase, base, *access, ToMIRType(result));
    }
    if (!load) {
      return nullptr;
    }
    curBlock_->add(load);
    return load;
  }

  void store(MDefinition* base, MemoryAccessDesc* access, MDefinition* v) {
    if (inDeadCode()) {
      return;
    }

    MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
    MInstruction* store = nullptr;
    if (env_.isAsmJS()) {
      MOZ_ASSERT(access->offset() == 0);
      MWasmLoadTls* boundsCheckLimit = maybeLoadBoundsCheckLimit();
      store = MAsmJSStoreHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
                                   access->type(), v);
    } else {
      checkOffsetAndAlignmentAndBounds(access, &base);
      store = MWasmStore::New(alloc(), memoryBase, base, *access, v);
    }
    if (!store) {
      return;
    }
    curBlock_->add(store);
  }

  MDefinition* atomicCompareExchangeHeap(MDefinition* base,
                                         MemoryAccessDesc* access,
                                         ValType result, MDefinition* oldv,
                                         MDefinition* newv) {
    if (inDeadCode()) {
      return nullptr;
    }

    checkOffsetAndAlignmentAndBounds(access, &base);

    if (isSmallerAccessForI64(result, access)) {
      auto* cvtOldv =
          MWrapInt64ToInt32::New(alloc(), oldv, /*bottomHalf=*/true);
      curBlock_->add(cvtOldv);
      oldv = cvtOldv;

      auto* cvtNewv =
          MWrapInt64ToInt32::New(alloc(), newv, /*bottomHalf=*/true);
      curBlock_->add(cvtNewv);
      newv = cvtNewv;
    }

    MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
    MInstruction* cas =
        MWasmCompareExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
                                      base, *access, oldv, newv, tlsPointer_);
    if (!cas) {
      return nullptr;
    }
    curBlock_->add(cas);

    if (isSmallerAccessForI64(result, access)) {
      cas = MExtendInt32ToInt64::New(alloc(), cas, true);
      curBlock_->add(cas);
    }

    return cas;
  }

  MDefinition* atomicExchangeHeap(MDefinition* base, MemoryAccessDesc* access,
                                  ValType result, MDefinition* value) {
    if (inDeadCode()) {
      return nullptr;
    }

    checkOffsetAndAlignmentAndBounds(access, &base);

    if (isSmallerAccessForI64(result, access)) {
      auto* cvtValue =
          MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
      curBlock_->add(cvtValue);
      value = cvtValue;
    }

    MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
    MInstruction* xchg =
        MWasmAtomicExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
                                     base, *access, value, tlsPointer_);
    if (!xchg) {
      return nullptr;
    }
    curBlock_->add(xchg);

    if (isSmallerAccessForI64(result, access)) {
      xchg = MExtendInt32ToInt64::New(alloc(), xchg, true);
      curBlock_->add(xchg);
    }

    return xchg;
  }

  MDefinition* atomicBinopHeap(AtomicOp op, MDefinition* base,
                               MemoryAccessDesc* access, ValType result,
                               MDefinition* value) {
    if (inDeadCode()) {
      return nullptr;
    }

    checkOffsetAndAlignmentAndBounds(access, &base);

    if (isSmallerAccessForI64(result, access)) {
      auto* cvtValue =
          MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
      curBlock_->add(cvtValue);
      value = cvtValue;
    }

    MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
    MInstruction* binop =
        MWasmAtomicBinopHeap::New(alloc(), bytecodeOffset(), op, memoryBase,
                                  base, *access, value, tlsPointer_);
    if (!binop) {
      return nullptr;
    }
    curBlock_->add(binop);

    if (isSmallerAccessForI64(result, access)) {
      binop = MExtendInt32ToInt64::New(alloc(), binop, true);
      curBlock_->add(binop);
    }

    return binop;
  }

  MDefinition* loadGlobalVar(unsigned globalDataOffset, bool isConst,
                             bool isIndirect, MIRType type) {
    if (inDeadCode()) {
      return nullptr;
    }

    MInstruction* load;
    if (isIndirect) {
      // Pull a pointer to the value out of TlsData::globalArea, then
      // load from that pointer. Note that the pointer is immutable
      // even though the value it points at may change, hence the use of
      // |true| for the first node's |isConst| value, irrespective of
      // the |isConst| formal parameter to this method. The latter
      // applies to the denoted value as a whole.
      auto* cellPtr =
          MWasmLoadGlobalVar::New(alloc(), MIRType::Pointer, globalDataOffset,
                                  /*isConst=*/true, tlsPointer_);
      curBlock_->add(cellPtr);
      load = MWasmLoadGlobalCell::New(alloc(), type, cellPtr);
    } else {
      // Pull the value directly out of TlsData::globalArea.
      load = MWasmLoadGlobalVar::New(alloc(), type, globalDataOffset, isConst,
                                     tlsPointer_);
    }
    curBlock_->add(load);
    return load;
  }

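  // Returns the address of the written global cell when a reference value
  // was stored (so that the caller can emit a post-write barrier if needed),
  // or nullptr for non-reference stores.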
  MInstruction* storeGlobalVar(uint32_t globalDataOffset, bool isIndirect,
                               MDefinition* v) {
    if (inDeadCode()) {
      return nullptr;
    }

    MInstruction* store;
    MInstruction* valueAddr = nullptr;
    if (isIndirect) {
      // Pull a pointer to the value out of TlsData::globalArea, then
      // store through that pointer.
      auto* cellPtr =
          MWasmLoadGlobalVar::New(alloc(), MIRType::Pointer, globalDataOffset,
                                  /*isConst=*/true, tlsPointer_);
      curBlock_->add(cellPtr);
      if (v->type() == MIRType::RefOrNull) {
        valueAddr = cellPtr;
        store = MWasmStoreRef::New(alloc(), tlsPointer_, valueAddr, v,
                                   AliasSet::WasmGlobalCell);
      } else {
        store = MWasmStoreGlobalCell::New(alloc(), v, cellPtr);
      }
    } else {
      // Store the value directly in TlsData::globalArea.
      if (v->type() == MIRType::RefOrNull) {
        valueAddr = MWasmDerivedPointer::New(
            alloc(), tlsPointer_,
            offsetof(wasm::TlsData, globalArea) + globalDataOffset);
        curBlock_->add(valueAddr);
        store = MWasmStoreRef::New(alloc(), tlsPointer_, valueAddr, v,
                                   AliasSet::WasmGlobalVar);
      } else {
        store =
            MWasmStoreGlobalVar::New(alloc(), globalDataOffset, v, tlsPointer_);
      }
    }
    curBlock_->add(store);

    return valueAddr;
  }

  void addInterruptCheck() {
    if (inDeadCode()) {
      return;
    }
    curBlock_->add(
        MWasmInterruptCheck::New(alloc(), tlsPointer_, bytecodeOffset()));
  }

  /***************************************************************** Calls */

  // The IonMonkey backend maintains a single stack offset (from the stack
  // pointer to the base of the frame) by adding the total amount of spill
  // space required plus the maximum stack required for argument passing.
  // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
  // manually accumulate, for the entire function, the maximum required stack
  // space for argument passing. (This is passed to the CodeGenerator via
  // MIRGenerator::maxWasmStackArgBytes.) This is just the maximum of the
  // stack space required for each individual call (as determined by the call
  // ABI).

  // Operations that modify a CallCompileState.

  bool passInstance(MIRType instanceType, CallCompileState* args) {
    if (inDeadCode()) {
      return true;
    }

    // Should only pass an instance once. And it must be a non-GC pointer.
    MOZ_ASSERT(args->instanceArg_ == ABIArg());
    MOZ_ASSERT(instanceType == MIRType::Pointer);
    args->instanceArg_ = args->abi_.next(MIRType::Pointer);
    return true;
  }

  // Do not call this directly. Call one of the passArg() variants instead.
  bool passArgWorker(MDefinition* argDef, MIRType type,
                     CallCompileState* call) {
    ABIArg arg = call->abi_.next(type);
    switch (arg.kind()) {
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR: {
        auto mirLow =
            MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ true);
        curBlock_->add(mirLow);
        auto mirHigh =
            MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ false);
        curBlock_->add(mirHigh);
        return call->regArgs_.append(
                   MWasmCall::Arg(AnyRegister(arg.gpr64().low), mirLow)) &&
               call->regArgs_.append(
                   MWasmCall::Arg(AnyRegister(arg.gpr64().high), mirHigh));
      }
#endif
      case ABIArg::GPR:
      case ABIArg::FPU:
        return call->regArgs_.append(MWasmCall::Arg(arg.reg(), argDef));
      case ABIArg::Stack: {
        auto* mir =
            MWasmStackArg::New(alloc(), arg.offsetFromArgBase(), argDef);
        curBlock_->add(mir);
        return true;
      }
      case ABIArg::Uninitialized:
        MOZ_ASSERT_UNREACHABLE("Uninitialized ABIArg kind");
    }
    MOZ_CRASH("Unknown ABIArg kind.");
  }

  bool passArg(MDefinition* argDef, MIRType type, CallCompileState* call) {
    if (inDeadCode()) {
      return true;
    }
    return passArgWorker(argDef, type, call);
  }

  bool passArg(MDefinition* argDef, ValType type, CallCompileState* call) {
    if (inDeadCode()) {
      return true;
    }
    return passArgWorker(argDef, ToMIRType(type), call);
  }

  bool finishCall(CallCompileState* call) {
    if (inDeadCode()) {
      return true;
    }

    if (!call->regArgs_.append(
            MWasmCall::Arg(AnyRegister(WasmTlsReg), tlsPointer_))) {
      return false;
    }

    uint32_t stackBytes = call->abi_.stackBytesConsumedSoFar();

    maxStackArgBytes_ = std::max(maxStackArgBytes_, stackBytes);
    return true;
  }

  // Wrappers for creating various kinds of calls.

  bool callDirect(const FuncType& funcType, uint32_t funcIndex,
                  uint32_t lineOrBytecode, const CallCompileState& call,
                  MDefinition** def) {
    if (inDeadCode()) {
      *def = nullptr;
      return true;
    }

    CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Func);
    MIRType ret = ToMIRType(funcType.ret());
    auto callee = CalleeDesc::function(funcIndex);
    auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ret,
                               StackArgAreaSizeUnaligned(funcType.args()));
    if (!ins) {
      return false;
    }

    curBlock_->add(ins);
    *def = ins;
    return true;
  }

  bool callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
                    MDefinition* index, uint32_t lineOrBytecode,
                    const CallCompileState& call, MDefinition** def) {
    if (inDeadCode()) {
      *def = nullptr;
      return true;
    }

    const FuncTypeWithId& funcType = env_.types[funcTypeIndex].funcType();

    CalleeDesc callee;
    if (env_.isAsmJS()) {
      MOZ_ASSERT(tableIndex == 0);
      MOZ_ASSERT(funcType.id.kind() == FuncTypeIdDescKind::None);
      const TableDesc& table =
          env_.tables[env_.asmJSSigToTableIndex[funcTypeIndex]];
      MOZ_ASSERT(IsPowerOfTwo(table.limits.initial));

      MConstant* mask =
          MConstant::New(alloc(), Int32Value(table.limits.initial - 1));
      curBlock_->add(mask);
      MBitAnd* maskedIndex = MBitAnd::New(alloc(), index, mask, MIRType::Int32);
      curBlock_->add(maskedIndex);

      index = maskedIndex;
      callee = CalleeDesc::asmJSTable(table);
    } else {
      MOZ_ASSERT(funcType.id.kind() != FuncTypeIdDescKind::None);
      const TableDesc& table = env_.tables[tableIndex];
      callee = CalleeDesc::wasmTable(table, funcType.id);
    }

    CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Dynamic);
    auto* ins = MWasmCall::New(
        alloc(), desc, callee, call.regArgs_, ToMIRType(funcType.ret()),
        StackArgAreaSizeUnaligned(funcType.args()), index);
    if (!ins) {
      return false;
    }

    curBlock_->add(ins);
    *def = ins;
    return true;
  }

  bool callImport(unsigned globalDataOffset, uint32_t lineOrBytecode,
                  const CallCompileState& call, const FuncType& funcType,
                  MDefinition** def) {
    if (inDeadCode()) {
      *def = nullptr;
      return true;
    }

    CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Dynamic);
    auto callee = CalleeDesc::import(globalDataOffset);
    auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_,
                               ToMIRType(funcType.ret()),
                               StackArgAreaSizeUnaligned(funcType.args()));
    if (!ins) {
      return false;
    }

    curBlock_->add(ins);
    *def = ins;
    return true;
  }

  bool builtinCall(const SymbolicAddressSignature& builtin,
                   uint32_t lineOrBytecode, const CallCompileState& call,
                   MDefinition** def) {
    if (inDeadCode()) {
      *def = nullptr;
      return true;
    }

    MOZ_ASSERT(builtin.failureMode == FailureMode::Infallible);

    CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Symbolic);
    auto callee = CalleeDesc::builtin(builtin.identity);
    auto* ins =
        MWasmCall::New(alloc(), desc, callee, call.regArgs_, builtin.retType,
                       StackArgAreaSizeUnaligned(builtin));
    if (!ins) {
      return false;
    }

    curBlock_->add(ins);
    *def = ins;
    return true;
  }

  bool builtinInstanceMethodCall(const SymbolicAddressSignature& builtin,
                                 uint32_t lineOrBytecode,
                                 const CallCompileState& call,
                                 MDefinition** def = nullptr) {
    MOZ_ASSERT_IF(!def, builtin.retType == MIRType::None);
    if (inDeadCode()) {
      if (def) {
        *def = nullptr;
      }
      return true;
    }

    CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Symbolic);
    auto* ins = MWasmCall::NewBuiltinInstanceMethodCall(
        alloc(), desc, builtin.identity, builtin.failureMode, call.instanceArg_,
        call.regArgs_, builtin.retType, StackArgAreaSizeUnaligned(builtin));
    if (!ins) {
      return false;
    }

    curBlock_->add(ins);
    if (def) {
      *def = ins;
    }
    return true;
  }

  /*********************************************** Control flow generation */

  inline bool inDeadCode() const { return curBlock_ == nullptr; }

  void returnValues(const DefVector& values) {
    if (inDeadCode()) {
      return;
    }

    MOZ_ASSERT(values.length() <= 1, "until multi-return");

    if (values.empty()) {
      curBlock_->end(MWasmReturnVoid::New(alloc()));
    } else {
      curBlock_->end(MWasmReturn::New(alloc(), values[0]));
    }
    curBlock_ = nullptr;
  }

  void unreachableTrap() {
    if (inDeadCode()) {
      return;
    }

    auto* ins =
        MWasmTrap::New(alloc(), wasm::Trap::Unreachable, bytecodeOffset());
    curBlock_->end(ins);
    curBlock_ = nullptr;
  }

 private:
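  // Returns the number of values currently pushed onto |block|'s expression
  // stack, i.e. slots beyond the function's locals.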
  static uint32_t numPushed(MBasicBlock* block) {
    return block->stackDepth() - block->info().firstStackSlot();
  }

 public:
  MOZ_MUST_USE bool pushDefs(const DefVector& defs) {
    if (inDeadCode()) {
      return true;
    }
    MOZ_ASSERT(numPushed(curBlock_) == 0);
    if (!curBlock_->ensureHasSlots(defs.length())) {
      return false;
    }
    for (MDefinition* def : defs) {
      MOZ_ASSERT(def->type() != MIRType::None);
      curBlock_->push(def);
    }
    return true;
  }

  bool popPushedDefs(DefVector* defs) {
    size_t n = numPushed(curBlock_);
    if (!defs->resizeUninitialized(n)) {
      return false;
    }
    for (; n > 0; n--) {
      MDefinition* def = curBlock_->pop();
      MOZ_ASSERT(def->type() != MIRType::Value);
      (*defs)[n - 1] = def;
    }
    return true;
  }

 private:
  bool addJoinPredecessor(const DefVector& defs, MBasicBlock** joinPred) {
    *joinPred = curBlock_;
    if (inDeadCode()) {
      return true;
    }
    return pushDefs(defs);
  }

 public:
  bool branchAndStartThen(MDefinition* cond, MBasicBlock** elseBlock) {
    if (inDeadCode()) {
      *elseBlock = nullptr;
    } else {
      MBasicBlock* thenBlock;
      if (!newBlock(curBlock_, &thenBlock)) {
        return false;
      }
      if (!newBlock(curBlock_, elseBlock)) {
        return false;
      }

      curBlock_->end(MTest::New(alloc(), cond, thenBlock, *elseBlock));

      curBlock_ = thenBlock;
      mirGraph().moveBlockToEnd(curBlock_);
    }

    return startBlock();
  }

  bool switchToElse(MBasicBlock* elseBlock, MBasicBlock** thenJoinPred) {
    DefVector values;
    if (!finishBlock(&values)) {
      return false;
    }

    if (!elseBlock) {
      *thenJoinPred = nullptr;
    } else {
      if (!addJoinPredecessor(values, thenJoinPred)) {
        return false;
      }

      curBlock_ = elseBlock;
      mirGraph().moveBlockToEnd(curBlock_);
    }

    return startBlock();
  }

  bool joinIfElse(MBasicBlock* thenJoinPred, DefVector* defs) {
    DefVector values;
    if (!finishBlock(&values)) {
      return false;
    }

    if (!thenJoinPred && inDeadCode()) {
      return true;
    }

    MBasicBlock* elseJoinPred;
    if (!addJoinPredecessor(values, &elseJoinPred)) {
      return false;
    }

    mozilla::Array<MBasicBlock*, 2> blocks;
    size_t numJoinPreds = 0;
    if (thenJoinPred) {
      blocks[numJoinPreds++] = thenJoinPred;
    }
    if (elseJoinPred) {
      blocks[numJoinPreds++] = elseJoinPred;
    }

    if (numJoinPreds == 0) {
      return true;
    }

    MBasicBlock* join;
    if (!goToNewBlock(blocks[0], &join)) {
      return false;
    }
    for (size_t i = 1; i < numJoinPreds; ++i) {
      if (!goToExistingBlock(blocks[i], join)) {
        return false;
      }
    }

    curBlock_ = join;
    return popPushedDefs(defs);
  }

  bool startBlock() {
    MOZ_ASSERT_IF(blockDepth_ < blockPatches_.length(),
                  blockPatches_[blockDepth_].empty());
    blockDepth_++;
    return true;
  }

  bool finishBlock(DefVector* defs) {
    MOZ_ASSERT(blockDepth_);
    uint32_t topLabel = --blockDepth_;
    return bindBranches(topLabel, defs);
  }

  bool startLoop(MBasicBlock** loopHeader, size_t paramCount) {
    *loopHeader = nullptr;

    blockDepth_++;
    loopDepth_++;

    if (inDeadCode()) {
      return true;
    }

    // Create the loop header.
    MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_ - 1);
    *loopHeader = MBasicBlock::New(mirGraph(), info(), curBlock_,
                                   MBasicBlock::PENDING_LOOP_HEADER);
    if (!*loopHeader) {
      return false;
    }

    (*loopHeader)->setLoopDepth(loopDepth_);
    mirGraph().addBlock(*loopHeader);
    curBlock_->end(MGoto::New(alloc(), *loopHeader));

    DefVector loopParams;
    if (!iter().getResults(paramCount, &loopParams)) {
      return false;
    }
    for (size_t i = 0; i < paramCount; i++) {
      MPhi* phi = MPhi::New(alloc(), loopParams[i]->type());
      if (!phi) {
        return false;
      }
      if (!phi->reserveLength(2)) {
        return false;
      }
      (*loopHeader)->addPhi(phi);
      phi->addInput(loopParams[i]);
      loopParams[i] = phi;
    }
    iter().setResults(paramCount, loopParams);

    MBasicBlock* body;
    if (!goToNewBlock(*loopHeader, &body)) {
      return false;
    }
    curBlock_ = body;
    return true;
  }

 private:
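  // Replace, in |b|'s slots, any phi that has been flagged as unused (i.e.
  // redundant) with that phi's first operand.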
  void fixupRedundantPhis(MBasicBlock* b) {
    for (size_t i = 0, depth = b->stackDepth(); i < depth; i++) {
      MDefinition* def = b->getSlot(i);
      if (def->isUnused()) {
        b->setSlot(i, def->toPhi()->getOperand(0));
      }
    }
  }

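  // Bind the single backedge created by closeLoop() to the loop header and
  // clean up phis that turned out to be redundant (both operands identical),
  // including uses of them recorded in pending control-flow patches.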
  bool setLoopBackedge(MBasicBlock* loopEntry, MBasicBlock* loopBody,
                       MBasicBlock* backedge, size_t paramCount) {
    if (!loopEntry->setBackedgeWasm(backedge, paramCount)) {
      return false;
    }

    // Flag all redundant phis as unused.
    for (MPhiIterator phi = loopEntry->phisBegin(); phi != loopEntry->phisEnd();
         phi++) {
      MOZ_ASSERT(phi->numOperands() == 2);
      if (phi->getOperand(0) == phi->getOperand(1)) {
        phi->setUnused();
      }
    }

    // Fix up phis stored in the slots Vector of pending blocks.
    for (ControlFlowPatchVector& patches : blockPatches_) {
      for (ControlFlowPatch& p : patches) {
        MBasicBlock* block = p.ins->block();
        if (block->loopDepth() >= loopEntry->loopDepth()) {
          fixupRedundantPhis(block);
        }
      }
    }

    // The loop body, if any, might be referencing recycled phis too.
    if (loopBody) {
      fixupRedundantPhis(loopBody);
    }

    // Discard redundant phis and add to the free list.
    for (MPhiIterator phi = loopEntry->phisBegin();
         phi != loopEntry->phisEnd();) {
      MPhi* entryDef = *phi++;
      if (!entryDef->isUnused()) {
        continue;
      }

      entryDef->justReplaceAllUsesWith(entryDef->getOperand(0));
      loopEntry->discardPhi(entryDef);
      mirGraph().addPhiToFreeList(entryDef);
    }

    return true;
  }

 public:
  bool closeLoop(MBasicBlock* loopHeader, DefVector* loopResults) {
    MOZ_ASSERT(blockDepth_ >= 1);
    MOZ_ASSERT(loopDepth_);

    uint32_t headerLabel = blockDepth_ - 1;

    if (!loopHeader) {
      MOZ_ASSERT(inDeadCode());
      MOZ_ASSERT(headerLabel >= blockPatches_.length() ||
                 blockPatches_[headerLabel].empty());
      blockDepth_--;
      loopDepth_--;
      return true;
    }

    // Op::Loop doesn't have an implicit backedge so temporarily set
    // aside the end of the loop body to bind backedges.
    MBasicBlock* loopBody = curBlock_;
    curBlock_ = nullptr;

    // As explained in bug 1253544, Ion apparently has an invariant that
    // there is only one backedge to loop headers. To handle wasm's ability
    // to have multiple backedges to the same loop header, we bind all those
    // branches as forward jumps to a single backward jump. This is
    // unfortunate but the optimizer is able to fold these into single jumps
    // to backedges.
    DefVector backedgeValues;
    if (!bindBranches(headerLabel, &backedgeValues)) {
      return false;
    }

    MOZ_ASSERT(loopHeader->loopDepth() == loopDepth_);

    if (curBlock_) {
      // We're on the loop backedge block, created by bindBranches.
      for (size_t i = 0, n = numPushed(curBlock_); i != n; i++) {
        curBlock_->pop();
      }

      if (!pushDefs(backedgeValues)) {
        return false;
      }

      MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_);
      curBlock_->end(MGoto::New(alloc(), loopHeader));
      if (!setLoopBackedge(loopHeader, loopBody, curBlock_,
                           backedgeValues.length())) {
        return false;
      }
    }

    curBlock_ = loopBody;

    loopDepth_--;

    // If the loop depth is still that of the inner loop body, correct it.
    if (curBlock_ && curBlock_->loopDepth() != loopDepth_) {
      MBasicBlock* out;
      if (!goToNewBlock(curBlock_, &out)) {
        return false;
      }
      curBlock_ = out;
    }

    blockDepth_ -= 1;
    return inDeadCode() || popPushedDefs(loopResults);
  }

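  // Record that successor |index| of |ins| branches to the block that is
  // |relative| labels up the control stack; the actual target is bound later
  // by bindBranches().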
  bool addControlFlowPatch(MControlInstruction* ins, uint32_t relative,
                           uint32_t index) {
    MOZ_ASSERT(relative < blockDepth_);
    uint32_t absolute = blockDepth_ - 1 - relative;

    if (absolute >= blockPatches_.length() &&
        !blockPatches_.resize(absolute + 1)) {
      return false;
    }

    return blockPatches_[absolute].append(ControlFlowPatch(ins, index));
  }

  bool br(uint32_t relativeDepth, const DefVector& values) {
    if (inDeadCode()) {
      return true;
    }

    MGoto* jump = MGoto::New(alloc());
    if (!addControlFlowPatch(jump, relativeDepth, MGoto::TargetIndex)) {
      return false;
    }

    if (!pushDefs(values)) {
      return false;
    }

    curBlock_->end(jump);
    curBlock_ = nullptr;
    return true;
  }

  bool brIf(uint32_t relativeDepth, const DefVector& values,
            MDefinition* condition) {
    if (inDeadCode()) {
      return true;
    }

    MBasicBlock* joinBlock = nullptr;
    if (!newBlock(curBlock_, &joinBlock)) {
      return false;
    }

    MTest* test = MTest::New(alloc(), condition, joinBlock);
    if (!addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
      return false;
    }

    if (!pushDefs(values)) {
      return false;
    }

    curBlock_->end(test);
    curBlock_ = joinBlock;
    return true;
  }

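  // Lower br_table to an MTableSwitch, de-duplicating successors so that all
  // cases branching to the same relative depth share one switch successor.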
  bool brTable(MDefinition* operand, uint32_t defaultDepth,
               const Uint32Vector& depths, const DefVector& values) {
    if (inDeadCode()) {
      return true;
    }

    size_t numCases = depths.length();
    MOZ_ASSERT(numCases <= INT32_MAX);
    MOZ_ASSERT(numCases);

    MTableSwitch* table =
        MTableSwitch::New(alloc(), operand, 0, int32_t(numCases - 1));

    size_t defaultIndex;
    if (!table->addDefault(nullptr, &defaultIndex)) {
      return false;
    }
    if (!addControlFlowPatch(table, defaultDepth, defaultIndex)) {
      return false;
    }

    typedef HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>,
                    SystemAllocPolicy>
        IndexToCaseMap;

    IndexToCaseMap indexToCase;
    if (!indexToCase.put(defaultDepth, defaultIndex)) {
      return false;
    }

    for (size_t i = 0; i < numCases; i++) {
      uint32_t depth = depths[i];

      size_t caseIndex;
      IndexToCaseMap::AddPtr p = indexToCase.lookupForAdd(depth);
      if (!p) {
        if (!table->addSuccessor(nullptr, &caseIndex)) {
          return false;
        }
        if (!addControlFlowPatch(table, depth, caseIndex)) {
          return false;
        }
        if (!indexToCase.add(p, depth, caseIndex)) {
          return false;
        }
      } else {
        caseIndex = p->value();
      }

      if (!table->addCase(caseIndex)) {
        return false;
      }
    }

    if (!pushDefs(values)) {
      return false;
    }

    curBlock_->end(table);
    curBlock_ = nullptr;

    return true;
  }

  /************************************************************ DECODING ***/

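  // For asm.js, call sites are identified by the source line numbers recorded
  // during validation; for wasm, by the bytecode offset of the call.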
  uint32_t readCallSiteLineOrBytecode() {
    if (!func_.callSiteLineNums.empty()) {
      return func_.callSiteLineNums[lastReadCallSite_++];
    }
    return iter_.lastOpcodeOffset();
  }

#if DEBUG
  bool done() const { return iter_.done(); }
#endif

  /*************************************************************************/
 private:
  bool newBlock(MBasicBlock* pred, MBasicBlock** block) {
    *block = MBasicBlock::New(mirGraph(), info(), pred, MBasicBlock::NORMAL);
    if (!*block) {
      return false;
    }
    mirGraph().addBlock(*block);
    (*block)->setLoopDepth(loopDepth_);
    return true;
  }

  bool goToNewBlock(MBasicBlock* pred, MBasicBlock** block) {
    if (!newBlock(pred, block)) {
      return false;
    }
    pred->end(MGoto::New(alloc(), *block));
    return true;
  }

  bool goToExistingBlock(MBasicBlock* prev, MBasicBlock* next) {
    MOZ_ASSERT(prev);
    MOZ_ASSERT(next);
    prev->end(MGoto::New(alloc(), next));
    return next->addPredecessor(alloc(), prev);
  }

  bool bindBranches(uint32_t absolute, DefVector* defs) {
    if (absolute >= blockPatches_.length() || blockPatches_[absolute].empty()) {
      return inDeadCode() || popPushedDefs(defs);
    }

    ControlFlowPatchVector& patches = blockPatches_[absolute];
    MControlInstruction* ins = patches[0].ins;
    MBasicBlock* pred = ins->block();

    MBasicBlock* join = nullptr;
    if (!newBlock(pred, &join)) {
      return false;
    }

    pred->mark();
    ins->