/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/MIRGraph.h"

#include "jit/BytecodeAnalysis.h"
#include "jit/Ion.h"
#include "jit/JitSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "wasm/WasmTypes.h"

using namespace js;
using namespace js::jit;

MIRGenerator::MIRGenerator(CompileRealm* realm,
                           const JitCompileOptions& options,
                           TempAllocator* alloc, MIRGraph* graph,
                           const CompileInfo* info,
                           const OptimizationInfo* optimizationInfo)
    : realm(realm),
      runtime(realm ? realm->runtime() : nullptr),
      outerInfo_(info),
      optimizationInfo_(optimizationInfo),
      alloc_(alloc),
      graph_(graph),
      offThreadStatus_(Ok()),
      cancelBuild_(false),
      wasmMaxStackArgBytes_(0),
      needsOverrecursedCheck_(false),
      needsStaticStackAlignment_(false),
      instrumentedProfiling_(false),
      instrumentedProfilingIsCached_(false),
      safeForMinorGC_(true),
      stringsCanBeInNursery_(realm ? realm->zone()->canNurseryAllocateStrings()
                                   : false),
      minWasmHeapLength_(0),
      options(options),
      gs_(alloc) {}

mozilla::GenericErrorResult<AbortReason> MIRGenerator::abort(AbortReason r) {
  if (JitSpewEnabled(JitSpew_IonAbort)) {
    switch (r) {
      case AbortReason::Alloc:
        JitSpew(JitSpew_IonAbort, "AbortReason::Alloc");
        break;
      case AbortReason::Inlining:
        JitSpew(JitSpew_IonAbort, "AbortReason::Inlining");
        break;
      case AbortReason::PreliminaryObjects:
        JitSpew(JitSpew_IonAbort, "AbortReason::PreliminaryObjects");
        break;
      case AbortReason::Disable:
        JitSpew(JitSpew_IonAbort, "AbortReason::Disable");
        break;
      case AbortReason::Error:
        JitSpew(JitSpew_IonAbort, "AbortReason::Error");
        break;
      case AbortReason::NoAbort:
        MOZ_CRASH("Abort with AbortReason::NoAbort");
        break;
    }
  }
  return Err(std::move(r));
}

mozilla::GenericErrorResult<AbortReason> MIRGenerator::abortFmt(
    AbortReason r, const char* message, va_list ap) {
  JitSpewVA(JitSpew_IonAbort, message, ap);
  return Err(std::move(r));
}

mozilla::GenericErrorResult<AbortReason> MIRGenerator::abort(
    AbortReason r, const char* message, ...) {
  va_list ap;
  va_start(ap, message);
  auto forward = abortFmt(r, message, ap);
  va_end(ap);
  return forward;
}

void MIRGraph::addBlock(MBasicBlock* block) {
  MOZ_ASSERT(block);
  block->setId(blockIdGen_++);
  blocks_.pushBack(block);
  numBlocks_++;
}

void MIRGraph::insertBlockAfter(MBasicBlock* at, MBasicBlock* block) {
  block->setId(blockIdGen_++);
  blocks_.insertAfter(at, block);
  numBlocks_++;
}

void MIRGraph::insertBlockBefore(MBasicBlock* at, MBasicBlock* block) {
  block->setId(blockIdGen_++);
  blocks_.insertBefore(at, block);
  numBlocks_++;
}

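// Mark |start| and all blocks transitively reachable through its successors,
// remove the marked blocks whose predecessors are all marked, and detach the
// marked predecessor edges of the remaining marked blocks. Returns false only
// on allocation failure.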
bool MIRGraph::removeSuccessorBlocks(MBasicBlock* start) {
  if (!start->hasLastIns()) {
    return true;
  }

  start->mark();

  // Mark all successors.
  Vector<MBasicBlock*, 4, SystemAllocPolicy> blocks;
  for (size_t i = 0; i < start->numSuccessors(); i++) {
    if (!start->getSuccessor(i)) {
      continue;
    }
    if (start->getSuccessor(i)->isMarked()) {
      continue;
    }
    if (!blocks.append(start->getSuccessor(i))) {
      return false;
    }
    start->getSuccessor(i)->mark();
  }
  for (size_t i = 0; i < blocks.length(); i++) {
    MBasicBlock* block = blocks[i];
    if (!block->hasLastIns()) {
      continue;
    }

    for (size_t j = 0; j < block->numSuccessors(); j++) {
      if (!block->getSuccessor(j)) {
        continue;
      }
      if (block->getSuccessor(j)->isMarked()) {
        continue;
      }
      if (!blocks.append(block->getSuccessor(j))) {
        return false;
      }
      block->getSuccessor(j)->mark();
    }
  }

  if (osrBlock()) {
    if (osrBlock()->getSuccessor(0)->isMarked()) {
      osrBlock()->mark();
    }
  }

  // Remove the marked blocks whose predecessors are all marked; otherwise,
  // only detach their marked predecessor edges.
  for (size_t i = 0; i < blocks.length(); i++) {
    MBasicBlock* block = blocks[i];
    bool allMarked = true;
    for (size_t i = 0; i < block->numPredecessors(); i++) {
      if (block->getPredecessor(i)->isMarked()) {
        continue;
      }
      allMarked = false;
      break;
    }
    if (allMarked) {
      removeBlock(block);
    } else {
      MOZ_ASSERT(block != osrBlock());
      for (size_t j = 0; j < block->numPredecessors();) {
        if (!block->getPredecessor(j)->isMarked()) {
          j++;
          continue;
        }
        block->removePredecessor(block->getPredecessor(j));
      }
      // This shouldn't have any instructions yet.
      MOZ_ASSERT(block->begin() == block->end());
    }
  }

  if (osrBlock()) {
    if (osrBlock()->getSuccessor(0)->isDead()) {
      removeBlock(osrBlock());
    }
  }

  for (size_t i = 0; i < blocks.length(); i++) {
    blocks[i]->unmark();
  }
  start->unmark();

  return true;
}

void MIRGraph::removeBlock(MBasicBlock* block) {
  // Remove a block from the graph. It will also clean up the block.

  if (block == osrBlock_) {
    osrBlock_ = nullptr;
  }

  if (returnAccumulator_) {
    size_t i = 0;
    while (i < returnAccumulator_->length()) {
      if ((*returnAccumulator_)[i] == block) {
        returnAccumulator_->erase(returnAccumulator_->begin() + i);
      } else {
        i++;
      }
    }
  }

  block->clear();
  block->markAsDead();

  if (block->isInList()) {
    blocks_.remove(block);
    numBlocks_--;
  }
}

void MIRGraph::removeBlockIncludingPhis(MBasicBlock* block) {
  // removeBlock doesn't clear phis because of IonBuilder constraints. Here,
  // we want to totally clear everything.
  removeBlock(block);
  block->discardAllPhis();
}

void MIRGraph::unmarkBlocks() {
  for (MBasicBlockIterator i(blocks_.begin()); i != blocks_.end(); i++) {
    i->unmark();
  }
}

MBasicBlock* MBasicBlock::New(MIRGraph& graph, size_t stackDepth,
                              const CompileInfo& info, MBasicBlock* maybePred,
                              BytecodeSite* site, Kind kind) {
  MOZ_ASSERT(site->pc() != nullptr);

  MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
  if (!block->init()) {
    return nullptr;
  }

  if (!block->inherit(graph.alloc(), stackDepth, maybePred, 0)) {
    return nullptr;
  }

  return block;
}

MBasicBlock* MBasicBlock::NewPopN(MIRGraph& graph, const CompileInfo& info,
                                  MBasicBlock* pred, BytecodeSite* site,
                                  Kind kind, uint32_t popped) {
  MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
  if (!block->init()) {
    return nullptr;
  }

  if (!block->inherit(graph.alloc(), pred->stackDepth(), pred, popped)) {
    return nullptr;
  }

  return block;
}

MBasicBlock* MBasicBlock::NewWithResumePoint(MIRGraph& graph,
                                             const CompileInfo& info,
                                             MBasicBlock* pred,
                                             BytecodeSite* site,
                                             MResumePoint* resumePoint) {
  MBasicBlock* block =
      new (graph.alloc()) MBasicBlock(graph, info, site, NORMAL);

  MOZ_ASSERT(!resumePoint->instruction());
  resumePoint->block()->discardResumePoint(resumePoint, RefType_None);
  resumePoint->setBlock(block);
  block->addResumePoint(resumePoint);
  block->entryResumePoint_ = resumePoint;

  if (!block->init()) {
    return nullptr;
  }

  if (!block->inheritResumePoint(pred)) {
    return nullptr;
  }

  return block;
}

MBasicBlock* MBasicBlock::NewPendingLoopHeader(MIRGraph& graph,
                                               const CompileInfo& info,
                                               MBasicBlock* pred,
                                               BytecodeSite* site) {
  MOZ_ASSERT(site->pc() != nullptr);

  MBasicBlock* block =
      new (graph.alloc()) MBasicBlock(graph, info, site, PENDING_LOOP_HEADER);
  if (!block->init()) {
    return nullptr;
  }

  if (!block->inherit(graph.alloc(), pred->stackDepth(), pred, 0)) {
    return nullptr;
  }

  return block;
}

MBasicBlock* MBasicBlock::NewSplitEdge(MIRGraph& graph, MBasicBlock* pred,
                                       size_t predEdgeIdx, MBasicBlock* succ) {
  MBasicBlock* split = nullptr;
  if (!succ->pc()) {
    // The predecessor does not have a PC; this is a Wasm compilation.
    split = MBasicBlock::New(graph, succ->info(), pred, SPLIT_EDGE);
    if (!split) {
      return nullptr;
    }
  } else {
    // The predecessor has a PC; this is an IonBuilder compilation.
    MResumePoint* succEntry = succ->entryResumePoint();

    BytecodeSite* site =
        new (graph.alloc()) BytecodeSite(succ->trackedTree(), succEntry->pc());
    split =
        new (graph.alloc()) MBasicBlock(graph, succ->info(), site, SPLIT_EDGE);

    if (!split->init()) {
      return nullptr;
    }

    // A split edge is used to simplify the graph to avoid having a
    // predecessor with multiple successors as well as a successor with
    // multiple predecessors. As instructions can be moved in this
    // split-edge block, we need to give this block a resume point. To do
    // so, we copy the entry resume points of the successor and filter the
    // phis to keep inputs from the current edge.

    // Propagate the caller resume point from the inherited block.
    split->callerResumePoint_ = succ->callerResumePoint();

    // Split-edge blocks are created after the interpreter stack emulation.
    // Thus, there is no need to create slots.
    split->stackPosition_ = succEntry->stackDepth();

    // Create a resume point using our initial stack position.
    MResumePoint* splitEntry = new (graph.alloc())
        MResumePoint(split, succEntry->pc(), MResumePoint::ResumeAt);
    if (!splitEntry->init(graph.alloc())) {
      return nullptr;
    }
    split->entryResumePoint_ = splitEntry;

    // The target entry resume point might have phi operands, keep the
    // operands of the phi coming from our edge.
    size_t succEdgeIdx = succ->indexForPredecessor(pred);

    for (size_t i = 0, e = splitEntry->numOperands(); i < e; i++) {
      MDefinition* def = succEntry->getOperand(i);
      // This early in the pipeline, we have no recover instructions in
      // any entry resume point.
      MOZ_ASSERT_IF(def->block() == succ, def->isPhi());
      if (def->block() == succ) {
        def = def->toPhi()->getOperand(succEdgeIdx);
      }

      splitEntry->initOperand(i, def);
    }

    // This is done in the New variant for wasm, so we cannot keep this
    // line below, where the rest of the graph is modified.
    if (!split->predecessors_.append(pred)) {
      return nullptr;
    }
  }

  split->setLoopDepth(succ->loopDepth());

  // Insert the split edge block in-between.
  split->end(MGoto::New(graph.alloc(), succ));

  graph.insertBlockAfter(pred, split);

  pred->replaceSuccessor(predEdgeIdx, split);
  succ->replacePredecessor(pred, split);
  return split;
}

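// Variant of |New| used by wasm compilation: it creates an empty BytecodeSite,
// so the resulting block has no pc (see the !succ->pc() check in NewSplitEdge
// above).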
MBasicBlock* MBasicBlock::New(MIRGraph& graph, const CompileInfo& info,
                              MBasicBlock* pred, Kind kind) {
  BytecodeSite* site = new (graph.alloc()) BytecodeSite();
  MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
  if (!block->init()) {
    return nullptr;
  }

  if (pred) {
    block->stackPosition_ = pred->stackPosition_;

    if (block->kind_ == PENDING_LOOP_HEADER) {
      size_t nphis = block->stackPosition_;

      size_t nfree = graph.phiFreeListLength();

      TempAllocator& alloc = graph.alloc();
      MPhi* phis = nullptr;
      if (nphis > nfree) {
        phis = alloc.allocateArray<MPhi>(nphis - nfree);
        if (!phis) {
          return nullptr;
        }
      }

      // Note: Phis are inserted in the same order as the slots.
      for (size_t i = 0; i < nphis; i++) {
        MDefinition* predSlot = pred->getSlot(i);

        MOZ_ASSERT(predSlot->type() != MIRType::Value);

        MPhi* phi;
        if (i < nfree) {
          phi = graph.takePhiFromFreeList();
        } else {
          phi = phis + (i - nfree);
        }
        new (phi) MPhi(alloc, predSlot->type());

        phi->addInlineInput(predSlot);

        // Append the phi to the block.
        block->addPhi(phi);
        block->setSlot(i, phi);
      }
    } else {
      if (!block->ensureHasSlots(0)) {
        return nullptr;
      }
      block->copySlots(pred);
    }

    if (!block->predecessors_.append(pred)) {
      return nullptr;
    }
  }

  return block;
}

MBasicBlock::MBasicBlock(MIRGraph& graph, const CompileInfo& info,
                         BytecodeSite* site, Kind kind)
    : unreachable_(false),
      specialized_(false),
      graph_(graph),
      info_(info),
      predecessors_(graph.alloc()),
      stackPosition_(info_.firstStackSlot()),
      id_(0),
      domIndex_(0),
      numDominated_(0),
      pc_(site->pc()),
      lir_(nullptr),
      callerResumePoint_(nullptr),
      entryResumePoint_(nullptr),
      outerResumePoint_(nullptr),
      successorWithPhis_(nullptr),
      positionInPhiSuccessor_(0),
      loopDepth_(0),
      kind_(kind),
      mark_(false),
      immediatelyDominated_(graph.alloc()),
      immediateDominator_(nullptr),
      trackedSite_(site),
      hitCount_(0),
      hitState_(HitState::NotDefined)
#if defined(JS_ION_PERF) || defined(DEBUG)
      ,
      lineno_(0u),
      columnIndex_(0u)
#endif
{
}

bool MBasicBlock::init() { return slots_.init(graph_.alloc(), info_.nslots()); }

bool MBasicBlock::increaseSlots(size_t num) {
  return slots_.growBy(graph_.alloc(), num);
}

bool MBasicBlock::ensureHasSlots(size_t num) {
  size_t depth = stackDepth() + num;
  if (depth > nslots()) {
    if (!increaseSlots(depth - nslots())) {
      return false;
    }
  }
  return true;
}

void MBasicBlock::copySlots(MBasicBlock* from) {
  MOZ_ASSERT(stackPosition_ <= from->stackPosition_);
  MOZ_ASSERT(stackPosition_ <= nslots());

  MDefinition** thisSlots = slots_.begin();
  MDefinition** fromSlots = from->slots_.begin();
  for (size_t i = 0, e = stackPosition_; i < e; ++i) {
    thisSlots[i] = fromSlots[i];
  }
}

bool MBasicBlock::inherit(TempAllocator& alloc, size_t stackDepth,
                          MBasicBlock* maybePred, uint32_t popped) {
  MOZ_ASSERT_IF(maybePred, maybePred->stackDepth() == stackDepth);

  MOZ_ASSERT(stackDepth >= popped);
  stackDepth -= popped;
  stackPosition_ = stackDepth;

  if (maybePred && kind_ != PENDING_LOOP_HEADER) {
    copySlots(maybePred);
  }

  MOZ_ASSERT(info_.nslots() >= stackPosition_);
  MOZ_ASSERT(!entryResumePoint_);

  // Propagate the caller resume point from the inherited block.
  callerResumePoint_ = maybePred ? maybePred->callerResumePoint() : nullptr;

  // Create a resume point using our initial stack state.
  entryResumePoint_ =
      new (alloc) MResumePoint(this, pc(), MResumePoint::ResumeAt);
  if (!entryResumePoint_->init(alloc)) {
    return false;
  }

  if (maybePred) {
    if (!predecessors_.append(maybePred)) {
      return false;
    }

    if (kind_ == PENDING_LOOP_HEADER) {
      for (size_t i = 0; i < stackDepth; i++) {
        MPhi* phi = MPhi::New(alloc.fallible());
        if (!phi) {
          return false;
        }
        phi->addInlineInput(maybePred->getSlot(i));
        addPhi(phi);
        setSlot(i, phi);
        entryResumePoint()->initOperand(i, phi);
      }
    } else {
      for (size_t i = 0; i < stackDepth; i++) {
        entryResumePoint()->initOperand(i, getSlot(i));
      }
    }
  } else {
    /*
     * Don't leave the operands uninitialized for the caller, as it may not
     * initialize them later on.
     */
    for (size_t i = 0; i < stackDepth; i++) {
      entryResumePoint()->clearOperand(i);
    }
  }

  return true;
}

bool MBasicBlock::inheritResumePoint(MBasicBlock* pred) {
  // Copy slots from the resume point.
  stackPosition_ = entryResumePoint_->stackDepth();
  for (uint32_t i = 0; i < stackPosition_; i++) {
    slots_[i] = entryResumePoint_->getOperand(i);
  }

  MOZ_ASSERT(info_.nslots() >= stackPosition_);
  MOZ_ASSERT(kind_ != PENDING_LOOP_HEADER);
  MOZ_ASSERT(pred != nullptr);

  callerResumePoint_ = pred->callerResumePoint();

  if (!predecessors_.append(pred)) {
    return false;
  }

  return true;
}

void MBasicBlock::inheritSlots(MBasicBlock* parent) {
  stackPosition_ = parent->stackPosition_;
  copySlots(parent);
}

bool MBasicBlock::initEntrySlots(TempAllocator& alloc) {
  // Remove the previous resume point.
  discardResumePoint(entryResumePoint_);

  // Create a resume point using our initial stack state.
  entryResumePoint_ =
      MResumePoint::New(alloc, this, pc(), MResumePoint::ResumeAt);
  if (!entryResumePoint_) {
    return false;
  }
  return true;
}

void MBasicBlock::shimmySlots(int discardDepth) {
  // Move all slots above the given depth down by one,
  // overwriting the MDefinition at discardDepth.

  MOZ_ASSERT(discardDepth < 0);
  MOZ_ASSERT(stackPosition_ + discardDepth >= info_.firstStackSlot());

  for (int i = discardDepth; i < -1; i++) {
    slots_[stackPosition_ + i] = slots_[stackPosition_ + i + 1];
  }

  --stackPosition_;
}

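// Attach a copy of the MStart resume point to each OSR-provided value
// (environment chain, return value, arguments object, locals and arguments),
// so each of these instructions carries its own resume point for the OSR
// entry state.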
bool MBasicBlock::linkOsrValues(MStart* start) {
  MResumePoint* res = start->resumePoint();

  for (uint32_t i = 0; i < stackDepth(); i++) {
    MDefinition* def = slots_[i];
    MInstruction* cloneRp = nullptr;
    if (i == info().environmentChainSlot()) {
      if (def->isOsrEnvironmentChain()) {
        cloneRp = def->toOsrEnvironmentChain();
      }
    } else if (i == info().returnValueSlot()) {
      if (def->isOsrReturnValue()) {
        cloneRp = def->toOsrReturnValue();
      }
    } else if (info().hasArguments() && i == info().argsObjSlot()) {
      MOZ_ASSERT(def->isConstant() || def->isOsrArgumentsObject());
      MOZ_ASSERT_IF(def->isConstant(),
                    def->toConstant()->type() == MIRType::Undefined);
      if (def->isOsrArgumentsObject()) {
        cloneRp = def->toOsrArgumentsObject();
      }
    } else {
      MOZ_ASSERT(def->isOsrValue() || def->isGetArgumentsObjectArg() ||
                 def->isConstant() || def->isParameter());

      // A constant Undefined can show up here for an argument slot when
      // the function has an arguments object, but the argument in
      // question is stored on the scope chain.
      MOZ_ASSERT_IF(def->isConstant(),
                    def->toConstant()->type() == MIRType::Undefined);

      if (def->isOsrValue()) {
        cloneRp = def->toOsrValue();
      } else if (def->isGetArgumentsObjectArg()) {
        cloneRp = def->toGetArgumentsObjectArg();
      } else if (def->isParameter()) {
        cloneRp = def->toParameter();
      }
    }

    if (cloneRp) {
      MResumePoint* clone = MResumePoint::Copy(graph().alloc(), res);
      if (!clone) {
        return false;
      }
      cloneRp->setResumePoint(clone);
    }
  }

  return true;
}

void MBasicBlock::rewriteAtDepth(int32_t depth, MDefinition* ins) {
  MOZ_ASSERT(depth < 0);
  MOZ_ASSERT(stackPosition_ + depth >= info_.firstStackSlot());
  rewriteSlot(stackPosition_ + depth, ins);
}

MDefinition* MBasicBlock::environmentChain() {
  return getSlot(info().environmentChainSlot());
}

MDefinition* MBasicBlock::argumentsObject() {
  return getSlot(info().argsObjSlot());
}

void MBasicBlock::setEnvironmentChain(MDefinition* scopeObj) {
  setSlot(info().environmentChainSlot(), scopeObj);
}

void MBasicBlock::setArgumentsObject(MDefinition* argsObj) {
  setSlot(info().argsObjSlot(), argsObj);
}

void MBasicBlock::pick(int32_t depth) {
  // pick takes a value and moves it to the top.
  // pick(-2):
  //   A B C D E
  //   A B D C E [ swapAt(-2) ]
  //   A B D E C [ swapAt(-1) ]
  for (; depth < 0; depth++) {
    swapAt(depth);
  }
}

void MBasicBlock::unpick(int32_t depth) {
  // unpick takes the value on top of the stack and moves it under the depth-th
  // element;
  // unpick(-2):
  //   A B C D E
  //   A B C E D [ swapAt(-1) ]
  //   A B E C D [ swapAt(-2) ]
  for (int32_t n = -1; n >= depth; n--) {
    swapAt(n);
  }
}

void MBasicBlock::swapAt(int32_t depth) {
  uint32_t lhsDepth = stackPosition_ + depth - 1;
  uint32_t rhsDepth = stackPosition_ + depth;

  MDefinition* temp = slots_[lhsDepth];
  slots_[lhsDepth] = slots_[rhsDepth];
  slots_[rhsDepth] = temp;
}

void MBasicBlock::discardLastIns() { discard(lastIns()); }

MConstant* MBasicBlock::optimizedOutConstant(TempAllocator& alloc) {
  // If the first instruction is a MConstant(MagicValue(JS_OPTIMIZED_OUT))
  // then reuse it.
  MInstruction* ins = *begin();
  if (ins->type() == MIRType::MagicOptimizedOut) {
    return ins->toConstant();
  }

  MConstant* constant = MConstant::New(alloc, MagicValue(JS_OPTIMIZED_OUT));
  insertBefore(ins, constant);
  return constant;
}

void MBasicBlock::addFromElsewhere(MInstruction* ins) {
  MOZ_ASSERT(ins->block() != this);

  // Remove |ins| from its containing block.
  ins->block()->instructions_.remove(ins);

  // Add it to this block.
  add(ins);
}

void MBasicBlock::moveBefore(MInstruction* at, MInstruction* ins) {
  // Remove |ins| from the current block.
  MOZ_ASSERT(ins->block() == this);
  instructions_.remove(ins);

  // Insert into new block, which may be distinct.
  // Uses and operands are untouched.
  ins->setBlock(at->block());
  at->block()->instructions_.insertBefore(at, ins);
  ins->setTrackedSite(at->trackedSite());
}

MInstruction* MBasicBlock::safeInsertTop(MDefinition* ins, IgnoreTop ignore) {
  MOZ_ASSERT(graph().osrBlock() != this,
             "We are not supposed to add any instruction in OSR blocks.");

  // Beta nodes and interrupt checks are required to be located at the
  // beginnings of basic blocks, so we must insert new instructions after any
  // such instructions.
  MInstructionIterator insertIter =
      !ins || ins->isPhi() ? begin() : begin(ins->toInstruction());
  while (insertIter->isBeta() || insertIter->isInterruptCheck() ||
         insertIter->isConstant() || insertIter->isParameter() ||
         (!(ignore & IgnoreRecover) && insertIter->isRecoveredOnBailout())) {
    insertIter++;
  }

  return *insertIter;
}

void MBasicBlock::discardResumePoint(
    MResumePoint* rp, ReferencesType refType /* = RefType_Default */) {
  if (refType & RefType_DiscardOperands) {
    rp->releaseUses();
  }
#ifdef DEBUG
  MResumePointIterator iter = resumePointsBegin();
  while (*iter != rp) {
    // We should reach it before reaching the end.
    MOZ_ASSERT(iter != resumePointsEnd());
    iter++;
  }
  resumePoints_.removeAt(iter);
#endif
}

void MBasicBlock::prepareForDiscard(
    MInstruction* ins, ReferencesType refType /* = RefType_Default */) {
  // Only remove instructions from the same basic block. This is needed for
  // correctly removing the resume point if any.
  MOZ_ASSERT(ins->block() == this);

  MResumePoint* rp = ins->resumePoint();
  if ((refType & RefType_DiscardResumePoint) && rp) {
    discardResumePoint(rp, refType);
  }

  // We need to assert that instructions have no uses after removing their
  // resume point operands, as they could be captured by their own resume
  // point.
  MOZ_ASSERT_IF(refType & RefType_AssertNoUses, !ins->hasUses());

  const uint32_t InstructionOperands =
      RefType_DiscardOperands | RefType_DiscardInstruction;
  if ((refType & InstructionOperands) == InstructionOperands) {
    for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
      ins->releaseOperand(i);
    }
  }

  ins->setDiscarded();
}

void MBasicBlock::discard(MInstruction* ins) {
  prepareForDiscard(ins);
  instructions_.remove(ins);
}

void MBasicBlock::discardIgnoreOperands(MInstruction* ins) {
#ifdef DEBUG
  for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
    MOZ_ASSERT(!ins->hasOperand(i));
  }
#endif

  prepareForDiscard(ins, RefType_IgnoreOperands);
  instructions_.remove(ins);
}

void MBasicBlock::discardDef(MDefinition* at) {
  if (at->isPhi()) {
    at->block()->discardPhi(at->toPhi());
  } else {
    at->block()->discard(at->toInstruction());
  }
}

void MBasicBlock::discardAllInstructions() {
  MInstructionIterator iter = begin();
  discardAllInstructionsStartingAt(iter);
}

void MBasicBlock::discardAllInstructionsStartingAt(MInstructionIterator iter) {
  while (iter != end()) {
    // Discard operands and resume point operands and flag the instruction
    // as discarded. Also we do not assert that we have no uses as blocks
    // might be removed in reverse post order.
    MInstruction* ins = *iter++;
    prepareForDiscard(ins, RefType_DefaultNoAssert);
    instructions_.remove(ins);
  }
}

void MBasicBlock::discardAllPhiOperands() {
  for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
    iter->removeAllOperands();
  }

  for (MBasicBlock** pred = predecessors_.begin(); pred != predecessors_.end();
       pred++) {
    (*pred)->clearSuccessorWithPhis();
  }
}

void MBasicBlock::discardAllPhis() {
  discardAllPhiOperands();
  phis_.clear();
}

void MBasicBlock::discardAllResumePoints(bool discardEntry) {
  if (outerResumePoint_) {
    clearOuterResumePoint();
  }

  if (discardEntry && entryResumePoint_) {
    clearEntryResumePoint();
  }

#ifdef DEBUG
  if (!entryResumePoint()) {
    MOZ_ASSERT(resumePointsEmpty());
  } else {
    MResumePointIterator iter(resumePointsBegin());
    MOZ_ASSERT(iter != resumePointsEnd());
    iter++;
    MOZ_ASSERT(iter == resumePointsEnd());
  }
#endif
}

void MBasicBlock::clear() {
  discardAllInstructions();
  discardAllResumePoints();

  // Note: phis are disconnected from the rest of the graph, but are not
  // removed entirely. If the block being removed is a loop header then
  // IonBuilder may need to access these phis to more quickly converge on the
  // possible types in the graph. See IonBuilder::analyzeNewLoopTypes.
  discardAllPhiOperands();
}

void MBasicBlock::insertBefore(MInstruction* at, MInstruction* ins) {
  MOZ_ASSERT(at->block() == this);
  ins->setBlock(this);
  graph().allocDefinitionId(ins);
  instructions_.insertBefore(at, ins);
  ins->setTrackedSite(at->trackedSite());
}

void MBasicBlock::insertAfter(MInstruction* at, MInstruction* ins) {
  MOZ_ASSERT(at->block() == this);
  ins->setBlock(this);
  graph().allocDefinitionId(ins);
  instructions_.insertAfter(at, ins);
  ins->setTrackedSite(at->trackedSite());
}

void MBasicBlock::insertAtEnd(MInstruction* ins) {
  if (hasLastIns()) {
    insertBefore(lastIns(), ins);
  } else {
    add(ins);
  }
}

void MBasicBlock::addPhi(MPhi* phi) {
  phis_.pushBack(phi);
  phi->setBlock(this);
  graph().allocDefinitionId(phi);
}

void MBasicBlock::discardPhi(MPhi* phi) {
  MOZ_ASSERT(!phis_.empty());

  phi->removeAllOperands();
  phi->setDiscarded();

  phis_.remove(phi);

  if (phis_.empty()) {
    for (MBasicBlock* pred : predecessors_) {
      pred->clearSuccessorWithPhis();
    }
  }
}

void MBasicBlock::flagOperandsOfPrunedBranches(MInstruction* ins) {
  // Find the previous resume point which would be used for bailing out.
  MResumePoint* rp = nullptr;
  for (MInstructionReverseIterator iter = rbegin(ins); iter != rend(); iter++) {
    rp = iter->resumePoint();
    if (rp) {
      break;
    }
  }

  // If none, take the entry resume point.
  if (!rp) {
    rp = entryResumePoint();
  }

  // The only blocks which do not have any entryResumePoint in Ion are the
  // SplitEdge blocks. SplitEdge blocks only have a Goto instruction before
  // the Range Analysis phase. In adjustInputs, we are manipulating
  // instructions which have a TypePolicy. So, as a Goto has no operand and
  // no type policy, the entry resume point should exist.
  MOZ_ASSERT(rp);

  // Flag all operands as being potentially used.
  while (rp) {
    for (size_t i = 0, end = rp->numOperands(); i < end; i++) {
      rp->getOperand(i)->setUseRemovedUnchecked();
    }
    rp = rp->caller();
  }
}

bool MBasicBlock::addPredecessor(TempAllocator& alloc, MBasicBlock* pred) {
  return addPredecessorPopN(alloc, pred, 0);
}

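// Add |pred| as a predecessor after |popped| values have been popped from its
// stack. Slots whose definitions differ between this block and |pred| get a
// phi (or an extra phi operand if a phi created in this block is already in
// place).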
bool MBasicBlock::addPredecessorPopN(TempAllocator& alloc, MBasicBlock* pred,
                                     uint32_t popped) {
  MOZ_ASSERT(pred);
  MOZ_ASSERT(predecessors_.length() > 0);

  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(pred->stackPosition_ == stackPosition_ + popped);

  for (uint32_t i = 0, e = stackPosition_; i < e; ++i) {
    MDefinition* mine = getSlot(i);
    MDefinition* other = pred->getSlot(i);

    if (mine != other) {
      // If the current instruction is a phi, and it was created in this
      // basic block, then we have already placed this phi and should
      // instead append to its operands.
      if (mine->isPhi() && mine->block() == this) {
        MOZ_ASSERT(predecessors_.length());
        if (!mine->toPhi()->addInputSlow(other)) {
          return false;
        }
      } else {
        // Otherwise, create a new phi node.
        MPhi* phi;
        if (mine->type() == other->type()) {
          phi = MPhi::New(alloc.fallible(), mine->type());
        } else {
          phi = MPhi::New(alloc.fallible());
        }
        if (!phi) {
          return false;
        }
        addPhi(phi);

        // Prime the phi for each predecessor, so input(x) comes from
        // predecessor(x).
        if (!phi->reserveLength(predecessors_.length() + 1)) {
          return false;
        }

        for (size_t j = 0, numPreds = predecessors_.length(); j < numPreds;
             ++j) {
          MOZ_ASSERT(predecessors_[j]->getSlot(i) == mine);
          phi->addInput(mine);
        }
        phi->addInput(other);

        setSlot(i, phi);
        if (entryResumePoint()) {
          entryResumePoint()->replaceOperand(i, phi);
        }
      }
    }
  }

  return predecessors_.append(pred);
}

bool MBasicBlock::addPredecessorSameInputsAs(MBasicBlock* pred,
                                             MBasicBlock* existingPred) {
  MOZ_ASSERT(pred);
  MOZ_ASSERT(predecessors_.length() > 0);

  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(!pred->successorWithPhis());

  if (!phisEmpty()) {
    size_t existingPosition = indexForPredecessor(existingPred);
    for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
      if (!iter->addInputSlow(iter->getOperand(existingPosition))) {
        return false;
      }
    }
  }

  if (!predecessors_.append(pred)) {
    return false;
  }
  return true;
}

bool MBasicBlock::addPredecessorWithoutPhis(MBasicBlock* pred) {
  // Predecessors must be finished.
  MOZ_ASSERT(pred && pred->hasLastIns());
  return predecessors_.append(pred);
}

bool MBasicBlock::addImmediatelyDominatedBlock(MBasicBlock* child) {
  return immediatelyDominated_.append(child);
}

void MBasicBlock::removeImmediatelyDominatedBlock(MBasicBlock* child) {
  for (size_t i = 0;; ++i) {
    MOZ_ASSERT(i < immediatelyDominated_.length(),
               "Dominated block to remove not present");
    if (immediatelyDominated_[i] == child) {
      immediatelyDominated_[i] = immediatelyDominated_.back();
      immediatelyDominated_.popBack();
      return;
    }
  }
}

void MBasicBlock::assertUsesAreNotWithin(MUseIterator use, MUseIterator end) {
#ifdef DEBUG
  for (; use != end; use++) {
    MOZ_ASSERT_IF(use->consumer()->isDefinition(),
                  use->consumer()->toDefinition()->block()->id() < id());
  }
#endif
}

AbortReason MBasicBlock::setBackedge(TempAllocator& alloc, MBasicBlock* pred) {
  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(hasLastIns());
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(pred->stackDepth() == entryResumePoint()->stackDepth());

  // We must be a pending loop header
  MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

  bool hadTypeChange = false;

  // Add exit definitions to each corresponding phi at the entry.
  if (!inheritPhisFromBackedge(alloc, pred, &hadTypeChange)) {
    return AbortReason::Alloc;
  }

  if (hadTypeChange) {
    return AbortReason::Disable;
  }

  // We are now a loop header proper
  kind_ = LOOP_HEADER;

  if (!predecessors_.append(pred)) {
    return AbortReason::Alloc;
  }

  return AbortReason::NoAbort;
}

bool MBasicBlock::setBackedgeWasm(MBasicBlock* pred, size_t paramCount) {
  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(hasLastIns());
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(stackDepth() + paramCount == pred->stackDepth());

  // We must be a pending loop header
  MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

  // Add exit definitions to each corresponding phi at the entry.
  // Note: Phis are inserted in the same order as the slots. (see
  // MBasicBlock::New)
  size_t slot = 0;
  for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++, slot++) {
    MPhi* entryDef = *phi;
    MDefinition* exitDef = pred->getSlot(slot);

    // Assert that we already placed phis for each slot.
    MOZ_ASSERT(entryDef->block() == this);

    // Assert that the phi already has the correct type.
    MOZ_ASSERT(entryDef->type() == exitDef->type());
    MOZ_ASSERT(entryDef->type() != MIRType::Value);

    if (entryDef == exitDef) {
      // If the exit def is the same as the entry def, make a redundant
      // phi. Since loop headers have exactly two incoming edges, we
      // know that that's just the first input.
      //
      // Note that we eliminate later rather than now, to avoid any
      // weirdness around pending continue edges which might still hold
      // onto phis.
      exitDef = entryDef->getOperand(0);
    }

    // Phis always have room for 2 operands, so this can't fail.
    MOZ_ASSERT(phi->numOperands() == 1);
    entryDef->addInlineInput(exitDef);

    // Two cases here: phis that correspond to locals, and phis that correspond
    // to loop parameters. Only phis for locals go in slots.
    if (slot < stackDepth()) {
      setSlot(slot, entryDef);
    }
  }

  // We are now a loop header proper
  kind_ = LOOP_HEADER;

  return predecessors_.append(pred);
}

void MBasicBlock::clearLoopHeader() {
  MOZ_ASSERT(isLoopHeader());
  kind_ = NORMAL;
}

void MBasicBlock::setLoopHeader(MBasicBlock* newBackedge) {
  MOZ_ASSERT(!isLoopHeader());
  kind_ = LOOP_HEADER;

  size_t numPreds = numPredecessors();
  MOZ_ASSERT(numPreds != 0);

  size_t lastIndex = numPreds - 1;
  size_t oldIndex = 0;
  for (;; ++oldIndex) {
    MOZ_ASSERT(oldIndex < numPreds);
    MBasicBlock* pred = getPredecessor(oldIndex);
    if (pred == newBackedge) {
      break;
    }
  }

  // Set the loop backedge to be the last element in predecessors_.
  std::swap(predecessors_[oldIndex], predecessors_[lastIndex]);

  // If we have phis, reorder their operands accordingly.
  if (!phisEmpty()) {
    getPredecessor(oldIndex)->setSuccessorWithPhis(this, oldIndex);
    getPredecessor(lastIndex)->setSuccessorWithPhis(this, lastIndex);
    for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
      MPhi* phi = *iter;
      MDefinition* last = phi->getOperand(oldIndex);
      MDefinition* old = phi->getOperand(lastIndex);
      phi->replaceOperand(oldIndex, old);
      phi->replaceOperand(lastIndex, last);
    }
  }

  MOZ_ASSERT(newBackedge->loopHeaderOfBackedge() == this);
  MOZ_ASSERT(backedge() == newBackedge);
}

size_t MBasicBlock::getSuccessorIndex(MBasicBlock* block) const {
  MOZ_ASSERT(lastIns());
  for (size_t i = 0; i < numSuccessors(); i++) {
    if (getSuccessor(i) == block) {
      return i;
    }
  }
  MOZ_CRASH("Invalid successor");
}

size_t MBasicBlock::getPredecessorIndex(MBasicBlock* block) const {
  for (size_t i = 0, e = numPredecessors(); i < e; ++i) {
    if (getPredecessor(i) == block) {
      return i;
    }
  }
  MOZ_CRASH("Invalid predecessor");
}

void MBasicBlock::replaceSuccessor(size_t pos, MBasicBlock* split) {
  MOZ_ASSERT(lastIns());

  // Note, during split-critical-edges, successors-with-phis is not yet set.
  // During PAA, this case is handled before we enter.
  MOZ_ASSERT_IF(successorWithPhis_, successorWithPhis_ != getSuccessor(pos));

  lastIns()->replaceSuccessor(pos, split);
}

void MBasicBlock::replacePredecessor(MBasicBlock* old, MBasicBlock* split) {
  for (size_t i = 0; i < numPredecessors(); i++) {
    if (getPredecessor(i) == old) {
      predecessors_[i] = split;

#ifdef DEBUG
      // The same block should not appear twice in the predecessor list.
      for (size_t j = i; j < numPredecessors(); j++) {
        MOZ_ASSERT(predecessors_[j] != old);
      }
#endif

      return;
    }
  }

  MOZ_CRASH("predecessor was not found");
}

void MBasicBlock::clearDominatorInfo() {
  setImmediateDominator(nullptr);
  immediatelyDominated_.clear();
  numDominated_ = 0;
}

void MBasicBlock::removePredecessorWithoutPhiOperands(MBasicBlock* pred,
                                                      size_t predIndex) {
  // If we're removing the last backedge, this is no longer a loop.
  if (isLoopHeader() && hasUniqueBackedge() && backedge() == pred) {
    clearLoopHeader();
  }

  // Adjust phis. Note that this can leave redundant phis behind.
  // Don't adjust successorWithPhis() if we haven't constructed this
  // information yet.
  if (pred->successorWithPhis()) {
    MOZ_ASSERT(pred->positionInPhiSuccessor() == predIndex);
    pred->clearSuccessorWithPhis();
    for (size_t j = predIndex + 1; j < numPredecessors(); j++) {
      getPredecessor(j)->setSuccessorWithPhis(this, j - 1);
    }
  }

  // Remove from pred list.
  predecessors_.erase(predecessors_.begin() + predIndex);
}

void MBasicBlock::removePredecessor(MBasicBlock* pred) {
  size_t predIndex = getPredecessorIndex(pred);

  // Remove the phi operands.
  for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
    iter->removeOperand(predIndex);
  }

  // Now we can call the underlying function, which expects that phi
  // operands have been removed.
  removePredecessorWithoutPhiOperands(pred, predIndex);
}

void MBasicBlock::inheritPhis(MBasicBlock* header) {
  MResumePoint* headerRp = header->entryResumePoint();
  size_t stackDepth = headerRp->stackDepth();
  for (size_t slot = 0; slot < stackDepth; slot++) {
    MDefinition* exitDef = getSlot(slot);
    MDefinition* loopDef = headerRp->getOperand(slot);
    if (loopDef->block() != header) {
      MOZ_ASSERT(loopDef->block()->id() < header->id());
      MOZ_ASSERT(loopDef == exitDef);
      continue;
    }

    // Phis are allocated by NewPendingLoopHeader.
    MPhi* phi = loopDef->toPhi();
    MOZ_ASSERT(phi->numOperands() == 2);

    // The entry definition is always the leftmost input to the phi.
    MDefinition* entryDef = phi->getOperand(0);

    if (entryDef != exitDef) {
      continue;
    }

    // If the entryDef is the same as exitDef, then we must propagate the
    // phi down to this successor. This chance was missed as part of
    // setBackedge() because exits are not captured in resume points.
    setSlot(slot, phi);
  }
}

bool MBasicBlock::inheritPhisFromBackedge(TempAllocator& alloc,
                                          MBasicBlock* backedge,
                                          bool* hadTypeChange) {
  // We must be a pending loop header
  MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

  size_t stackDepth = entryResumePoint()->stackDepth();
  for (size_t slot = 0; slot < stackDepth; slot++) {
    // Get the value stack-slot of the back edge.
    MDefinition* exitDef = backedge->getSlot(slot);

    // Get the value of the loop header.
    MDefinition* loopDef = entryResumePoint()->getOperand(slot);
    if (loopDef->block() != this) {
      // If we are finishing a pending loop header, then we need to ensure
      // that all operands are phis. This is usually the case, except for
      // objects/arrays built with generators, in which case we share the
      // same allocations across all blocks.
      MOZ_ASSERT(loopDef->block()->id() < id());
      MOZ_ASSERT(loopDef == exitDef);
      continue;
    }

    // Phis are allocated by NewPendingLoopHeader.
    MPhi* entryDef = loopDef->toPhi();
    MOZ_ASSERT(entryDef->block() == this);

    if (entryDef == exitDef) {
      // If the exit def is the same as the entry def, make a redundant
      // phi. Since loop headers have exactly two incoming edges, we
      // know that that's just the first input.
      //
      // Note that we eliminate later rather than now, to avoid any
      // weirdness around pending continue edges which might still hold
      // onto phis.
      exitDef = entryDef->getOperand(0);
    }

    bool typeChange = false;

    if (!entryDef->addInputSlow(exitDef)) {
      return false;
    }
    if (!entryDef->checkForTypeChange(alloc, exitDef, &typeChange)) {
      return false;
    }
    *hadTypeChange |= typeChange;
  }

  return true;
}

bool MBasicBlock::specializePhis(TempAllocator& alloc) {
  if (specialized_) {
    return true;
  }

  specialized_ = true;
  for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
    MPhi* phi = *iter;
    if (!phi->specializeType(alloc)) {
      return false;
    }
  }
  return true;
}

MTest* MBasicBlock::immediateDominatorBranch(BranchDirection* pdirection) {
  *pdirection = FALSE_BRANCH;

  if (numPredecessors() != 1) {
    return nullptr;
  }

  MBasicBlock* dom = immediateDominator();
  if (dom != getPredecessor(0)) {
    return nullptr;
  }

  // Look for a trailing MTest branching to this block.
  MInstruction* ins = dom->lastIns();
  if (ins->isTest()) {
    MTest* test = ins->toTest();

    MOZ_ASSERT(test->ifTrue() == this || test->ifFalse() == this);
    if (test->ifTrue() == this && test->ifFalse() == this) {
      return nullptr;
    }

    *pdirection = (test->ifTrue() == this) ? TRUE_BRANCH : FALSE_BRANCH;
    return test;
  }

  return nullptr;
}

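// A BackupPoint records the last instruction and the stack slots of |current|
// so that restore() can later discard everything added after this point and
// put the block back into this state.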
MBasicBlock::BackupPoint::BackupPoint(MBasicBlock* current)
    : current_(current),
      lastIns_(current->hasAnyIns() ? *current->rbegin() : nullptr),
      stackPosition_(current->stackDepth()),
      slots_()
#ifdef DEBUG
      ,
      lastPhi_(!current->phisEmpty() ? *current->phis_.rbegin() : nullptr),
      predecessorsCheckSum_(computePredecessorsCheckSum(current)),
      instructionsCheckSum_(computeInstructionsCheckSum(current)),
      id_(current->id()),
      callerResumePoint_(current->callerResumePoint()),
      entryResumePoint_(current->entryResumePoint())
#endif
{
  // The block is not yet jumping into a block of an inlined function.
  MOZ_ASSERT(current->outerResumePoint_ == nullptr);
}

bool MBasicBlock::BackupPoint::init(TempAllocator& alloc) {
  if (!slots_.init(alloc, stackPosition_)) {
    return false;
  }
  for (size_t i = 0, e = stackPosition_; i < e; ++i) {
    slots_[i] = current_->slots_[i];
  }
  return true;
}

#ifdef DEBUG
uintptr_t MBasicBlock::BackupPoint::computePredecessorsCheckSum(
    MBasicBlock* block) {
  uintptr_t hash = 0;
  for (size_t i = 0; i < block->numPredecessors(); i++) {
    MBasicBlock* pred = block->getPredecessor(i);
    uintptr_t data = reinterpret_cast<uintptr_t>(pred);
    hash = data + (hash << 6) + (hash << 16) - hash;
  }
  return hash;
}

HashNumber MBasicBlock::BackupPoint::computeInstructionsCheckSum(
    MBasicBlock* block) {
  HashNumber h = 0;
  MOZ_ASSERT_IF(lastIns_, lastIns_->block() == block);
  for (MInstructionIterator ins = block->begin(); ins != block->end(); ++ins) {
    h += ins->valueHash();
    h += h << 10;
    h ^= h >> 6;
  }
  return h;
}
#endif

MBasicBlock* MBasicBlock::BackupPoint::restore() {
  // No extra Phi got added.
  MOZ_ASSERT((!current_->phisEmpty() ? *current_->phis_.rbegin() : nullptr) ==
             lastPhi_);

  MOZ_ASSERT_IF(lastIns_, lastIns_->block() == current_);
  MOZ_ASSERT_IF(lastIns_, !lastIns_->isDiscarded());

  if (!current_->graph().removeSuccessorBlocks(current_)) {
    return nullptr;
  }

  MInstructionIterator lastIns(lastIns_ ? ++(current_->begin(lastIns_))
                                        : current_->begin());
  current_->discardAllInstructionsStartingAt(lastIns);
  current_->clearOuterResumePoint();

  MOZ_ASSERT(current_->slots_.length() >= stackPosition_);
  if (current_->stackPosition_ != stackPosition_) {
    current_->setStackDepth(stackPosition_);
  }
  for (size_t i = 0, e = stackPosition_; i < e; ++i) {
    current_->slots_[i] = slots_[i];
  }

  MOZ_ASSERT(current_->id() == id_);
  MOZ_ASSERT(predecessorsCheckSum_ == computePredecessorsCheckSum(current_));
  MOZ_ASSERT(instructionsCheckSum_ == computeInstructionsCheckSum(current_));
  MOZ_ASSERT(current_->callerResumePoint() == callerResumePoint_);
  MOZ_ASSERT(current_->entryResumePoint() == entryResumePoint_);

  return current_;
}

void MBasicBlock::dumpStack(GenericPrinter& out) {
#ifdef DEBUG
  out.printf(" %-3s %-16s %-6s %-10s\n", "#", "name", "copyOf", "first/next");
  out.printf("-------------------------------------------\n");
  for (uint32_t i = 0; i < stackPosition_; i++) {
    out.printf(" %-3u", i);
    out.printf(" %-16p\n", (void*)slots_[i]);
  }
#endif
}

void MBasicBlock::dumpStack() {
  Fprinter out(stderr);
  dumpStack(out);
  out.finish();
}

void MIRGraph::dump(GenericPrinter& out) {
#ifdef JS_JITSPEW
  for (MBasicBlockIterator iter(begin()); iter != end(); iter++) {
    iter->dump(out);
    out.printf("\n");
  }
#endif
}

void MIRGraph::dump() {
  Fprinter out(stderr);
  dump(out);
  out.finish();
}

void MBasicBlock::dump(GenericPrinter& out) {
#ifdef JS_JITSPEW
  out.printf("block%u:%s%s%s\n", id(), isLoopHeader() ? " (loop header)" : "",
             unreachable() ? " (unreachable)" : "",
             isMarked() ? " (marked)" : "");
  if (MResumePoint* resume = entryResumePoint()) {
    resume->dump(out);
  }
  for (MPhiIterator iter(phisBegin()); iter != phisEnd(); iter++) {
    iter->dump(out);
  }
  for (MInstructionIterator iter(begin()); iter != end(); iter++) {
    iter->dump(out);
  }
#endif
}

void MBasicBlock::dump() {
  Fprinter out(stderr);
  dump(out);
  out.finish();
}