/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_CacheIRCompiler_h
#define jit_CacheIRCompiler_h

#include "mozilla/Maybe.h"

#include "jit/CacheIR.h"
#include "jit/JitOptions.h"
#include "jit/SharedICRegisters.h"

namespace js {
namespace jit {

class BaselineCacheIRCompiler;
class IonCacheIRCompiler;

// The ops below are defined in CacheIRCompiler and codegen is shared between
// BaselineCacheIRCompiler and IonCacheIRCompiler.
#define CACHE_IR_SHARED_OPS(_) \
  _(GuardToObject) \
  _(GuardIsNullOrUndefined) \
  _(GuardIsNotNullOrUndefined) \
  _(GuardIsNull) \
  _(GuardIsUndefined) \
  _(GuardIsObjectOrNull) \
  _(GuardToBoolean) \
  _(GuardToString) \
  _(GuardToSymbol) \
  _(GuardToBigInt) \
  _(GuardIsNumber) \
  _(GuardToInt32) \
  _(GuardToInt32Index) \
  _(GuardToTypedArrayIndex) \
  _(GuardToInt32ModUint32) \
  _(GuardToUint8Clamped) \
  _(GuardType) \
  _(GuardClass) \
  _(GuardGroupHasUnanalyzedNewScript) \
  _(GuardIsExtensible) \
  _(GuardFunctionIsNative) \
  _(GuardFunctionIsConstructor) \
  _(GuardSpecificNativeFunction) \
  _(GuardFunctionPrototype) \
  _(GuardIsNativeObject) \
  _(GuardIsProxy) \
  _(GuardNotDOMProxy) \
  _(GuardSpecificInt32Immediate) \
  _(GuardMagicValue) \
  _(GuardNoDenseElements) \
  _(GuardAndGetNumberFromString) \
  _(GuardAndGetNumberFromBoolean) \
  _(GuardAndGetIndexFromString) \
  _(GuardIndexIsNonNegative) \
  _(GuardIndexGreaterThanDenseCapacity) \
  _(GuardIndexGreaterThanArrayLength) \
  _(GuardIndexIsValidUpdateOrAdd) \
  _(GuardIndexGreaterThanDenseInitLength) \
  _(GuardTagNotEqual) \
  _(GuardXrayExpandoShapeAndDefaultProto) \
  _(GuardNoAllocationMetadataBuilder) \
  _(GuardObjectGroupNotPretenured) \
  _(GuardFunctionHasJitEntry) \
  _(GuardNotClassConstructor) \
  _(LoadObject) \
  _(LoadProto) \
  _(LoadEnclosingEnvironment) \
  _(LoadWrapperTarget) \
  _(LoadValueTag) \
  _(LoadDOMExpandoValue) \
  _(LoadDOMExpandoValueIgnoreGeneration) \
  _(LoadUndefinedResult) \
  _(LoadBooleanResult) \
  _(LoadInt32ArrayLengthResult) \
  _(DoubleAddResult) \
  _(DoubleSubResult) \
  _(DoubleMulResult) \
  _(DoubleDivResult) \
  _(DoubleModResult) \
  _(Int32AddResult) \
  _(Int32SubResult) \
  _(Int32MulResult) \
  _(Int32DivResult) \
  _(Int32ModResult) \
  _(Int32BitOrResult) \
  _(Int32BitXorResult) \
  _(Int32BitAndResult) \
  _(Int32LeftShiftResult) \
  _(Int32RightShiftResult) \
  _(Int32URightShiftResult) \
  _(Int32NegationResult) \
  _(Int32NotResult) \
  _(Int32IncResult) \
  _(Int32DecResult) \
  _(DoubleIncResult) \
  _(DoubleDecResult) \
  _(DoubleNegationResult) \
  _(BigIntAddResult) \
  _(BigIntSubResult) \
  _(BigIntMulResult) \
  _(BigIntDivResult) \
  _(BigIntModResult) \
  _(BigIntPowResult) \
  _(BigIntBitOrResult) \
  _(BigIntBitXorResult) \
  _(BigIntBitAndResult) \
  _(BigIntLeftShiftResult) \
  _(BigIntRightShiftResult) \
  _(BigIntNegationResult) \
  _(BigIntNotResult) \
  _(BigIntIncResult) \
  _(BigIntDecResult) \
  _(TruncateDoubleToUInt32) \
  _(LoadArgumentsObjectLengthResult) \
  _(LoadFunctionLengthResult) \
  _(LoadStringLengthResult) \
  _(LoadStringCharResult) \
  _(LoadArgumentsObjectArgResult) \
  _(LoadInstanceOfObjectResult) \
  _(LoadTypedObjectResult) \
  _(LoadDenseElementResult) \
  _(LoadDenseElementHoleResult) \
  _(LoadDenseElementExistsResult) \
  _(LoadDenseElementHoleExistsResult) \
  _(LoadTypedElementExistsResult) \
  _(LoadTypedElementResult) \
  _(LoadObjectResult) \
  _(LoadTypeOfObjectResult) \
  _(LoadInt32TruthyResult) \
  _(LoadDoubleTruthyResult) \
  _(LoadStringTruthyResult) \
  _(LoadObjectTruthyResult) \
  _(LoadBigIntTruthyResult) \
  _(LoadNewObjectFromTemplateResult) \
  _(CompareObjectResult) \
  _(CompareSymbolResult) \
  _(CompareInt32Result) \
  _(CompareDoubleResult) \
  _(CompareBigIntResult) \
  _(CompareBigIntInt32Result) \
  _(CompareInt32BigIntResult) \
  _(CompareBigIntNumberResult) \
  _(CompareNumberBigIntResult) \
  _(CompareBigIntStringResult) \
  _(CompareStringBigIntResult) \
  _(CompareObjectUndefinedNullResult) \
  _(ArrayJoinResult) \
  _(StoreTypedElement) \
  _(StoreTypedObjectScalarProperty) \
  _(CallPrintString) \
  _(Breakpoint) \
  _(MegamorphicLoadSlotResult) \
  _(MegamorphicLoadSlotByValueResult) \
  _(MegamorphicStoreSlot) \
  _(MegamorphicHasPropResult) \
  _(CallObjectHasSparseElementResult) \
  _(CallInt32ToString) \
  _(CallNumberToString) \
  _(BooleanToString) \
  _(CallStringConcatResult) \
  _(CallIsSuspendedGeneratorResult) \
  _(CallNativeGetElementResult) \
  _(CallProxyHasPropResult) \
  _(CallProxyGetByValueResult) \
  _(CallGetSparseElementResult) \
  _(MetaTwoByte) \
  _(WrapResult)

// [SMDOC] CacheIR Value Representation and Tracking
//
// While compiling an IC stub, the CacheIR compiler needs to keep track of the
// physical location of each logical piece of data it cares about, and to
// ensure that if a stub fails, the input state can be restored so that a
// subsequent stub can attempt to provide a value.
//
// OperandIds are created in the CacheIR front-end to keep track of values that
// are passed between CacheIR ops during the execution of a given CacheIR stub.
// In the CacheRegisterAllocator these OperandIds are given OperandLocations
// that represent the physical location of the OperandId at a given point in
// time during register allocation.
//
// In the CacheRegisterAllocator, physical locations include registers and
// stack slots, and also track whether the value has been unboxed. Constants
// are represented separately to allow for on-demand materialization.
//
// Intra-op register allocation:
//
// During the emission of a CacheIR op, code can ask the CacheRegisterAllocator
// for access to a particular OperandId, and the register allocator will
// generate the required code to fill that request.
//
// Input OperandIds should be considered immutable, and must not be mutated
// during the execution of a stub.
//
// There are also a number of RAII classes that interact with the register
// allocator, in order to provide access to more registers than just those
// provided for by the OperandIds:
//
// - AutoOutputRegister: The register which will hold the output value of the
//   stub.
// - AutoScratchRegister: By default, an arbitrary scratch register; however,
//   a specific register can be requested.
// - AutoScratchRegisterMaybeOutput: An arbitrary scratch register, but the
//   output register may be reused as well.
//
// These RAII classes take ownership of a register for the duration of their
// lifetime so it can be used for computation or output. The register
// allocator can spill values with OperandLocations in order to try to ensure
// that a register is made available for use.
//
// If a specific register is required (via AutoScratchRegister), it should be
// the first register acquired, as the register allocator will be unable to
// allocate the fixed register if the current op is already using it for
// something else.
//
// If no register can be provided after attempting to spill, a
// MOZ_RELEASE_ASSERT ensures the browser will crash. The register allocator
// is not provided enough information in its current design to insert spills
// and fills at arbitrary locations, and so it can fail to find an allocation
// solution. However, this will only happen within the implementation of an
// operand emitter, and because the cache register allocator is mostly
// deterministic, so long as the operand id emitter is tested, this won't
// suddenly crop up on an arbitrary webpage. It's worth noting that the most
// difficult platform to support is x86-32, because it has the fewest
// registers available.
//
// FailurePaths checkpoint the state of the register allocator so that the
// input state can be recomputed from the current state before jumping to the
// next stub in the IC chain. An important invariant is that the FailurePath
// must be allocated for each op after all the manipulation of
// OperandLocations has happened, so that its recording is correct.
//
// Inter-op register allocation:
//
// The RAII register management classes are RAII because all register state
// outside the OperandLocations is reset before the compilation of each
// individual CacheIR op. This means that you cannot rely on a value surviving
// between ops, even if you use the ability of AutoScratchRegister to name a
// specific register. Values that need to be preserved between ops must be
// given an OperandId.

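// Illustrative sketch only (not one of the ops above): a typical shared op
// emitter follows the protocol described in this comment. The allocator and
// RAII calls below are the real API declared in this file;
// |emitExampleResult| itself is a hypothetical name:
//
//   bool CacheIRCompiler::emitExampleResult() {
//     AutoOutputRegister output(*this);
//     ValueOperand val =
//         allocator.useValueRegister(masm, reader.valOperandId());
//     AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
//
//     // Allocate the FailurePath only after all OperandLocation
//     // manipulation is done (see the invariant above).
//     FailurePath* failure;
//     if (!addFailurePath(&failure)) {
//       return false;
//     }
//
//     // ... emit guards that jump to failure->label() on mismatch, then
//     // write the result into |output| ...
//     return true;
//   }
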
// Represents a Value on the Baseline frame's expression stack. Slot 0 is the
// value on top of the stack (the most recently pushed value), slot 1 is the
// value pushed before that, etc.
class BaselineFrameSlot {
  uint32_t slot_;

 public:
  explicit BaselineFrameSlot(uint32_t slot) : slot_(slot) {}
  uint32_t slot() const { return slot_; }

  bool operator==(const BaselineFrameSlot& other) const {
    return slot_ == other.slot_;
  }
  bool operator!=(const BaselineFrameSlot& other) const {
    return slot_ != other.slot_;
  }
};

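// For example, for an IC whose inputs are the two values on top of the
// expression stack (say |obj[key]|, with |obj| pushed before |key|):
//
//   BaselineFrameSlot keySlot(0);  // topmost value
//   BaselineFrameSlot objSlot(1);  // pushed before it
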
// OperandLocation represents the location of an OperandId. The operand is
// either in a register or on the stack, and is either boxed or unboxed.
class OperandLocation {
 public:
  enum Kind {
    Uninitialized = 0,
    PayloadReg,
    DoubleReg,
    ValueReg,
    PayloadStack,
    ValueStack,
    BaselineFrame,
    Constant,
  };

 private:
  Kind kind_;

  union Data {
    struct {
      Register reg;
      JSValueType type;
    } payloadReg;
    FloatRegister doubleReg;
    ValueOperand valueReg;
    struct {
      uint32_t stackPushed;
      JSValueType type;
    } payloadStack;
    uint32_t valueStackPushed;
    BaselineFrameSlot baselineFrameSlot;
    Value constant;

    Data() : valueStackPushed(0) {}
  };
  Data data_;

 public:
  OperandLocation() : kind_(Uninitialized) {}

  Kind kind() const { return kind_; }

  void setUninitialized() { kind_ = Uninitialized; }

  ValueOperand valueReg() const {
    MOZ_ASSERT(kind_ == ValueReg);
    return data_.valueReg;
  }
  Register payloadReg() const {
    MOZ_ASSERT(kind_ == PayloadReg);
    return data_.payloadReg.reg;
  }
  FloatRegister doubleReg() const {
    MOZ_ASSERT(kind_ == DoubleReg);
    return data_.doubleReg;
  }
  uint32_t payloadStack() const {
    MOZ_ASSERT(kind_ == PayloadStack);
    return data_.payloadStack.stackPushed;
  }
  uint32_t valueStack() const {
    MOZ_ASSERT(kind_ == ValueStack);
    return data_.valueStackPushed;
  }
  JSValueType payloadType() const {
    if (kind_ == PayloadReg) {
      return data_.payloadReg.type;
    }
    MOZ_ASSERT(kind_ == PayloadStack);
    return data_.payloadStack.type;
  }
  Value constant() const {
    MOZ_ASSERT(kind_ == Constant);
    return data_.constant;
  }
  BaselineFrameSlot baselineFrameSlot() const {
    MOZ_ASSERT(kind_ == BaselineFrame);
    return data_.baselineFrameSlot;
  }

  void setPayloadReg(Register reg, JSValueType type) {
    kind_ = PayloadReg;
    data_.payloadReg.reg = reg;
    data_.payloadReg.type = type;
  }
  void setDoubleReg(FloatRegister reg) {
    kind_ = DoubleReg;
    data_.doubleReg = reg;
  }
  void setValueReg(ValueOperand reg) {
    kind_ = ValueReg;
    data_.valueReg = reg;
  }
  void setPayloadStack(uint32_t stackPushed, JSValueType type) {
    kind_ = PayloadStack;
    data_.payloadStack.stackPushed = stackPushed;
    data_.payloadStack.type = type;
  }
  void setValueStack(uint32_t stackPushed) {
    kind_ = ValueStack;
    data_.valueStackPushed = stackPushed;
  }
  void setConstant(const Value& v) {
    kind_ = Constant;
    data_.constant = v;
  }
  void setBaselineFrame(BaselineFrameSlot slot) {
    kind_ = BaselineFrame;
    data_.baselineFrameSlot = slot;
  }

  bool isUninitialized() const { return kind_ == Uninitialized; }
  bool isInRegister() const { return kind_ == PayloadReg || kind_ == ValueReg; }
  bool isOnStack() const {
    return kind_ == PayloadStack || kind_ == ValueStack;
  }

  size_t stackPushed() const {
    if (kind_ == PayloadStack) {
      return data_.payloadStack.stackPushed;
    }
    MOZ_ASSERT(kind_ == ValueStack);
    return data_.valueStackPushed;
  }
  size_t stackSizeInBytes() const {
    if (kind_ == PayloadStack) {
      return sizeof(uintptr_t);
    }
    MOZ_ASSERT(kind_ == ValueStack);
    return sizeof(js::Value);
  }
  void adjustStackPushed(int32_t diff) {
    if (kind_ == PayloadStack) {
      data_.payloadStack.stackPushed += diff;
      return;
    }
    MOZ_ASSERT(kind_ == ValueStack);
    data_.valueStackPushed += diff;
  }

  bool aliasesReg(Register reg) const {
    if (kind_ == PayloadReg) {
      return payloadReg() == reg;
    }
    if (kind_ == ValueReg) {
      return valueReg().aliases(reg);
    }
    return false;
  }
  bool aliasesReg(ValueOperand reg) const {
#if defined(JS_NUNBOX32)
    return aliasesReg(reg.typeReg()) || aliasesReg(reg.payloadReg());
#else
    return aliasesReg(reg.valueReg());
#endif
  }

  bool aliasesReg(const OperandLocation& other) const;

  bool operator==(const OperandLocation& other) const;
  bool operator!=(const OperandLocation& other) const {
    return !operator==(other);
  }
};

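// Sketch of how an operand's location can evolve over the course of a stub
// (illustrative; |R0| is a Baseline ValueOperand from SharedICRegisters.h and
// the byte count is arbitrary):
//
//   OperandLocation loc;             // kind() == Uninitialized
//   loc.setValueReg(R0);             // boxed Value held in a register
//   MOZ_ASSERT(loc.isInRegister());
//   loc.setValueStack(8);            // spilled: now addressed relative to
//   MOZ_ASSERT(loc.isOnStack());     // the number of bytes pushed
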
struct SpilledRegister {
  Register reg;
  uint32_t stackPushed;

  SpilledRegister(Register reg, uint32_t stackPushed)
      : reg(reg), stackPushed(stackPushed) {}
  bool operator==(const SpilledRegister& other) const {
    return reg == other.reg && stackPushed == other.stackPushed;
  }
  bool operator!=(const SpilledRegister& other) const {
    return !(*this == other);
  }
};

using SpilledRegisterVector = Vector<SpilledRegister, 2, SystemAllocPolicy>;

// Class to track and allocate registers while emitting IC code.
class MOZ_RAII CacheRegisterAllocator {
  // The original location of the inputs to the cache.
  Vector<OperandLocation, 4, SystemAllocPolicy> origInputLocations_;

  // The current location of each operand.
  Vector<OperandLocation, 8, SystemAllocPolicy> operandLocations_;

  // Free lists for value- and payload-slots on the stack.
  Vector<uint32_t, 2, SystemAllocPolicy> freeValueSlots_;
  Vector<uint32_t, 2, SystemAllocPolicy> freePayloadSlots_;

  // The registers allocated while emitting the current CacheIR op.
  // This prevents us from allocating a register and then immediately
  // clobbering it for something else, while we're still holding on to it.
  LiveGeneralRegisterSet currentOpRegs_;

  const AllocatableGeneralRegisterSet allocatableRegs_;

  // Registers that are currently unused and available.
  AllocatableGeneralRegisterSet availableRegs_;

  // Registers that are available, but before use they must be saved and
  // then restored when returning from the stub.
  AllocatableGeneralRegisterSet availableRegsAfterSpill_;

  // Registers we took from availableRegsAfterSpill_ and spilled to the stack.
  SpilledRegisterVector spilledRegs_;

  // The number of bytes pushed on the native stack.
  uint32_t stackPushed_;

#ifdef DEBUG
  // Flag used to assert individual CacheIR instructions don't allocate
  // registers after calling addFailurePath.
  bool addedFailurePath_;
#endif

  // The index of the CacheIR instruction we're currently emitting.
  uint32_t currentInstruction_;

  const CacheIRWriter& writer_;

  CacheRegisterAllocator(const CacheRegisterAllocator&) = delete;
  CacheRegisterAllocator& operator=(const CacheRegisterAllocator&) = delete;

  void freeDeadOperandLocations(MacroAssembler& masm);

  void spillOperandToStack(MacroAssembler& masm, OperandLocation* loc);
  void spillOperandToStackOrRegister(MacroAssembler& masm,
                                     OperandLocation* loc);

  void popPayload(MacroAssembler& masm, OperandLocation* loc, Register dest);
  void popValue(MacroAssembler& masm, OperandLocation* loc, ValueOperand dest);
  Address valueAddress(MacroAssembler& masm, OperandLocation* loc);

#ifdef DEBUG
  void assertValidState() const;
#endif

 public:
  friend class AutoScratchRegister;
  friend class AutoScratchRegisterExcluding;

  explicit CacheRegisterAllocator(const CacheIRWriter& writer)
      : allocatableRegs_(GeneralRegisterSet::All()),
        stackPushed_(0),
#ifdef DEBUG
        addedFailurePath_(false),
#endif
        currentInstruction_(0),
        writer_(writer) {
  }

  MOZ_MUST_USE bool init();

  void initAvailableRegs(const AllocatableGeneralRegisterSet& available) {
    availableRegs_ = available;
  }
  void initAvailableRegsAfterSpill();

  void fixupAliasedInputs(MacroAssembler& masm);

  OperandLocation operandLocation(size_t i) const {
    return operandLocations_[i];
  }
  void setOperandLocation(size_t i, const OperandLocation& loc) {
    operandLocations_[i] = loc;
  }

  OperandLocation origInputLocation(size_t i) const {
    return origInputLocations_[i];
  }
  void initInputLocation(size_t i, ValueOperand reg) {
    origInputLocations_[i].setValueReg(reg);
    operandLocations_[i].setValueReg(reg);
  }
  void initInputLocation(size_t i, Register reg, JSValueType type) {
    origInputLocations_[i].setPayloadReg(reg, type);
    operandLocations_[i].setPayloadReg(reg, type);
  }
  void initInputLocation(size_t i, FloatRegister reg) {
    origInputLocations_[i].setDoubleReg(reg);
    operandLocations_[i].setDoubleReg(reg);
  }
  void initInputLocation(size_t i, const Value& v) {
    origInputLocations_[i].setConstant(v);
    operandLocations_[i].setConstant(v);
  }
  void initInputLocation(size_t i, BaselineFrameSlot slot) {
    origInputLocations_[i].setBaselineFrame(slot);
    operandLocations_[i].setBaselineFrame(slot);
  }

  void initInputLocation(size_t i, const TypedOrValueRegister& reg);
  void initInputLocation(size_t i, const ConstantOrRegister& value);

  const SpilledRegisterVector& spilledRegs() const { return spilledRegs_; }

  MOZ_MUST_USE bool setSpilledRegs(const SpilledRegisterVector& regs) {
    spilledRegs_.clear();
    return spilledRegs_.appendAll(regs);
  }

  void nextOp() {
#ifdef DEBUG
    assertValidState();
    addedFailurePath_ = false;
#endif
    currentOpRegs_.clear();
    currentInstruction_++;
  }

#ifdef DEBUG
  void setAddedFailurePath() {
    MOZ_ASSERT(!addedFailurePath_, "multiple failure paths for instruction");
    addedFailurePath_ = true;
  }
#endif

  bool isDeadAfterInstruction(OperandId opId) const {
    return writer_.operandIsDead(opId.id(), currentInstruction_ + 1);
  }

  uint32_t stackPushed() const { return stackPushed_; }
  void setStackPushed(uint32_t pushed) { stackPushed_ = pushed; }

  bool isAllocatable(Register reg) const { return allocatableRegs_.has(reg); }

  // Allocates a new register.
  Register allocateRegister(MacroAssembler& masm);
  ValueOperand allocateValueRegister(MacroAssembler& masm);

  void allocateFixedRegister(MacroAssembler& masm, Register reg);
  void allocateFixedValueRegister(MacroAssembler& masm, ValueOperand reg);

  // Releases a register so it can be reused later.
  void releaseRegister(Register reg) {
    MOZ_ASSERT(currentOpRegs_.has(reg));
    availableRegs_.add(reg);
    currentOpRegs_.take(reg);
  }
  void releaseValueRegister(ValueOperand reg) {
#ifdef JS_NUNBOX32
    releaseRegister(reg.payloadReg());
    releaseRegister(reg.typeReg());
#else
    releaseRegister(reg.valueReg());
#endif
  }

  // Removes spilled values from the native stack. This should only be
  // called after all registers have been allocated.
  void discardStack(MacroAssembler& masm);

  Address addressOf(MacroAssembler& masm, BaselineFrameSlot slot) const;
  BaseValueIndex addressOf(MacroAssembler& masm, Register argcReg,
                           BaselineFrameSlot slot) const;

  // Returns the register for the given operand. If the operand is currently
  // not in a register, it will load it into one.
  ValueOperand useValueRegister(MacroAssembler& masm, ValOperandId val);
  ValueOperand useFixedValueRegister(MacroAssembler& masm, ValOperandId valId,
                                     ValueOperand reg);
  Register useRegister(MacroAssembler& masm, TypedOperandId typedId);

  ConstantOrRegister useConstantOrRegister(MacroAssembler& masm,
                                           ValOperandId val);

  // Allocates an output register for the given operand.
  Register defineRegister(MacroAssembler& masm, TypedOperandId typedId);
  ValueOperand defineValueRegister(MacroAssembler& masm, ValOperandId val);

  // Loads (potentially coercing) and unboxes a value into a float register.
  // This is infallible, as there should have been a previous guard
  // to ensure the ValOperandId is already a number.
  void ensureDoubleRegister(MacroAssembler& masm, NumberOperandId op,
                            FloatRegister dest);

  // Returns |val|'s JSValueType or JSVAL_TYPE_UNKNOWN.
  JSValueType knownType(ValOperandId val) const;

  // Emits code to restore registers and stack to the state at the start of
  // the stub.
  void restoreInputState(MacroAssembler& masm, bool discardStack = true);

  // Returns the set of registers storing the IC input operands.
  GeneralRegisterSet inputRegisterSet() const;

  void saveIonLiveRegisters(MacroAssembler& masm, LiveRegisterSet liveRegs,
                            Register scratch, IonScript* ionScript);
  void restoreIonLiveRegisters(MacroAssembler& masm, LiveRegisterSet liveRegs);
};

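// Sketch of allocator set-up before compiling (illustrative;
// |availableGeneralRegs| is a hypothetical register set, |R0| comes from
// SharedICRegisters.h, and the methods are the ones declared above):
//
//   CacheRegisterAllocator allocator(writer);
//   if (!allocator.init()) {
//     return false;
//   }
//   allocator.initAvailableRegs(availableGeneralRegs);
//   allocator.initInputLocation(0, R0);  // input 0 is a boxed Value in R0
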
// RAII class to allocate a scratch register and release it when we're done
// with it.
class MOZ_RAII AutoScratchRegister {
  CacheRegisterAllocator& alloc_;
  Register reg_;

  AutoScratchRegister(const AutoScratchRegister&) = delete;
  void operator=(const AutoScratchRegister&) = delete;

 public:
  AutoScratchRegister(CacheRegisterAllocator& alloc, MacroAssembler& masm,
                      Register reg = InvalidReg)
      : alloc_(alloc) {
    if (reg != InvalidReg) {
      alloc.allocateFixedRegister(masm, reg);
      reg_ = reg;
    } else {
      reg_ = alloc.allocateRegister(masm);
    }
    MOZ_ASSERT(alloc_.currentOpRegs_.has(reg_));
  }
  ~AutoScratchRegister() { alloc_.releaseRegister(reg_); }

  Register get() const { return reg_; }
  operator Register() const { return reg_; }
};

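// Usage sketch; per the fixed-register note in the comment near the top of
// this file, a specific register must be requested before any arbitrary one
// (|ReturnReg| chosen purely for illustration):
//
//   AutoScratchRegister fixed(allocator, masm, ReturnReg);  // fixed first
//   AutoScratchRegister scratch(allocator, masm);           // then any reg
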
// On x86, spectreBoundsCheck32 can emit better code if it has a scratch
// register and index masking is enabled.
class MOZ_RAII AutoSpectreBoundsScratchRegister {
  mozilla::Maybe<AutoScratchRegister> scratch_;
  Register reg_ = InvalidReg;

  AutoSpectreBoundsScratchRegister(const AutoSpectreBoundsScratchRegister&) =
      delete;
  void operator=(const AutoSpectreBoundsScratchRegister&) = delete;

 public:
  AutoSpectreBoundsScratchRegister(CacheRegisterAllocator& alloc,
                                   MacroAssembler& masm) {
#ifdef JS_CODEGEN_X86
    if (JitOptions.spectreIndexMasking) {
      scratch_.emplace(alloc, masm);
      reg_ = scratch_->get();
    }
#endif
  }

  Register get() const { return reg_; }
  operator Register() const { return reg_; }
};

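// Usage sketch: the (possibly InvalidReg) register is passed straight to the
// masm bounds check; |index|, |length|, and |failure| are assumed to be set
// up by the surrounding emitter:
//
//   AutoSpectreBoundsScratchRegister spectreScratch(allocator, masm);
//   masm.spectreBoundsCheck32(index, length, spectreScratch,
//                             failure->label());
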
// The FailurePath class stores everything we need to generate a failure path
// at the end of the IC code. The failure path restores the input registers,
// if needed, and jumps to the next stub.
class FailurePath {
  Vector<OperandLocation, 4, SystemAllocPolicy> inputs_;
  SpilledRegisterVector spilledRegs_;
  NonAssertingLabel label_;
  uint32_t stackPushed_;
#ifdef DEBUG
  // Flag to ensure FailurePath::label() isn't taken while there's a scratch
  // float register which still needs to be restored.
  bool hasAutoScratchFloatRegister_ = false;
#endif

 public:
  FailurePath() = default;

  FailurePath(FailurePath&& other)
      : inputs_(std::move(other.inputs_)),
        spilledRegs_(std::move(other.spilledRegs_)),
        label_(other.label_),
        stackPushed_(other.stackPushed_) {}

  Label* labelUnchecked() { return &label_; }
  Label* label() {
    MOZ_ASSERT(!hasAutoScratchFloatRegister_);
    return labelUnchecked();
  }

  void setStackPushed(uint32_t i) { stackPushed_ = i; }
  uint32_t stackPushed() const { return stackPushed_; }

  MOZ_MUST_USE bool appendInput(const OperandLocation& loc) {
    return inputs_.append(loc);
  }
  OperandLocation input(size_t i) const { return inputs_[i]; }

  const SpilledRegisterVector& spilledRegs() const { return spilledRegs_; }

  MOZ_MUST_USE bool setSpilledRegs(const SpilledRegisterVector& regs) {
    MOZ_ASSERT(spilledRegs_.empty());
    return spilledRegs_.appendAll(regs);
  }

  // If canShareFailurePath(other) returns true, the same machine code will
  // be emitted for two failure paths, so we can share them.
  bool canShareFailurePath(const FailurePath& other) const;

  void setHasAutoScratchFloatRegister() {
#ifdef DEBUG
    MOZ_ASSERT(!hasAutoScratchFloatRegister_);
    hasAutoScratchFloatRegister_ = true;
#endif
  }

  void clearHasAutoScratchFloatRegister() {
#ifdef DEBUG
    MOZ_ASSERT(hasAutoScratchFloatRegister_);
    hasAutoScratchFloatRegister_ = false;
#endif
  }
};

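// A guard typically requests its failure path (via
// CacheIRCompiler::addFailurePath below) once its registers are set up, then
// branches to it on mismatch. Abridged sketch in the style of the
// GuardIsProxy emitter:
//
//   FailurePath* failure;
//   if (!addFailurePath(&failure)) {
//     return false;
//   }
//   masm.branchTestObjectIsProxy(false, obj, scratch, failure->label());
//   return true;
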
/**
 * Wrap an offset so that a call can decide to embed a constant
 * or load from the stub data.
 */
class StubFieldOffset {
 private:
  uint32_t offset_;
  StubField::Type type_;

 public:
  StubFieldOffset(uint32_t offset, StubField::Type type)
      : offset_(offset), type_(type) {}

  uint32_t getOffset() { return offset_; }
  StubField::Type getStubFieldType() { return type_; }
};

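// Usage sketch (the pattern used by the compilers; |reader| and |scratch|
// come from the surrounding emitter, and emitLoadStubField is declared
// below):
//
//   StubFieldOffset shape(reader.stubOffset(), StubField::Type::Shape);
//   emitLoadStubField(shape, scratch);
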
class AutoOutputRegister;

// Base class for BaselineCacheIRCompiler and IonCacheIRCompiler.
class MOZ_RAII CacheIRCompiler {
 protected:
  friend class AutoOutputRegister;
  friend class AutoStubFrame;
  friend class AutoSaveLiveRegisters;
  friend class AutoCallVM;
  friend class AutoScratchFloatRegister;

  enum class Mode { Baseline, Ion };

  bool preparedForVMCall_;

  bool isBaseline();
  bool isIon();
  BaselineCacheIRCompiler* asBaseline();
  IonCacheIRCompiler* asIon();

  JSContext* cx_;
  CacheIRReader reader;
  const CacheIRWriter& writer_;
  StackMacroAssembler masm;

  CacheRegisterAllocator allocator;
  Vector<FailurePath, 4, SystemAllocPolicy> failurePaths;

  // Float registers that are live. Registers not in this set can be
  // clobbered and don't need to be saved before performing a VM call.
  // Doing this for non-float registers is a bit more complicated because
  // the IC register allocator allocates GPRs.
  LiveFloatRegisterSet liveFloatRegs_;

  mozilla::Maybe<TypedOrValueRegister> outputUnchecked_;
  Mode mode_;

  // Whether this IC may read double values from uint32 arrays.
  mozilla::Maybe<bool> allowDoubleResult_;

  // Distance from the IC to the stub data; mostly will be
  // sizeof(stubType).
  uint32_t stubDataOffset_;

  enum class StubFieldPolicy { Address, Constant };

  StubFieldPolicy stubFieldPolicy_;

#ifdef DEBUG
  const uint8_t* currentVerificationPosition_;

  // Verify that the number of bytes consumed by the compiler matches
  // up with the opcode signature in CACHE_IR_OPS.
  void assertAllArgumentsConsumed() {
    CacheOp prevOp = CacheOp(*currentVerificationPosition_);
    uint32_t expectedLength = 1 + CacheIROpFormat::ArgLengths[uint8_t(prevOp)];

    const uint8_t* newPosition = reader.currentPosition();
    MOZ_ASSERT(newPosition > currentVerificationPosition_);
    uint32_t actualLength = newPosition - currentVerificationPosition_;
    MOZ_ASSERT(actualLength == expectedLength);
    currentVerificationPosition_ = newPosition;
  }
#endif

  CacheIRCompiler(JSContext* cx, const CacheIRWriter& writer,
                  uint32_t stubDataOffset, Mode mode, StubFieldPolicy policy)
      : preparedForVMCall_(false),
        cx_(cx),
        reader(writer),
        writer_(writer),
        allocator(writer_),
        liveFloatRegs_(FloatRegisterSet::All()),
        mode_(mode),
        stubDataOffset_(stubDataOffset),
        stubFieldPolicy_(policy) {
    MOZ_ASSERT(!writer.failed());
#ifdef DEBUG
    currentVerificationPosition_ = reader.currentPosition();
#endif
  }

  MOZ_MUST_USE bool addFailurePath(FailurePath** failure);
  MOZ_MUST_USE bool emitFailurePath(size_t i);

  // Returns the set of volatile float registers that are live. These
  // registers need to be saved when making non-GC calls with callWithABI.
  FloatRegisterSet liveVolatileFloatRegs() const {
    return FloatRegisterSet::Intersect(liveFloatRegs_.set(),
                                       FloatRegisterSet::Volatile());
  }

  bool objectGuardNeedsSpectreMitigations(ObjOperandId objId) const {
    // Instructions like GuardShape need Spectre mitigations if
    // (1) mitigations are enabled and (2) the object is used by other
    // instructions (if the object is *not* used by other instructions,
    // zeroing its register is pointless).
    return JitOptions.spectreObjectMitigationsMisc &&
           !allocator.isDeadAfterInstruction(objId);
  }

  void emitStoreTypedObjectReferenceProp(ValueOperand val, ReferenceType type,
                                         const Address& dest,
                                         Register scratch);

  void emitRegisterEnumerator(Register enumeratorsList, Register iter,
                              Register scratch);

 private:
  void emitPostBarrierShared(Register obj, const ConstantOrRegister& val,
                             Register scratch, Register maybeIndex);

  void emitPostBarrierShared(Register obj, ValueOperand val, Register scratch,
                             Register maybeIndex) {
    emitPostBarrierShared(obj, ConstantOrRegister(val), scratch, maybeIndex);
  }

 protected:
  template <typename T>
  void emitPostBarrierSlot(Register obj, const T& val, Register scratch) {
    emitPostBarrierShared(obj, val, scratch, InvalidReg);
  }

  template <typename T>
  void emitPostBarrierElement(Register obj, const T& val, Register scratch,
                              Register index) {
    MOZ_ASSERT(index != InvalidReg);
    emitPostBarrierShared(obj, val, scratch, index);
  }

  bool emitComparePointerResultShared(bool symbol);

  bool emitCompareBigIntInt32ResultShared(Register bigInt, Register int32,
                                          Register scratch1,
                                          Register scratch2, JSOp op,
                                          const AutoOutputRegister& output);

  template <typename Fn, Fn fn>
  MOZ_MUST_USE bool emitBigIntBinaryOperationShared();

  template <typename Fn, Fn fn>
  MOZ_MUST_USE bool emitBigIntUnaryOperationShared();

  bool emitDoubleIncDecResult(bool isInc);

#define DEFINE_SHARED_OP(op) MOZ_MUST_USE bool emit##op();
  CACHE_IR_SHARED_OPS(DEFINE_SHARED_OP)
#undef DEFINE_SHARED_OP

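  // For example, the macro above expands to one declaration per shared op:
  //
  //   MOZ_MUST_USE bool emitGuardToObject();
  //   MOZ_MUST_USE bool emitGuardIsNullOrUndefined();
  //   ...and so on for every entry in CACHE_IR_SHARED_OPS.
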
  void emitLoadStubField(StubFieldOffset val, Register dest);
  void emitLoadStubFieldConstant(StubFieldOffset val, Register dest);
  Address emitAddressFromStubField(StubFieldOffset val, Register base);

  uintptr_t readStubWord(uint32_t offset, StubField::Type type) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    MOZ_ASSERT((offset % sizeof(uintptr_t)) == 0);
    return writer_.readStubFieldForIon(offset, type).asWord();
  }
  uint64_t readStubInt64(uint32_t offset, StubField::Type type) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    MOZ_ASSERT((offset % sizeof(uintptr_t)) == 0);
    return writer_.readStubFieldForIon(offset, type).asInt64();
  }
  int32_t int32StubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return readStubWord(offset, StubField::Type::RawWord);
  }
  Shape* shapeStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return (Shape*)readStubWord(offset, StubField::Type::Shape);
  }
  JSObject* objectStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return (JSObject*)readStubWord(offset, StubField::Type::JSObject);
  }
  // This accessor is for cases where the stubField policy is
  // being respected through other means, so we don't check the
  // policy here. (See LoadNewObjectFromTemplateResult.)
  JSObject* objectStubFieldUnchecked(uint32_t offset) {
    return (JSObject*)writer_
        .readStubFieldForIon(offset, StubField::Type::JSObject)
        .asWord();
  }
  JSString* stringStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return (JSString*)readStubWord(offset, StubField::Type::String);
  }
  JS::Symbol* symbolStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return (JS::Symbol*)readStubWord(offset, StubField::Type::Symbol);
  }
  ObjectGroup* groupStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return (ObjectGroup*)readStubWord(offset, StubField::Type::ObjectGroup);
  }
  JS::Compartment* compartmentStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return (JS::Compartment*)readStubWord(offset, StubField::Type::RawWord);
  }
  const JSClass* classStubField(uintptr_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return (const JSClass*)readStubWord(offset, StubField::Type::RawWord);
  }
  const void* proxyHandlerStubField(uintptr_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return (const void*)readStubWord(offset, StubField::Type::RawWord);
  }
  jsid idStubField(uint32_t offset) {
    MOZ_ASSERT(stubFieldPolicy_ == StubFieldPolicy::Constant);
    return jsid::fromRawBits(readStubWord(offset, StubField::Type::Id));
  }

 public:
  // The maximum number of arguments passed to a spread call or
  // fun_apply IC. Keep this small to avoid controllable stack
  // overflows by attackers passing large arrays.
  static const uint32_t MAX_ARGS_ARRAY_LENGTH = 16;

  void callVMInternal(MacroAssembler& masm, VMFunctionId id);
  template <typename Fn, Fn fn>
  void callVM(MacroAssembler& masm);
};

// Ensures the IC's output register is available for writing.
class MOZ_RAII AutoOutputRegister {
  TypedOrValueRegister output_;
  CacheRegisterAllocator& alloc_;

  AutoOutputRegister(const AutoOutputRegister&) = delete;
  void operator=(const AutoOutputRegister&) = delete;

 public:
  explicit AutoOutputRegister(CacheIRCompiler& compiler);
  ~AutoOutputRegister();

  Register maybeReg() const {
    if (output_.hasValue()) {
      return output_.valueReg().scratchReg();
    }
    if (!output_.typedReg().isFloat()) {
      return output_.typedReg().gpr();
    }
    return InvalidReg;
  }

  bool hasValue() const { return output_.hasValue(); }
  ValueOperand valueReg() const { return output_.valueReg(); }
  AnyRegister typedReg() const { return output_.typedReg(); }

  JSValueType type() const {
    MOZ_ASSERT(!hasValue());
    return ValueTypeFromMIRType(output_.type());
  }

  operator TypedOrValueRegister() const { return output_; }
};

enum class CallCanGC { CanGC, CanNotGC };

// Instructions that have to perform a callVM require a stub frame. Call its
// enter() and leave() methods to enter/leave the stub frame.
// Hoisted from jit/BaselineCacheIRCompiler.cpp. See there for method
// definitions.
class MOZ_RAII AutoStubFrame {
  BaselineCacheIRCompiler& compiler;
#ifdef DEBUG
  uint32_t framePushedAtEnterStubFrame_;
#endif

  AutoStubFrame(const AutoStubFrame&) = delete;
  void operator=(const AutoStubFrame&) = delete;

 public:
  explicit AutoStubFrame(BaselineCacheIRCompiler& compiler);

  void enter(MacroAssembler& masm, Register scratch,
             CallCanGC canGC = CallCanGC::CanGC);
  void leave(MacroAssembler& masm, bool calledIntoIon = false);

#ifdef DEBUG
  ~AutoStubFrame();
#endif
};

// AutoSaveLiveRegisters must be used when we make a call that can GC. The
// constructor ensures all live registers are stored on the stack (where the
// GC expects them) and the destructor restores these registers.
class MOZ_RAII AutoSaveLiveRegisters {
  IonCacheIRCompiler& compiler_;

  AutoSaveLiveRegisters(const AutoSaveLiveRegisters&) = delete;
  void operator=(const AutoSaveLiveRegisters&) = delete;

 public:
  explicit AutoSaveLiveRegisters(IonCacheIRCompiler& compiler);

  ~AutoSaveLiveRegisters();
};

// Like AutoScratchRegister, but reuses a register of |output| if possible.
class MOZ_RAII AutoScratchRegisterMaybeOutput {
  mozilla::Maybe<AutoScratchRegister> scratch_;
  Register scratchReg_;

  AutoScratchRegisterMaybeOutput(const AutoScratchRegisterMaybeOutput&) =
      delete;
  void operator=(const AutoScratchRegisterMaybeOutput&) = delete;

 public:
  AutoScratchRegisterMaybeOutput(CacheRegisterAllocator& alloc,
                                 MacroAssembler& masm,
                                 const AutoOutputRegister& output) {
    scratchReg_ = output.maybeReg();
    if (scratchReg_ == InvalidReg) {
      scratch_.emplace(alloc, masm);
      scratchReg_ = scratch_.ref();
    }
  }

  operator Register() const { return scratchReg_; }
};

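// Usage sketch (a common pairing in the emitters; acquiring the output first
// lets the scratch reuse it when possible):
//
//   AutoOutputRegister output(*this);
//   AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
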
// AutoCallVM is a wrapper class that unifies methods shared by
// IonCacheIRCompiler and BaselineCacheIRCompiler that perform a callVM, but
// require stub-specific functionality before performing the VM call.
//
// Expected Usage:
//
// OPs with implementations that may be unified by this class must:
// - Be listed in the CACHE_IR_OPS list but not in the CACHE_IR_SHARED_OPS
//   list.
// - Differ only in their use of `AutoSaveLiveRegisters`,
//   `AutoOutputRegister`, and `AutoScratchRegister`. The Ion
//   implementation will use `AutoSaveLiveRegisters` and
//   `AutoOutputRegister`, while the Baseline implementation will use
//   `AutoScratchRegister`.
// - Both use the `callVM` method.
//
// Using AutoCallVM:
// - The constructor initializes `AutoOutputRegister` for both compiler
//   types. Additionally it initializes an `AutoSaveLiveRegisters` for
//   CacheIRCompilers with the mode Ion, and initializes
//   `AutoScratchRegisterMaybeOutput` and `AutoStubFrame` variables for
//   compilers with mode Baseline.
// - The `prepare()` method calls the IonCacheIRCompiler method
//   `prepareVMCall` for IonCacheIRCompilers, calls the `enter()` method of
//   `AutoStubFrame` for BaselineCacheIRCompilers, and calls the
//   `discardStack` method of the `CacheRegisterAllocator` class for both
//   compiler types.
// - The `call()` method invokes `callVM` on the CacheIRCompiler and stores
//   the call result according to its type. Finally it calls the `leave`
//   method of `AutoStubFrame` for BaselineCacheIRCompilers.
//
// Expected Usage Example:
//   See: `CacheIRCompiler::emitCallGetSparseElementResult()`
//
// Restrictions:
// - OPs that do not meet the criteria listed above cannot be unified with
//   AutoCallVM.

class MOZ_RAII AutoCallVM {
  MacroAssembler& masm_;
  CacheIRCompiler* compiler_;
  CacheRegisterAllocator& allocator_;
  mozilla::Maybe<AutoOutputRegister> output_;

  // Baseline-specific state.
  mozilla::Maybe<AutoStubFrame> stubFrame_;
  mozilla::Maybe<AutoScratchRegisterMaybeOutput> scratch_;

  // Ion-specific state.
  mozilla::Maybe<AutoSaveLiveRegisters> save_;

  void storeResult(JSValueType returnType);

  template <typename Fn>
  void storeResult();

 public:
  AutoCallVM(MacroAssembler& masm, CacheIRCompiler* compiler,
             CacheRegisterAllocator& allocator);

  void prepare();

  template <typename Fn, Fn fn>
  void call() {
    compiler_->callVM<Fn, fn>(masm_);
    storeResult<Fn>();
  }
};

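// Abridged sketch of the usage example referenced above
// (emitCallGetSparseElementResult); the argument pushing is stub-specific,
// and GetSparseElementHelper is the VM function being called:
//
//   bool CacheIRCompiler::emitCallGetSparseElementResult() {
//     AutoCallVM callvm(masm, this, allocator);
//
//     Register obj = allocator.useRegister(masm, reader.objOperandId());
//     Register id = allocator.useRegister(masm, reader.int32OperandId());
//
//     callvm.prepare();
//     masm.Push(id);
//     masm.Push(obj);
//
//     using Fn = bool (*)(JSContext*, HandleArrayObject, int32_t,
//                         MutableHandleValue);
//     callvm.call<Fn, GetSparseElementHelper>();
//     return true;
//   }
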
// RAII class to allocate FloatReg0 as a scratch register and release it when
// we're done with it. The previous contents of FloatReg0 may be spilled on
// the stack and, if necessary, are restored when the destructor runs.
//
// When a FailurePath is passed to the constructor, FailurePath::label() must
// not be used during the lifetime of the AutoScratchFloatRegister. Instead
// use AutoScratchFloatRegister::failure().
class MOZ_RAII AutoScratchFloatRegister {
  Label failurePopReg_{};
  CacheIRCompiler* compiler_;
  FailurePath* failure_;

  AutoScratchFloatRegister(const AutoScratchFloatRegister&) = delete;
  void operator=(const AutoScratchFloatRegister&) = delete;

 public:
  explicit AutoScratchFloatRegister(CacheIRCompiler* compiler)
      : AutoScratchFloatRegister(compiler, nullptr) {}

  AutoScratchFloatRegister(CacheIRCompiler* compiler, FailurePath* failure);

  ~AutoScratchFloatRegister();

  Label* failure();

  FloatRegister get() const { return FloatReg0; }
  operator FloatRegister() const { return FloatReg0; }
};

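// Usage sketch: while the scratch float register is live, branch to
// floatScratch.failure() instead of failure->label() so that FloatReg0 is
// restored on the failure path (|val| and |intReg| are illustrative):
//
//   AutoScratchFloatRegister floatScratch(this, failure);
//   masm.unboxDouble(val, floatScratch);
//   masm.branchTruncateDoubleMaybeModUint32(floatScratch, intReg,
//                                           floatScratch.failure());
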
// See the 'Sharing Baseline stub code' comment in CacheIR.h for a description
// of this class.
class CacheIRStubInfo {
  // These fields don't require 8 bits, but GCC complains if these fields are
  // smaller than the size of the enums.
  CacheKind kind_ : 8;
  ICStubEngine engine_ : 8;
  bool makesGCCalls_ : 1;
  uint8_t stubDataOffset_;

  const uint8_t* code_;
  uint32_t length_;
  const uint8_t* fieldTypes_;

  CacheIRStubInfo(CacheKind kind, ICStubEngine engine, bool makesGCCalls,
                  uint32_t stubDataOffset, const uint8_t* code,
                  uint32_t codeLength, const uint8_t* fieldTypes)
      : kind_(kind),
        engine_(engine),
        makesGCCalls_(makesGCCalls),
        stubDataOffset_(stubDataOffset),
        code_(code),
        length_(codeLength),
        fieldTypes_(fieldTypes) {
    MOZ_ASSERT(kind_ == kind, "Kind must fit in bitfield");
    MOZ_ASSERT(engine_ == engine, "Engine must fit in bitfield");
    MOZ_ASSERT(stubDataOffset_ == stubDataOffset,
               "stubDataOffset must fit in uint8_t");
  }

  CacheIRStubInfo(const CacheIRStubInfo&) = delete;
  CacheIRStubInfo& operator=(const CacheIRStubInfo&) = delete;

 public:
  CacheKind kind() const { return kind_; }
  ICStubEngine engine() const { return engine_; }
  bool makesGCCalls() const { return makesGCCalls_; }

  const uint8_t* code() const { return code_; }
  uint32_t codeLength() const { return length_; }
  uint32_t stubDataOffset() const { return stubDataOffset_; }

  size_t stubDataSize() const;

  StubField::Type fieldType(uint32_t i) const {
    return (StubField::Type)fieldTypes_[i];
  }

  static CacheIRStubInfo* New(CacheKind kind, ICStubEngine engine,
                              bool canMakeCalls, uint32_t stubDataOffset,
                              const CacheIRWriter& writer);

  template <class Stub, class T>
  js::GCPtr<T>& getStubField(Stub* stub, uint32_t field) const;

  template <class T>
  js::GCPtr<T>& getStubField(ICStub* stub, uint32_t field) const {
    return getStubField<ICStub, T>(stub, field);
  }

  uintptr_t getStubRawWord(ICStub* stub, uint32_t field) const;
};

template <typename T>
void TraceCacheIRStub(JSTracer* trc, T* stub, const CacheIRStubInfo* stubInfo);

void LoadTypedThingData(MacroAssembler& masm, TypedThingLayout layout,
                        Register obj, Register result);

void LoadTypedThingLength(MacroAssembler& masm, TypedThingLayout layout,
                          Register obj, Register result);

}  // namespace jit
}  // namespace js

#endif /* jit_CacheIRCompiler_h */