/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/IonCacheIRCompiler.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Maybe.h"

#include <algorithm>

#include "jit/BaselineIC.h"
#include "jit/CacheIRCompiler.h"
#include "jit/IonIC.h"
#include "jit/JSJitFrameIter.h"
#include "jit/Linker.h"
#include "jit/SharedICHelpers.h"
#include "jit/VMFunctions.h"
#include "proxy/DeadObjectProxy.h"
#include "proxy/Proxy.h"
#include "util/Memory.h"

#include "jit/JSJitFrameIter-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/VMFunctionList-inl.h"
#include "vm/Realm-inl.h"
#include "vm/TypeInference-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;
using mozilla::Maybe;

namespace js {
namespace jit {

// IonCacheIRCompiler compiles CacheIR to IonIC native code.
IonCacheIRCompiler::IonCacheIRCompiler(
    JSContext* cx, const CacheIRWriter& writer, IonIC* ic, IonScript* ionScript,
    IonICStub* stub, const PropertyTypeCheckInfo* typeCheckInfo,
    uint32_t stubDataOffset)
    : CacheIRCompiler(cx, writer, stubDataOffset, Mode::Ion,
                      StubFieldPolicy::Constant),
      writer_(writer),
      ic_(ic),
      ionScript_(ionScript),
      stub_(stub),
      typeCheckInfo_(typeCheckInfo),
      savedLiveRegs_(false) {
  MOZ_ASSERT(ic_);
  MOZ_ASSERT(ionScript_);
}

template <typename T>
T IonCacheIRCompiler::rawWordStubField(uint32_t offset) {
  static_assert(sizeof(T) == sizeof(uintptr_t), "T must have word size");
  return (T)readStubWord(offset, StubField::Type::RawWord);
}

template <typename T>
T IonCacheIRCompiler::rawInt64StubField(uint32_t offset) {
  static_assert(sizeof(T) == sizeof(int64_t), "T must have int64 size");
  return (T)readStubInt64(offset, StubField::Type::RawInt64);
}

uint64_t* IonCacheIRCompiler::expandoGenerationStubFieldPtr(uint32_t offset) {
  DebugOnly<uint64_t> generation =
      readStubInt64(offset, StubField::Type::DOMExpandoGeneration);
  uint64_t* ptr = reinterpret_cast<uint64_t*>(stub_->stubDataStart() + offset);
  MOZ_ASSERT(*ptr == generation);
  return ptr;
}
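
// NOTE: Ion stubs are compiled with StubFieldPolicy::Constant (see the
// constructor above), so the readers above can load stub data eagerly at
// compile time. A minimal, illustrative use:
//
//   uintptr_t word = rawWordStubField<uintptr_t>(reader.stubOffset());
//   int64_t bits = rawInt64StubField<int64_t>(reader.stubOffset());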

template <typename Fn, Fn fn>
void IonCacheIRCompiler::callVM(MacroAssembler& masm) {
  VMFunctionId id = VMFunctionToId<Fn, fn>::id;
  callVMInternal(masm, id);
}

bool IonCacheIRCompiler::needsPostBarrier() const {
  return ic_->asSetPropertyIC()->needsPostBarrier();
}

void IonCacheIRCompiler::pushStubCodePointer() {
  stubJitCodeOffset_.emplace(masm.PushWithPatch(ImmPtr((void*)-1)));
}
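
// The ImmPtr((void*)-1) pushed above is only a placeholder: once the stub has
// been linked, compile() below patches it with the real JitCode* via
// Assembler::PatchDataWithValueCheck.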

// AutoSaveLiveRegisters must be used when we make a call that can GC. The
// constructor ensures all live registers are stored on the stack (where the GC
// expects them) and the destructor restores these registers.
AutoSaveLiveRegisters::AutoSaveLiveRegisters(IonCacheIRCompiler& compiler)
    : compiler_(compiler) {
  MOZ_ASSERT(compiler_.liveRegs_.isSome());
  MOZ_ASSERT(compiler_.ic_);
  compiler_.allocator.saveIonLiveRegisters(
      compiler_.masm, compiler_.liveRegs_.ref(),
      compiler_.ic_->scratchRegisterForEntryJump(), compiler_.ionScript_);
  compiler_.savedLiveRegs_ = true;
}
AutoSaveLiveRegisters::~AutoSaveLiveRegisters() {
  MOZ_ASSERT(compiler_.stubJitCodeOffset_.isSome(),
             "Must have pushed JitCode* pointer");
  compiler_.allocator.restoreIonLiveRegisters(compiler_.masm,
                                              compiler_.liveRegs_.ref());
  MOZ_ASSERT(compiler_.masm.framePushed() == compiler_.ionScript_->frameSize());
}
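
// Typical use in an op emitter (see e.g. emitCompareStringResult below): the
// RAII token is then passed to prepareVMCall as proof that registers were
// saved. Illustrative sketch; SomeVMFunction is a stand-in name:
//
//   AutoSaveLiveRegisters save(*this);
//   ...
//   prepareVMCall(masm, save);
//   callVM<Fn, SomeVMFunction>(masm);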

}  // namespace jit
}  // namespace js

#define DEFINE_SHARED_OP(op) \
  bool IonCacheIRCompiler::emit##op() { return CacheIRCompiler::emit##op(); }
CACHE_IR_SHARED_OPS(DEFINE_SHARED_OP)
#undef DEFINE_SHARED_OP
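
// Each shared op expands to a one-line forwarding method, e.g. (illustrative;
// Foo stands for any op listed in CACHE_IR_SHARED_OPS):
//
//   bool IonCacheIRCompiler::emitFoo() { return CacheIRCompiler::emitFoo(); }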

void CacheRegisterAllocator::saveIonLiveRegisters(MacroAssembler& masm,
                                                  LiveRegisterSet liveRegs,
                                                  Register scratch,
                                                  IonScript* ionScript) {
  // We have to push all registers in liveRegs on the stack. It's possible we
  // stored other values in our live registers and stored operands on the
  // stack (where our live registers should go), so this requires some careful
  // work. Try to keep it simple by taking one small step at a time.

  // Step 1. Discard any dead operands so we can reuse their registers.
  freeDeadOperandLocations(masm);

  // Step 2. Figure out the size of our live regs.
  size_t sizeOfLiveRegsInBytes = liveRegs.gprs().size() * sizeof(intptr_t) +
                                 liveRegs.fpus().getPushSizeInBytes();

  MOZ_ASSERT(sizeOfLiveRegsInBytes > 0);

  // Step 3. Ensure all non-input operands are on the stack.
  size_t numInputs = writer_.numInputOperands();
  for (size_t i = numInputs; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (loc.isInRegister()) {
      spillOperandToStack(masm, &loc);
    }
  }

  // Step 4. Restore the register state, but don't discard the stack as
  // non-input operands are stored there.
  restoreInputState(masm, /* shouldDiscardStack = */ false);

  // We just restored the input state, so no input operands should be stored
  // on the stack.
#ifdef DEBUG
  for (size_t i = 0; i < numInputs; i++) {
    const OperandLocation& loc = operandLocations_[i];
    MOZ_ASSERT(!loc.isOnStack());
  }
#endif

  // Step 5. At this point our register state is correct. Stack values,
  // however, may cover the space where we have to store the live registers.
  // Move them out of the way.

  bool hasOperandOnStack = false;
  for (size_t i = numInputs; i < operandLocations_.length(); i++) {
    OperandLocation& loc = operandLocations_[i];
    if (!loc.isOnStack()) {
      continue;
    }

    hasOperandOnStack = true;

    size_t operandSize = loc.stackSizeInBytes();
    size_t operandStackPushed = loc.stackPushed();
    MOZ_ASSERT(operandSize > 0);
    MOZ_ASSERT(stackPushed_ >= operandStackPushed);
    MOZ_ASSERT(operandStackPushed >= operandSize);

    // If this operand doesn't cover the live register space, there's
    // nothing to do.
    if (operandStackPushed - operandSize >= sizeOfLiveRegsInBytes) {
      MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
      continue;
    }

    // Reserve stack space for the live registers if needed.
    if (sizeOfLiveRegsInBytes > stackPushed_) {
      size_t extraBytes = sizeOfLiveRegsInBytes - stackPushed_;
      MOZ_ASSERT((extraBytes % sizeof(uintptr_t)) == 0);
      masm.subFromStackPtr(Imm32(extraBytes));
      stackPushed_ += extraBytes;
    }

    // Push the operand below the live register space.
    if (loc.kind() == OperandLocation::PayloadStack) {
      masm.push(
          Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
      stackPushed_ += operandSize;
      loc.setPayloadStack(stackPushed_, loc.payloadType());
      continue;
    }
    MOZ_ASSERT(loc.kind() == OperandLocation::ValueStack);
    masm.pushValue(
        Address(masm.getStackPointer(), stackPushed_ - operandStackPushed));
    stackPushed_ += operandSize;
    loc.setValueStack(stackPushed_);
  }

  // Step 6. If we have any operands on the stack, adjust their stackPushed
  // values to not include sizeOfLiveRegsInBytes (this simplifies code down
  // the line). Then push/store the live registers.
  if (hasOperandOnStack) {
    MOZ_ASSERT(stackPushed_ > sizeOfLiveRegsInBytes);
    stackPushed_ -= sizeOfLiveRegsInBytes;

    for (size_t i = numInputs; i < operandLocations_.length(); i++) {
      OperandLocation& loc = operandLocations_[i];
      if (loc.isOnStack()) {
        loc.adjustStackPushed(-int32_t(sizeOfLiveRegsInBytes));
      }
    }

    size_t stackBottom = stackPushed_ + sizeOfLiveRegsInBytes;
    masm.storeRegsInMask(liveRegs, Address(masm.getStackPointer(), stackBottom),
                         scratch);
    masm.setFramePushed(masm.framePushed() + sizeOfLiveRegsInBytes);
  } else {
    // If no operands are on the stack, discard the unused stack space.
    if (stackPushed_ > 0) {
      masm.addToStackPtr(Imm32(stackPushed_));
      stackPushed_ = 0;
    }
    masm.PushRegsInMask(liveRegs);
  }
  freePayloadSlots_.clear();
  freeValueSlots_.clear();

  MOZ_ASSERT(masm.framePushed() ==
             ionScript->frameSize() + sizeOfLiveRegsInBytes);

  // Step 7. All live registers and non-input operands are stored on the stack
  // now, so at this point all registers except for the input registers are
  // available.
  availableRegs_.set() = GeneralRegisterSet::Not(inputRegisterSet());
  availableRegsAfterSpill_.set() = GeneralRegisterSet();

  // Step 8. We restored our input state, so we have to fix up aliased input
  // registers again.
  fixupAliasedInputs(masm);
}

void CacheRegisterAllocator::restoreIonLiveRegisters(MacroAssembler& masm,
                                                     LiveRegisterSet liveRegs) {
  masm.PopRegsInMask(liveRegs);

  availableRegs_.set() = GeneralRegisterSet();
  availableRegsAfterSpill_.set() = GeneralRegisterSet::All();
}

static void* GetReturnAddressToIonCode(JSContext* cx) {
  JSJitFrameIter frame(cx->activation()->asJit());
  MOZ_ASSERT(frame.type() == FrameType::Exit,
             "An exit frame is expected as update functions are called with a "
             "VMFunction.");

  void* returnAddr = frame.returnAddress();
#ifdef DEBUG
  ++frame;
  MOZ_ASSERT(frame.isIonJS());
#endif
  return returnAddr;
}

// The AutoSaveLiveRegisters parameter is used to ensure registers were saved
// before this is called.
void IonCacheIRCompiler::prepareVMCall(MacroAssembler& masm,
                                       const AutoSaveLiveRegisters&) {
  uint32_t descriptor = MakeFrameDescriptor(
      masm.framePushed(), FrameType::IonJS, IonICCallFrameLayout::Size());
  pushStubCodePointer();
  masm.Push(Imm32(descriptor));
  masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));

  preparedForVMCall_ = true;
}
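
// After prepareVMCall the stack holds, top to bottom: the return address into
// the Ion script, the frame descriptor, and the stub's JitCode*, i.e. the
// IonICCallFrameLayout header for the VM call that follows.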

bool IonCacheIRCompiler::init() {
  if (!allocator.init()) {
    return false;
  }

  size_t numInputs = writer_.numInputOperands();

  AllocatableGeneralRegisterSet available;

  switch (ic_->kind()) {
    case CacheKind::GetProp:
    case CacheKind::GetElem: {
      IonGetPropertyIC* ic = ic_->asGetPropertyIC();
      TypedOrValueRegister output = ic->output();

      if (output.hasValue()) {
        available.add(output.valueReg());
      } else if (!output.typedReg().isFloat()) {
        available.add(output.typedReg().gpr());
      }

      if (ic->maybeTemp() != InvalidReg) {
        available.add(ic->maybeTemp());
      }

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      allowDoubleResult_.emplace(ic->allowDoubleResult());

      MOZ_ASSERT(numInputs == 1 || numInputs == 2);

      allocator.initInputLocation(0, ic->value());
      if (numInputs > 1) {
        allocator.initInputLocation(1, ic->id());
      }
      break;
    }
    case CacheKind::GetPropSuper:
    case CacheKind::GetElemSuper: {
      IonGetPropSuperIC* ic = ic_->asGetPropSuperIC();
      TypedOrValueRegister output = ic->output();

      available.add(output.valueReg());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      allowDoubleResult_.emplace(true);

      MOZ_ASSERT(numInputs == 2 || numInputs == 3);

      allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);

      if (ic->kind() == CacheKind::GetPropSuper) {
        MOZ_ASSERT(numInputs == 2);
        allocator.initInputLocation(1, ic->receiver());
      } else {
        MOZ_ASSERT(numInputs == 3);
        allocator.initInputLocation(1, ic->id());
        allocator.initInputLocation(2, ic->receiver());
      }
      break;
    }
    case CacheKind::SetProp:
    case CacheKind::SetElem: {
      IonSetPropertyIC* ic = ic_->asSetPropertyIC();

      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());

      allocator.initInputLocation(0, ic->object(), JSVAL_TYPE_OBJECT);

      if (ic->kind() == CacheKind::SetProp) {
        MOZ_ASSERT(numInputs == 2);
        allocator.initInputLocation(1, ic->rhs());
      } else {
        MOZ_ASSERT(numInputs == 3);
        allocator.initInputLocation(1, ic->id());
        allocator.initInputLocation(2, ic->rhs());
      }
      break;
    }
    case CacheKind::GetName: {
      IonGetNameIC* ic = ic_->asGetNameIC();
      ValueOperand output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(output);

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->environment(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::BindName: {
      IonBindNameIC* ic = ic_->asBindNameIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Object, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->environment(), JSVAL_TYPE_OBJECT);
      break;
    }
    case CacheKind::GetIterator: {
      IonGetIteratorIC* ic = ic_->asGetIteratorIC();
      Register output = ic->output();

      available.add(output);
      available.add(ic->temp1());
      available.add(ic->temp2());

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Object, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->value());
      break;
    }
    case CacheKind::In: {
      IonInIC* ic = ic_->asInIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->key());
      allocator.initInputLocation(
          1, TypedOrValueRegister(MIRType::Object, AnyRegister(ic->object())));
      break;
    }
    case CacheKind::HasOwn: {
      IonHasOwnIC* ic = ic_->asHasOwnIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->id());
      allocator.initInputLocation(1, ic->value());
      break;
    }
    case CacheKind::InstanceOf: {
      IonInstanceOfIC* ic = ic_->asInstanceOfIC();
      Register output = ic->output();
      available.add(output);
      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(
          1, TypedOrValueRegister(MIRType::Object, AnyRegister(ic->rhs())));
      break;
    }
    case CacheKind::UnaryArith: {
      IonUnaryArithIC* ic = ic_->asUnaryArithIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 1);
      allocator.initInputLocation(0, ic->input());
      break;
    }
    case CacheKind::BinaryArith: {
      IonBinaryArithIC* ic = ic_->asBinaryArithIC();
      ValueOperand output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(TypedOrValueRegister(output));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(1, ic->rhs());
      break;
    }
    case CacheKind::Compare: {
      IonCompareIC* ic = ic_->asCompareIC();
      Register output = ic->output();

      available.add(output);

      liveRegs_.emplace(ic->liveRegs());
      outputUnchecked_.emplace(
          TypedOrValueRegister(MIRType::Boolean, AnyRegister(output)));

      MOZ_ASSERT(numInputs == 2);
      allocator.initInputLocation(0, ic->lhs());
      allocator.initInputLocation(1, ic->rhs());
      break;
    }
    case CacheKind::Call:
    case CacheKind::TypeOf:
    case CacheKind::ToBool:
    case CacheKind::GetIntrinsic:
    case CacheKind::NewObject:
      MOZ_CRASH("Unsupported IC");
  }

  if (liveRegs_) {
    liveFloatRegs_ = LiveFloatRegisterSet(liveRegs_->fpus());
  }

  allocator.initAvailableRegs(available);
  allocator.initAvailableRegsAfterSpill();
  return true;
}

JitCode* IonCacheIRCompiler::compile() {
  masm.setFramePushed(ionScript_->frameSize());
  if (cx_->runtime()->geckoProfiler().enabled()) {
    masm.enableProfilingInstrumentation();
  }

  allocator.fixupAliasedInputs(masm);

  do {
    switch (reader.readOp()) {
#define DEFINE_OP(op, ...)           \
  case CacheOp::op:                  \
    if (!emit##op()) return nullptr; \
    break;
      CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP

      default:
        MOZ_CRASH("Invalid op");
    }
#ifdef DEBUG
    assertAllArgumentsConsumed();
#endif
    allocator.nextOp();
  } while (reader.more());

  masm.assumeUnreachable("Should have returned from IC");

  // Done emitting the main IC code. Now emit the failure paths.
  for (size_t i = 0; i < failurePaths.length(); i++) {
    if (!emitFailurePath(i)) {
      return nullptr;
    }
    Register scratch = ic_->scratchRegisterForEntryJump();
    CodeOffset offset = masm.movWithPatch(ImmWord(-1), scratch);
    masm.jump(Address(scratch, 0));
    if (!nextCodeOffsets_.append(offset)) {
      return nullptr;
    }
  }

  Linker linker(masm);
  Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Ion));
  if (!newStubCode) {
    cx_->recoverFromOutOfMemory();
    return nullptr;
  }

  for (CodeOffset offset : nextCodeOffsets_) {
    Assembler::PatchDataWithValueCheck(CodeLocationLabel(newStubCode, offset),
                                       ImmPtr(stub_->nextCodeRawPtr()),
                                       ImmPtr((void*)-1));
  }
  if (stubJitCodeOffset_) {
    Assembler::PatchDataWithValueCheck(
        CodeLocationLabel(newStubCode, *stubJitCodeOffset_),
        ImmPtr(newStubCode.get()), ImmPtr((void*)-1));
  }

  return newStubCode;
}
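
// Each failure path above ends in a movWithPatch/jump pair: the ImmWord(-1)
// placeholder is patched to stub_->nextCodeRawPtr(), so a failing guard jumps
// indirectly to the next code in the IC chain.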

bool IonCacheIRCompiler::emitGuardShape() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  ObjOperandId objId = reader.objOperandId();
  Register obj = allocator.useRegister(masm, objId);
  Shape* shape = shapeStubField(reader.stubOffset());

  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

  Maybe<AutoScratchRegister> maybeScratch;
  if (needSpectreMitigations) {
    maybeScratch.emplace(allocator, masm);
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  if (needSpectreMitigations) {
    masm.branchTestObjShape(Assembler::NotEqual, obj, shape, *maybeScratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj, shape,
                                                failure->label());
  }

  return true;
}

bool IonCacheIRCompiler::emitGuardGroup() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  ObjOperandId objId = reader.objOperandId();
  Register obj = allocator.useRegister(masm, objId);
  ObjectGroup* group = groupStubField(reader.stubOffset());

  bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

  Maybe<AutoScratchRegister> maybeScratch;
  if (needSpectreMitigations) {
    maybeScratch.emplace(allocator, masm);
  }

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  if (needSpectreMitigations) {
    masm.branchTestObjGroup(Assembler::NotEqual, obj, group, *maybeScratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjGroupNoSpectreMitigations(Assembler::NotEqual, obj, group,
                                                failure->label());
  }

  return true;
}

bool IonCacheIRCompiler::emitGuardProto() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  JSObject* proto = objectStubField(reader.stubOffset());

  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadObjProto(obj, scratch);
  masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(proto),
                 failure->label());
  return true;
}

bool IonCacheIRCompiler::emitGuardCompartment() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  JSObject* globalWrapper = objectStubField(reader.stubOffset());
  JS::Compartment* compartment = compartmentStubField(reader.stubOffset());
  AutoScratchRegister scratch(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Verify that the global wrapper is still valid, as it is a prerequisite
  // for doing the compartment check.
  masm.movePtr(ImmGCPtr(globalWrapper), scratch);
  Address handlerAddr(scratch, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::Equal, handlerAddr,
                 ImmPtr(&DeadObjectProxy::singleton), failure->label());

  masm.branchTestObjCompartment(Assembler::NotEqual, obj, compartment, scratch,
                                failure->label());
  return true;
}

bool IonCacheIRCompiler::emitGuardAnyClass() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  ObjOperandId objId = reader.objOperandId();
  Register obj = allocator.useRegister(masm, objId);
  AutoScratchRegister scratch(allocator, masm);

  const JSClass* clasp = classStubField(reader.stubOffset());

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  if (objectGuardNeedsSpectreMitigations(objId)) {
    masm.branchTestObjClass(Assembler::NotEqual, obj, clasp, scratch, obj,
                            failure->label());
  } else {
    masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, clasp,
                                                scratch, failure->label());
  }

  return true;
}

bool IonCacheIRCompiler::emitGuardHasProxyHandler() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  const void* handler = proxyHandlerStubField(reader.stubOffset());

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Address handlerAddr(obj, ProxyObject::offsetOfHandler());
  masm.branchPtr(Assembler::NotEqual, handlerAddr, ImmPtr(handler),
                 failure->label());
  return true;
}

bool IonCacheIRCompiler::emitGuardSpecificObject() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  JSObject* expected = objectStubField(reader.stubOffset());

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchPtr(Assembler::NotEqual, obj, ImmGCPtr(expected),
                 failure->label());
  return true;
}

bool IonCacheIRCompiler::emitGuardSpecificAtom() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  Register str = allocator.useRegister(masm, reader.stringOperandId());
  AutoScratchRegister scratch(allocator, masm);

  JSAtom* atom = &stringStubField(reader.stubOffset())->asAtom();

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  Label done;
  masm.branchPtr(Assembler::Equal, str, ImmGCPtr(atom), &done);

  // The pointers are not equal, so if the input string is also an atom it
  // must be a different string.
  masm.branchTest32(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::NON_ATOM_BIT), failure->label());

  // Check the length.
  masm.branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
                Imm32(atom->length()), failure->label());

  // We have a non-atomized string with the same length. Call a helper
  // function to do the comparison.
  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  masm.PushRegsInMask(volatileRegs);

  masm.setupUnalignedABICall(scratch);
  masm.movePtr(ImmGCPtr(atom), scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(str);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, EqualStringsHelperPure));
  masm.mov(ReturnReg, scratch);

  LiveRegisterSet ignore;
  ignore.add(scratch);
  masm.PopRegsInMaskIgnore(volatileRegs, ignore);
  masm.branchIfFalseBool(scratch, failure->label());

  masm.bind(&done);
  return true;
}
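
// The sequence above (PushRegsInMask of the volatile set, an unaligned ABI
// call to a *Pure helper, then PopRegsInMaskIgnore with the result register
// excluded) is the standard pattern in this file for calling C++ predicates
// that cannot GC from within stub code.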

bool IonCacheIRCompiler::emitGuardSpecificSymbol() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  Register sym = allocator.useRegister(masm, reader.symbolOperandId());
  JS::Symbol* expected = symbolStubField(reader.stubOffset());

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.branchPtr(Assembler::NotEqual, sym, ImmGCPtr(expected),
                 failure->label());
  return true;
}

bool IonCacheIRCompiler::emitLoadValueResult() {
  MOZ_CRASH("Baseline-specific op");
}

bool IonCacheIRCompiler::emitLoadFixedSlotResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  int32_t offset = int32StubField(reader.stubOffset());
  masm.loadTypedOrValue(Address(obj, offset), output);
  return true;
}

bool IonCacheIRCompiler::emitLoadDynamicSlotResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  int32_t offset = int32StubField(reader.stubOffset());

  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
  masm.loadTypedOrValue(Address(scratch, offset), output);
  return true;
}

bool IonCacheIRCompiler::emitGuardHasGetterSetter() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  Shape* shape = shapeStubField(reader.stubOffset());

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
                               liveVolatileFloatRegs());
  volatileRegs.takeUnchecked(scratch1);
  volatileRegs.takeUnchecked(scratch2);
  masm.PushRegsInMask(volatileRegs);

  masm.setupUnalignedABICall(scratch1);
  masm.loadJSContext(scratch1);
  masm.passABIArg(scratch1);
  masm.passABIArg(obj);
  masm.movePtr(ImmGCPtr(shape), scratch2);
  masm.passABIArg(scratch2);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ObjectHasGetterSetterPure));
  masm.mov(ReturnReg, scratch1);
  masm.PopRegsInMask(volatileRegs);

  masm.branchIfFalseBool(scratch1, failure->label());
  return true;
}

bool IonCacheIRCompiler::emitCallScriptedGetterResultShared(
    TypedOrValueRegister receiver, TypedOrValueRegister output) {
  JSFunction* target = &objectStubField(reader.stubOffset())->as<JSFunction>();
  AutoScratchRegister scratch(allocator, masm);

  bool isSameRealm = reader.readBool();
  MOZ_ASSERT(isSameRealm == (cx_->realm() == target->realm()));

  allocator.discardStack(masm);

  uint32_t framePushedBefore = masm.framePushed();

  // Construct IonICCallFrameLayout.
  uint32_t descriptor = MakeFrameDescriptor(
      masm.framePushed(), FrameType::IonJS, IonICCallFrameLayout::Size());
  pushStubCodePointer();
  masm.Push(Imm32(descriptor));
  masm.Push(ImmPtr(GetReturnAddressToIonCode(cx_)));

  // The JitFrameLayout pushed below will be aligned to JitStackAlignment,
  // so we just have to make sure the stack is aligned after we push the
  // |this| + argument Values.
  uint32_t argSize = (target->nargs() + 1) * sizeof(Value);
  uint32_t padding =
      ComputeByteAlignment(masm.framePushed() + argSize, JitStackAlignment);
  MOZ_ASSERT(padding % sizeof(uintptr_t) == 0);
  MOZ_ASSERT(padding < JitStackAlignment);
  masm.reserveStack(padding);

  for (size_t i = 0; i < target->nargs(); i++) {
    masm.Push(UndefinedValue());
  }
  masm.Push(receiver);

  if (!isSameRealm) {
    masm.switchToRealm(target->realm(), scratch);
  }

  masm.movePtr(ImmGCPtr(target), scratch);

  descriptor = MakeFrameDescriptor(argSize + padding, FrameType::IonICCall,
                                   JitFrameLayout::Size());
  masm.Push(Imm32(0));  // argc
  masm.Push(scratch);
  masm.Push(Imm32(descriptor));

  // Check stack alignment. Add sizeof(uintptr_t) for the return address.
  MOZ_ASSERT(((masm.framePushed() + sizeof(uintptr_t)) % JitStackAlignment) ==
             0);

  // The getter currently has a jit entry or a non-lazy script. We will only
  // relazify when we do a shrinking GC and when that happens we will also
  // purge IC stubs.
  MOZ_ASSERT(target->hasJitEntry());
  masm.loadJitCodeRaw(scratch, scratch);
  masm.callJit(scratch);

  if (!isSameRealm) {
    static_assert(!JSReturnOperand.aliases(ReturnReg),
                  "ReturnReg available as scratch after scripted calls");
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  masm.storeCallResultValue(output);
  masm.freeStack(masm.framePushed() - framePushedBefore);
  return true;
}
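
// Illustrative padding computation (assumes 16-byte JitStackAlignment and
// 8-byte Values; both are platform-dependent): with framePushed() == 48 and a
// 2-argument getter, argSize == (2 + 1) * 8 == 24, so
// ComputeByteAlignment(48 + 24, 16) == 8 bytes of padding are reserved.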

bool IonCacheIRCompiler::emitCallScriptedGetterResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, reader.objOperandId());

  return emitCallScriptedGetterResultShared(
      TypedOrValueRegister(MIRType::Object, AnyRegister(obj)), output);
}

bool IonCacheIRCompiler::emitCallScriptedGetterByValueResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());

  return emitCallScriptedGetterResultShared(val, output);
}

bool IonCacheIRCompiler::emitCallNativeGetterResultShared(
    TypedOrValueRegister receiver, const AutoOutputRegister& output,
    AutoSaveLiveRegisters& save) {
  JSFunction* target = &objectStubField(reader.stubOffset())->as<JSFunction>();
  MOZ_ASSERT(target->isNative());

  AutoScratchRegisterMaybeOutput argJSContext(allocator, masm, output);
  AutoScratchRegister argUintN(allocator, masm);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  // Native functions have the signature:
  //  bool (*)(JSContext*, unsigned, Value* vp)
  // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
  // are the function arguments.

  // Construct vp array:
  // Push receiver value for |this|
  masm.Push(receiver);
  // Push callee/outparam.
  masm.Push(ObjectValue(*target));

  // Preload arguments into registers.
  masm.loadJSContext(argJSContext);
  masm.move32(Imm32(0), argUintN);
  masm.moveStackPtrTo(argVp.get());

  // Push marking data for later use.
  masm.Push(argUintN);
  pushStubCodePointer();

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLNative);

  if (target->realm() != cx_->realm()) {
    masm.switchToRealm(target->realm(), scratch);
  }

  // Construct and execute call.
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argUintN);
  masm.passABIArg(argVp);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, target->native()),
                   MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  if (target->realm() != cx_->realm()) {
    masm.switchToRealm(cx_->realm(), ReturnReg);
  }

  // Load the outparam vp[0] into output register(s).
  Address outparam(masm.getStackPointer(),
                   IonOOLNativeExitFrameLayout::offsetOfResult());
  masm.loadValue(outparam, output.valueReg());

  if (JitOptions.spectreJitToCxxCalls) {
    masm.speculationBarrier();
  }

  masm.adjustStack(IonOOLNativeExitFrameLayout::Size(0));
  return true;
}
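
// Layout of the vp array built above (a getter takes no formal arguments,
// matching the argc of 0 loaded into argUintN):
//
//   vp[0]  callee, ObjectValue(*target); doubles as the result outparam
//   vp[1]  |this|, the receiver value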

bool IonCacheIRCompiler::emitCallNativeGetterResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, reader.objOperandId());

  return emitCallNativeGetterResultShared(
      TypedOrValueRegister(MIRType::Object, AnyRegister(obj)), output, save);
}

bool IonCacheIRCompiler::emitCallNativeGetterByValueResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());

  return emitCallNativeGetterResultShared(val, output, save);
}

bool IonCacheIRCompiler::emitCallProxyGetResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register obj = allocator.useRegister(masm, reader.objOperandId());
  jsid id = idStubField(reader.stubOffset());

  // ProxyGetProperty(JSContext* cx, HandleObject proxy, HandleId id,
  //                  MutableHandleValue vp)
  AutoScratchRegisterMaybeOutput argJSContext(allocator, masm, output);
  AutoScratchRegister argProxy(allocator, masm);
  AutoScratchRegister argId(allocator, masm);
  AutoScratchRegister argVp(allocator, masm);
  AutoScratchRegister scratch(allocator, masm);

  allocator.discardStack(masm);

  // Push stubCode for marking.
  pushStubCodePointer();

  // Push args on stack first so we can take pointers to make handles.
  masm.Push(UndefinedValue());
  masm.moveStackPtrTo(argVp.get());

  masm.Push(id, scratch);
  masm.moveStackPtrTo(argId.get());

  // Push the proxy. Also used as receiver.
  masm.Push(obj);
  masm.moveStackPtrTo(argProxy.get());

  masm.loadJSContext(argJSContext);

  if (!masm.icBuildOOLFakeExitFrame(GetReturnAddressToIonCode(cx_), save)) {
    return false;
  }
  masm.enterFakeExitFrame(argJSContext, scratch, ExitFrameType::IonOOLProxy);

  // Make the call.
  masm.setupUnalignedABICall(scratch);
  masm.passABIArg(argJSContext);
  masm.passABIArg(argProxy);
  masm.passABIArg(argId);
  masm.passABIArg(argVp);
  masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ProxyGetProperty),
                   MoveOp::GENERAL,
                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  // Test for failure.
  masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

  // Load the outparam vp[0] into output register(s).
  Address outparam(masm.getStackPointer(),
                   IonOOLProxyExitFrameLayout::offsetOfResult());
  masm.loadValue(outparam, output.valueReg());

  // Spectre mitigation in case of speculative execution within C++ code.
  if (JitOptions.spectreJitToCxxCalls) {
    masm.speculationBarrier();
  }

  // masm.leaveExitFrame & pop locals
  masm.adjustStack(IonOOLProxyExitFrameLayout::Size());
  return true;
}

bool IonCacheIRCompiler::emitGuardFrameHasNoArgumentsObject() {
  MOZ_CRASH("Baseline-specific op");
}

bool IonCacheIRCompiler::emitLoadFrameCalleeResult() {
  MOZ_CRASH("Baseline-specific op");
}

bool IonCacheIRCompiler::emitLoadFrameNumActualArgsResult() {
  MOZ_CRASH("Baseline-specific op");
}

bool IonCacheIRCompiler::emitLoadFrameArgumentResult() {
  MOZ_CRASH("Baseline-specific op");
}

bool IonCacheIRCompiler::emitLoadEnvironmentFixedSlotResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  int32_t offset = int32StubField(reader.stubOffset());

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  // Check for uninitialized lexicals.
  Address slot(obj, offset);
  masm.branchTestMagic(Assembler::Equal, slot, failure->label());

  // Load the value.
  masm.loadTypedOrValue(slot, output);
  return true;
}

bool IonCacheIRCompiler::emitLoadEnvironmentDynamicSlotResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoOutputRegister output(*this);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  int32_t offset = int32StubField(reader.stubOffset());
  AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);

  // Check for uninitialized lexicals.
  Address slot(scratch, offset);
  masm.branchTestMagic(Assembler::Equal, slot, failure->label());

  // Load the value.
  masm.loadTypedOrValue(slot, output);
  return true;
}

bool IonCacheIRCompiler::emitLoadStringResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  MOZ_CRASH("not used in ion");
}

bool IonCacheIRCompiler::emitCompareStringResult() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  AutoSaveLiveRegisters save(*this);
  AutoOutputRegister output(*this);

  Register left = allocator.useRegister(masm, reader.stringOperandId());
  Register right = allocator.useRegister(masm, reader.stringOperandId());
  JSOp op = reader.jsop();

  allocator.discardStack(masm);

  Label slow, done;
  MOZ_ASSERT(!output.hasValue());
  masm.compareStrings(op, left, right, output.typedReg().gpr(), &slow);

  masm.jump(&done);
  masm.bind(&slow);

  prepareVMCall(masm, save);

  // Push the operands in reverse order for JSOp::Le and JSOp::Gt:
  // - |left <= right| is implemented as |right >= left|.
  // - |left > right| is implemented as |right < left|.
  if (op == JSOp::Le || op == JSOp::Gt) {
    masm.Push(left);
    masm.Push(right);
  } else {
    masm.Push(right);
    masm.Push(left);
  }

  using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
  if (op == JSOp::Eq || op == JSOp::StrictEq) {
    callVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(masm);
  } else if (op == JSOp::Ne || op == JSOp::StrictNe) {
    callVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(masm);
  } else if (op == JSOp::Lt || op == JSOp::Gt) {
    callVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(masm);
  } else {
    MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
    callVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(masm);
  }

  masm.storeCallBoolResult(output.typedReg().gpr());
  masm.bind(&done);
  return true;
}
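
// VM fallback dispatch used above, with operands already swapped for Le/Gt:
//
//   Eq, StrictEq  ->  StringsEqual<Equal>
//   Ne, StrictNe  ->  StringsEqual<NotEqual>
//   Lt, Gt        ->  StringsCompare<LessThan>
//   Le, Ge        ->  StringsCompare<GreaterThanOrEqual>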

static bool GroupHasPropertyTypes(ObjectGroup* group, jsid* id, Value* v) {
  AutoUnsafeCallWithABI unsafe;
  if (group->unknownPropertiesDontCheckGeneration()) {
    return true;
  }
  HeapTypeSet* propTypes = group->maybeGetPropertyDontCheckGeneration(*id);
  if (!propTypes) {
    return true;
  }
  if (!propTypes->nonConstantProperty()) {
    return false;
  }
  return propTypes->hasType(TypeSet::GetValueType(*v));
}
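
// GroupHasPropertyTypes is the slow-path predicate that EmitCheckPropertyTypes
// below invokes via callWithABI when its inline check fails;
// AutoUnsafeCallWithABI marks it as a function jitcode may call directly.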

static void EmitCheckPropertyTypes(MacroAssembler& masm,
                                   const PropertyTypeCheckInfo* typeCheckInfo,
                                   Register obj, const ConstantOrRegister& val,
                                   const LiveRegisterSet& liveRegs,
                                   Label* failures) {
  // Emit code to check |val| is part of the property's HeapTypeSet.

  if (!typeCheckInfo->isSet()) {
    return;
  }

  ObjectGroup* group = typeCheckInfo->group();
  AutoSweepObjectGroup sweep(group);
  if (group->unknownProperties(sweep)) {
    return;
  }

  jsid id = typeCheckInfo->id();
  HeapTypeSet* propTypes = group->maybeGetProperty(sweep, id);
  if (propTypes && propTypes->unknown()) {
    return;
  }

  // Use the object register as scratch, as we don't need it here.
  masm.Push(obj);
  Register scratch1 = obj;

  // We may also need a scratch register for guardTypeSet. Additionally,
  // spectreRegToZero is the register that may be zeroed on speculatively
  // executed paths.
  Register objScratch = InvalidReg;
  Register spectreRegToZero = InvalidReg;
  if (propTypes && !propTypes->unknownObject() &&
      propTypes->getObjectCount() > 0) {
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    if (!val.constant()) {
      TypedOrValueRegister valReg = val.reg();
      if (valReg.hasValue()) {
        regs.take(valReg.valueReg());
        spectreRegToZero = valReg.valueReg().payloadOrValueReg();
      } else if (!valReg.typedReg().isFloat()) {
        regs.take(valReg.typedReg().gpr());
        spectreRegToZero = valReg.typedReg().gpr();
      }
    }
    regs.take(scratch1);
    objScratch = regs.takeAny();
    masm.Push(objScratch);
  }

  bool checkTypeSet = true;
  Label failedFastPath;

  if (propTypes && !propTypes->nonConstantProperty()) {
    masm.jump(&failedFastPath);
  }

  if (val.constant()) {
    // If the input is a constant, the barrier can be evaluated at compile
    // time: jump straight to the slow path if it would always fail.
    if (!propTypes || !propTypes->hasType(TypeSet::GetValueType(val.value()))) {
      masm.jump(&failedFastPath);
    }
    checkTypeSet = false;
  } else {
    // We can do the same trick as above for typed registers holding a
    // primitive type.
    TypedOrValueRegister reg = val.reg();
    if (reg.hasTyped() && reg.type() != MIRType::Object) {
      MIRType type = reg.type();
      if (!propTypes || !propTypes->hasType(TypeSet::PrimitiveType(type))) {
        masm.jump(&failedFastPath);
      }
      checkTypeSet = false;
    }
  }

  Label done;
  if (checkTypeSet) {
    TypedOrValueRegister valReg = val.reg();
    if (propTypes) {
      // guardTypeSet can read from type sets without triggering read barriers.
      TypeSet::readBarrier(propTypes);
      masm.guardTypeSet(valReg, propTypes, BarrierKind::TypeSet, scratch1,
                        objScratch, spectreRegToZero, &failedFastPath);
      masm.jump(&done);
    } else {
      masm.jump(&failedFastPath);
    }
  }

  if (failedFastPath.used()) {
    // The inline type check failed. Do a callWithABI to check the current
    // TypeSet in case the type was added after we generated this stub.
    masm.bind(&failedFastPath);

    AllocatableRegisterSet regs(GeneralRegisterSet::Volatile(),
                                liveRegs.fpus());
    LiveRegisterSet save(regs.asLiveSet());
    masm.PushRegsInMask(save);

    regs.takeUnchecked(scratch1);

    // Push |val| first to make sure everything is fine if |val| aliases
    // scratch2.
    Register scratch2 = regs.takeAnyGeneral();
    masm.Push(val);
    masm.moveStackPtrTo(scratch2);

    Register scratch3 = regs.takeAnyGeneral();
    masm.Push(id, scratch3);
    masm.moveStackPtrTo(scratch3);

    masm.setupUnalignedABICall(scratch1);
    masm.movePtr(ImmGCPtr(group), scratch1);
    masm.passABIArg(scratch1);
    masm.passABIArg(scratch3);
    masm.passABIArg(scratch2);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, GroupHasPropertyTypes));
    masm.mov(ReturnReg, scratch1);

    masm.adjustStack(sizeof(Value) + sizeof(jsid));

    LiveRegisterSet ignore;
    ignore.add(scratch1);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.branchIfTrueBool(scratch1, &done);
    if (objScratch != InvalidReg) {
      masm.pop(objScratch);
    }
    masm.pop(obj);
    masm.jump(failures);
  }

  masm.bind(&done);
  if (objScratch != InvalidReg) {
    masm.Pop(objScratch);
  }
  masm.Pop(obj);
}
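
// Summary of the check emitted above, fastest case first:
//   1. Constants and typed primitive registers are resolved at compile time;
//      only an unconditional jump is emitted if the barrier must always fail.
//   2. Otherwise guardTypeSet performs the inline HeapTypeSet check.
//   3. If the inline check fails, an ABI call to GroupHasPropertyTypes
//      re-checks the current type set, since types may have been added after
//      this stub was generated.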

bool IonCacheIRCompiler::emitStoreFixedSlot() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  int32_t offset = int32StubField(reader.stubOffset());
  ConstantOrRegister val =
      allocator.useConstantOrRegister(masm, reader.valOperandId());

  Maybe<AutoScratchRegister> scratch;
  if (needsPostBarrier()) {
    scratch.emplace(allocator, masm);
  }

  if (typeCheckInfo_->isSet()) {
    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_,
                           failure->label());
  }

  Address slot(obj, offset);
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeConstantOrRegister(val, slot);
  if (needsPostBarrier()) {
    emitPostBarrierSlot(obj, val, scratch.ref());
  }
  return true;
}

bool IonCacheIRCompiler::emitStoreDynamicSlot() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  int32_t offset = int32StubField(reader.stubOffset());
  ConstantOrRegister val =
      allocator.useConstantOrRegister(masm, reader.valOperandId());
  AutoScratchRegister scratch(allocator, masm);

  if (typeCheckInfo_->isSet()) {
    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }

    EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_,
                           failure->label());
  }

  masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch);
  Address slot(scratch, offset);
  EmitPreBarrier(masm, slot, MIRType::Value);
  masm.storeConstantOrRegister(val, slot);
  if (needsPostBarrier()) {
    emitPostBarrierSlot(obj, val, scratch);
  }
  return true;
}

bool IonCacheIRCompiler::emitAddAndStoreSlotShared(CacheOp op) {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  int32_t offset = int32StubField(reader.stubOffset());
  ConstantOrRegister val =
      allocator.useConstantOrRegister(masm, reader.valOperandId());

  AutoScratchRegister scratch1(allocator, masm);

  Maybe<AutoScratchRegister> scratch2;
  if (op == CacheOp::AllocateAndStoreDynamicSlot) {
    scratch2.emplace(allocator, masm);
  }

  bool changeGroup = reader.readBool();
  ObjectGroup* newGroup = groupStubField(reader.stubOffset());
  Shape* newShape = shapeStubField(reader.stubOffset());

  FailurePath* failure;
  if (!addFailurePath(&failure)) {
    return false;
  }

  EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, val, *liveRegs_,
                         failure->label());

  if (op == CacheOp::AllocateAndStoreDynamicSlot) {
    // We have to (re)allocate dynamic slots. Do this first, as it's the
    // only fallible operation here. Note that growSlotsPure is
    // fallible but does not GC.
    int32_t numNewSlots = int32StubField(reader.stubOffset());
    MOZ_ASSERT(numNewSlots > 0);

    LiveRegisterSet save(GeneralRegisterSet::Volatile(),
                         liveVolatileFloatRegs());
    masm.PushRegsInMask(save);

    masm.setupUnalignedABICall(scratch1);
    masm.loadJSContext(scratch1);
    masm.passABIArg(scratch1);
    masm.passABIArg(obj);
    masm.move32(Imm32(numNewSlots), scratch2.ref());
    masm.passABIArg(scratch2.ref());
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NativeObject::growSlotsPure));
    masm.mov(ReturnReg, scratch1);

    LiveRegisterSet ignore;
    ignore.add(scratch1);
    masm.PopRegsInMaskIgnore(save, ignore);

    masm.branchIfFalseBool(scratch1, failure->label());
  }

  if (changeGroup) {
    // Change the object's group from a partially to a fully initialized
    // group, per the acquired-properties analysis. Only change the group if
    // the old group still has a newScript. This only applies to PlainObjects.
    Label noGroupChange;
    masm.branchIfObjGroupHasNoAddendum(obj, scratch1, &noGroupChange);

    // Update the object's group.
    masm.storeObjGroup(newGroup, obj,
                       [](MacroAssembler& masm, const Address& addr) {
                         EmitPreBarrier(masm, addr, MIRType::ObjectGroup);
                       });

    masm.bind(&noGroupChange);
  }

  // Update the object's shape.
  masm.storeObjShape(newShape, obj,
                     [](MacroAssembler& masm, const Address& addr) {
                       EmitPreBarrier(masm, addr, MIRType::Shape);
                     });

  // Perform the store. No pre-barrier required since this is a new
  // initialization.
  if (op == CacheOp::AddAndStoreFixedSlot) {
    Address slot(obj, offset);
    masm.storeConstantOrRegister(val, slot);
  } else {
    MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
               op == CacheOp::AllocateAndStoreDynamicSlot);
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
    Address slot(scratch1, offset);
    masm.storeConstantOrRegister(val, slot);
  }

  if (needsPostBarrier()) {
    emitPostBarrierSlot(obj, val, scratch1);
  }

  return true;
}

bool IonCacheIRCompiler::emitAddAndStoreFixedSlot() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot);
}

bool IonCacheIRCompiler::emitAddAndStoreDynamicSlot() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot);
}

bool IonCacheIRCompiler::emitAllocateAndStoreDynamicSlot() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot);
}

bool IonCacheIRCompiler::emitStoreTypedObjectReferenceProperty() {
  JitSpew(JitSpew_Codegen, __FUNCTION__);
  Register obj = allocator.useRegister(masm, reader.objOperandId());
  int32_t offset = int32StubField(reader.stubOffset());
  TypedThingLayout layout = reader.typedThingLayout();
  ReferenceType type = reader.referenceTypeDescrType();

  ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());

  AutoScratchRegister scratch1(allocator, masm);
  AutoScratchRegister scratch2(allocator, masm);

  // We don't need to check property types if the property is always a
  // string.
  if (type != ReferenceType::TYPE_STRING) {
    FailurePath* failure;
    if (!addFailurePath(&failure)) {
      return false;
    }
    EmitCheckPropertyTypes(masm, typeCheckInfo_, obj, TypedOrValueRegister(val),
                           *liveRegs_, failure->label());
  }

  // Compute the address being written to.
  LoadTypedThingData(masm, layout, obj, scratch1);
  Address dest(scratch1, offset);

  emitStoreTypedObjectReferenceProp(val, type, dest, scratch2);

  if (needsPostBarrier() && type != ReferenceType::TYPE_STRING) {
    emitPostBarrierSlot(obj, val, scratch1);
  }
  return true;
}

static void EmitStoreDenseElement(MacroAssembler& masm,
                                  const ConstantOrRegister& value,
                                  Register elements,
                                  BaseObjectElementIndex target) {
  // If the ObjectElements::CONVERT_DOUBLE_ELEMENTS flag is set, int32 values
  // have to be converted to double first. If the value is not int32, it can
  // always be stored directly.

  Address elementsFlags(elements, ObjectElements::offsetOfFlags());
  if (value.constant()) {
    Value v = value.value();
    Label done;
    if (v.isInt32()) {
      Label dontConvert;
      masm.branchTest32(Assembler::Zero, elementsFlags,
                        Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS),
                        &dontConvert);
      masm.storeValue(DoubleValue(v.toInt32()), target);
      masm.jump(&done);
      masm.bind(&dontConvert);
    }
    masm.storeValue(v, target);
    masm.bind(&done);
    return;
  }

  TypedOrValueRegister reg = value.reg();
  if (reg.hasTyped() && reg.type() != MIRType::Int32) {
    masm.storeTypedOrValue(reg, target);
    return;
  }

  Label convert, storeValue, done;
  masm.branchTest32(Assembler::NonZero, elementsFlags,
                    Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS), &convert);
  masm.bind(&storeValue);