/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/MacroAssembler-inl.h"

#include "mozilla/CheckedInt.h"
#include "mozilla/MathAlgorithms.h"

#include "jsfriendapi.h"

#include "builtin/TypedObject.h"
#include "gc/GCTrace.h"
#include "jit/AtomicOp.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineIC.h"
#include "jit/BaselineJIT.h"
#include "jit/JitOptions.h"
#include "jit/Lowering.h"
#include "jit/MIR.h"
#include "jit/MoveEmitter.h"
#include "jit/Simulator.h"
#include "js/Conversions.h"
#include "js/Printf.h"
#include "vm/TraceLogging.h"

#include "gc/Nursery-inl.h"
#include "jit/shared/Lowering-shared-inl.h"
#include "jit/TemplateObject-inl.h"
#include "vm/Interpreter-inl.h"
#include "vm/JSObject-inl.h"
#include "vm/TypeInference-inl.h"

using namespace js;
using namespace js::jit;

using JS::GenericNaN;
using JS::ToInt32;

using mozilla::CheckedUint32;

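// Helper for guardTypeSet below: emits a single branch testing whether the
// value at |src| has the given TypeSet type. With Equal the branch is taken
// on a match, with NotEqual it is taken on a mismatch.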
template <typename T>
static void EmitTypeCheck(MacroAssembler& masm, Assembler::Condition cond,
                          const T& src, TypeSet::Type type, Label* label) {
  if (type.isAnyObject()) {
    masm.branchTestObject(cond, src, label);
    return;
  }
  switch (type.primitive()) {
    case ValueType::Double:
      // TI double type includes int32.
      masm.branchTestNumber(cond, src, label);
      break;
    case ValueType::Int32:
      masm.branchTestInt32(cond, src, label);
      break;
    case ValueType::Boolean:
      masm.branchTestBoolean(cond, src, label);
      break;
    case ValueType::String:
      masm.branchTestString(cond, src, label);
      break;
    case ValueType::Symbol:
      masm.branchTestSymbol(cond, src, label);
      break;
    case ValueType::BigInt:
      masm.branchTestBigInt(cond, src, label);
      break;
    case ValueType::Null:
      masm.branchTestNull(cond, src, label);
      break;
    case ValueType::Undefined:
      masm.branchTestUndefined(cond, src, label);
      break;
    case ValueType::Magic:
      masm.branchTestMagic(cond, src, label);
      break;
    case ValueType::PrivateGCThing:
    case ValueType::Object:
      MOZ_CRASH("Unexpected type");
  }
}

template <typename Source>
void MacroAssembler::guardTypeSet(const Source& address, const TypeSet* types,
                                  BarrierKind kind, Register unboxScratch,
                                  Register objScratch,
                                  Register spectreRegToZero, Label* miss) {
  // unboxScratch may be InvalidReg on 32-bit platforms. It should only be
  // used for extracting the Value tag or payload.
  //
  // objScratch may be InvalidReg if the TypeSet does not contain specific
  // objects to guard on. It should only be used for guardObjectType.
  //
  // spectreRegToZero is a register that will be zeroed by guardObjectType on
  // speculatively executed paths.

  MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
  MOZ_ASSERT(!types->unknown());

  Label matched;
  TypeSet::Type tests[] = {TypeSet::Int32Type(),   TypeSet::UndefinedType(),
                           TypeSet::BooleanType(), TypeSet::StringType(),
                           TypeSet::SymbolType(),  TypeSet::BigIntType(),
                           TypeSet::NullType(),    TypeSet::MagicArgType(),
                           TypeSet::AnyObjectType()};

  // The double type also implies Int32.
  // So replace the int32 test with the double one.
  if (types->hasType(TypeSet::DoubleType())) {
    MOZ_ASSERT(types->hasType(TypeSet::Int32Type()));
    tests[0] = TypeSet::DoubleType();
  }

  unsigned numBranches = 0;
  for (size_t i = 0; i < mozilla::ArrayLength(tests); i++) {
    if (types->hasType(tests[i])) {
      numBranches++;
    }
  }

  if (!types->unknownObject() && types->getObjectCount() > 0) {
    numBranches++;
  }

  if (numBranches == 0) {
    MOZ_ASSERT(types->empty());
    jump(miss);
    return;
  }

  Register tag = extractTag(address, unboxScratch);

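  // Every test but the last branches to |matched| on a hit; the final branch
  // (possibly the object guard emitted further down) instead branches to
  // |miss| on a mismatch and falls through on a match.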
  // Emit all typed tests.
  for (size_t i = 0; i < mozilla::ArrayLength(tests); i++) {
    if (!types->hasType(tests[i])) {
      continue;
    }

    if (--numBranches > 0) {
      EmitTypeCheck(*this, Equal, tag, tests[i], &matched);
    } else {
      EmitTypeCheck(*this, NotEqual, tag, tests[i], miss);
    }
  }

  // If we don't have specific objects to check for, we're done.
  if (numBranches == 0) {
    MOZ_ASSERT(types->unknownObject() || types->getObjectCount() == 0);
    bind(&matched);
    return;
  }

  // Test specific objects.
  MOZ_ASSERT(objScratch != InvalidReg);
  MOZ_ASSERT(objScratch != unboxScratch);

  MOZ_ASSERT(numBranches == 1);
  branchTestObject(NotEqual, tag, miss);

  if (kind != BarrierKind::TypeTagOnly) {
    Register obj = extractObject(address, unboxScratch);
    guardObjectType(obj, types, objScratch, spectreRegToZero, miss);
  } else {
#ifdef DEBUG
    Label fail;
    Register obj = extractObject(address, unboxScratch);
    guardObjectType(obj, types, objScratch, spectreRegToZero, &fail);
    jump(&matched);

    bind(&fail);
    guardTypeSetMightBeIncomplete(types, obj, objScratch, &matched);
    assumeUnreachable("Unexpected object type");
#endif
  }

  bind(&matched);
}

#ifdef DEBUG
// guardTypeSetMightBeIncomplete is only used in DEBUG builds. If this ever
// changes, we need to make sure it's Spectre-safe.
void MacroAssembler::guardTypeSetMightBeIncomplete(const TypeSet* types,
                                                   Register obj,
                                                   Register scratch,
                                                   Label* label) {
  // Type set guards might miss when an object's group changes. In this case
  // either its old group's properties will become unknown, or it will change
  // to a native object with an original unboxed group. Jump to label if this
  // might have happened for the input object.

  if (types->unknownObject()) {
    jump(label);
    return;
  }

  for (size_t i = 0; i < types->getObjectCount(); i++) {
    if (JSObject* singleton = getSingletonAndDelayBarrier(types, i)) {
      movePtr(ImmGCPtr(singleton), scratch);
      loadPtr(Address(scratch, JSObject::offsetOfGroup()), scratch);
    } else if (ObjectGroup* group = getGroupAndDelayBarrier(types, i)) {
      movePtr(ImmGCPtr(group), scratch);
    } else {
      continue;
    }
    branchTest32(Assembler::NonZero,
                 Address(scratch, ObjectGroup::offsetOfFlags()),
                 Imm32(OBJECT_FLAG_UNKNOWN_PROPERTIES), label);
  }
}
#endif

void MacroAssembler::guardObjectType(Register obj, const TypeSet* types,
                                     Register scratch,
                                     Register spectreRegToZero, Label* miss) {
  MOZ_ASSERT(obj != scratch);
  MOZ_ASSERT(!types->unknown());
  MOZ_ASSERT(!types->hasType(TypeSet::AnyObjectType()));
  MOZ_ASSERT_IF(types->getObjectCount() > 0, scratch != InvalidReg);

  // Note: this method elides read barriers on values read from type sets, as
  // this may be called off thread during Ion compilation. This is
  // safe to do as the final JitCode object will be allocated during the
  // incremental GC (or the compilation canceled before we start sweeping),
  // see CodeGenerator::link. Other callers should use TypeSet::readBarrier
  // to trigger the barrier on the contents of type sets passed in here.
  Label matched;

  bool hasSingletons = false;
  bool hasObjectGroups = false;
  unsigned numBranches = 0;

  unsigned count = types->getObjectCount();
  for (unsigned i = 0; i < count; i++) {
    if (types->hasGroup(i)) {
      hasObjectGroups = true;
      numBranches++;
    } else if (types->hasSingleton(i)) {
      hasSingletons = true;
      numBranches++;
    }
  }

  if (numBranches == 0) {
    jump(miss);
    return;
  }

  if (JitOptions.spectreObjectMitigationsBarriers) {
    move32(Imm32(0), scratch);
  }

  if (hasSingletons) {
    for (unsigned i = 0; i < count; i++) {
      JSObject* singleton = getSingletonAndDelayBarrier(types, i);
      if (!singleton) {
        continue;
      }

      if (JitOptions.spectreObjectMitigationsBarriers) {
        if (--numBranches > 0) {
          Label next;
          branchPtr(NotEqual, obj, ImmGCPtr(singleton), &next);
          spectreMovePtr(NotEqual, scratch, spectreRegToZero);
          jump(&matched);
          bind(&next);
        } else {
          branchPtr(NotEqual, obj, ImmGCPtr(singleton), miss);
          spectreMovePtr(NotEqual, scratch, spectreRegToZero);
        }
      } else {
        if (--numBranches > 0) {
          branchPtr(Equal, obj, ImmGCPtr(singleton), &matched);
        } else {
          branchPtr(NotEqual, obj, ImmGCPtr(singleton), miss);
        }
      }
    }
  }

  if (hasObjectGroups) {
    comment("has object groups");

    // If Spectre mitigations are enabled, we use the scratch register as
    // zero register. Without mitigations we can use it to store the group.
    Address groupAddr(obj, JSObject::offsetOfGroup());
    if (!JitOptions.spectreObjectMitigationsBarriers) {
      loadPtr(groupAddr, scratch);
    }

    for (unsigned i = 0; i < count; i++) {
      ObjectGroup* group = getGroupAndDelayBarrier(types, i);
      if (!group) {
        continue;
      }

      if (!pendingObjectGroupReadBarriers_.append(group)) {
        setOOM();
        return;
      }

      if (JitOptions.spectreObjectMitigationsBarriers) {
        if (--numBranches > 0) {
          Label next;
          branchPtr(NotEqual, groupAddr, ImmGCPtr(group), &next);
          spectreMovePtr(NotEqual, scratch, spectreRegToZero);
          jump(&matched);
          bind(&next);
        } else {
          branchPtr(NotEqual, groupAddr, ImmGCPtr(group), miss);
          spectreMovePtr(NotEqual, scratch, spectreRegToZero);
        }
      } else {
        if (--numBranches > 0) {
          branchPtr(Equal, scratch, ImmGCPtr(group), &matched);
        } else {
          branchPtr(NotEqual, scratch, ImmGCPtr(group), miss);
        }
      }
    }
  }

  MOZ_ASSERT(numBranches == 0);

  bind(&matched);
}

template void MacroAssembler::guardTypeSet(
    const Address& address, const TypeSet* types, BarrierKind kind,
    Register unboxScratch, Register objScratch, Register spectreRegToZero,
    Label* miss);
template void MacroAssembler::guardTypeSet(
    const ValueOperand& value, const TypeSet* types, BarrierKind kind,
    Register unboxScratch, Register objScratch, Register spectreRegToZero,
    Label* miss);
template void MacroAssembler::guardTypeSet(
    const TypedOrValueRegister& value, const TypeSet* types, BarrierKind kind,
    Register unboxScratch, Register objScratch, Register spectreRegToZero,
    Label* miss);

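// Shared helper for the storeToTypedFloatArray overloads below: stores a
// float value into a Float32 or Float64 typed-array element and crashes on
// any other array type.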
template <typename S, typename T>
static void StoreToTypedFloatArray(MacroAssembler& masm, int arrayType,
                                   const S& value, const T& dest) {
  switch (arrayType) {
    case Scalar::Float32:
      masm.storeFloat32(value, dest);
      break;
    case Scalar::Float64:
      masm.storeDouble(value, dest);
      break;
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}

void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const BaseIndex& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}
void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const Address& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}

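// Loads a typed-array element of the given type into |dest|. Uint32 loads
// into a GPR bail out to |fail| when the value does not fit in an int32, and
// BigInt64/BigUint64 loads always jump to |fail| because they are not handled
// inline here.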
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        AnyRegister dest, Register temp,
                                        Label* fail, bool canonicalizeDoubles) {
  switch (arrayType) {
    case Scalar::Int8:
      load8SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
      load8ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int16:
      load16SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint16:
      load16ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int32:
      load32(src, dest.gpr());
      break;
    case Scalar::Uint32:
      if (dest.isFloat()) {
        load32(src, temp);
        convertUInt32ToDouble(temp, dest.fpu());
      } else {
        load32(src, dest.gpr());

        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadUnboxedScalar to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
      }
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64:
      jump(fail);
      break;
    case Scalar::Float32:
      loadFloat32(src, dest.fpu());
      canonicalizeFloat(dest.fpu());
      break;
    case Scalar::Float64:
      loadDouble(src, dest.fpu());
      if (canonicalizeDoubles) {
        canonicalizeDouble(dest.fpu());
      }
      break;
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}

template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail,
                                                 bool canonicalizeDoubles);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail,
                                                 bool canonicalizeDoubles);

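// As above, but boxes the loaded element into the Value in |dest|. Uint32
// values that do not fit in an int32 are boxed as doubles when |allowDouble|
// is true and otherwise jump to |fail|.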
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        const ValueOperand& dest,
                                        bool allowDouble, Register temp,
                                        Label* fail) {
  switch (arrayType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()),
                         InvalidReg, nullptr);
      tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
      break;
    case Scalar::Uint32:
      // Don't clobber dest when we could fail, instead use temp.
      load32(src, temp);
      if (allowDouble) {
        // If the value fits in an int32, store an int32 type tag.
        // Else, convert the value to double and box it.
        Label done, isDouble;
        branchTest32(Assembler::Signed, temp, temp, &isDouble);
        {
          tagValue(JSVAL_TYPE_INT32, temp, dest);
          jump(&done);
        }
        bind(&isDouble);
        {
          ScratchDoubleScope fpscratch(*this);
          convertUInt32ToDouble(temp, fpscratch);
          boxDouble(fpscratch, dest, fpscratch);
        }
        bind(&done);
      } else {
        // Bailout if the value does not fit in an int32.
        branchTest32(Assembler::Signed, temp, temp, fail);
        tagValue(JSVAL_TYPE_INT32, temp, dest);
      }
      break;
    case Scalar::Float32: {
      ScratchDoubleScope dscratch(*this);
      FloatRegister fscratch = dscratch.asSingle();
      loadFromTypedArray(arrayType, src, AnyRegister(fscratch),
                         dest.scratchReg(), nullptr);
      convertFloat32ToDouble(fscratch, dscratch);
      boxDouble(dscratch, dest, dscratch);
      break;
    }
    case Scalar::Float64: {
      ScratchDoubleScope fpscratch(*this);
      loadFromTypedArray(arrayType, src, AnyRegister(fpscratch),
                         dest.scratchReg(), nullptr);
      boxDouble(fpscratch, dest, fpscratch);
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64: {
      jump(fail);
      break;
    }
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}

template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 const ValueOperand& dest,
                                                 bool allowDouble,
                                                 Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 const ValueOperand& dest,
                                                 bool allowDouble,
                                                 Register temp, Label* fail);

// Inlined version of gc::CheckAllocatorState that checks the bare essentials
// and bails for anything that cannot be handled with our jit allocators.
void MacroAssembler::checkAllocatorState(Label* fail) {
  // Don't execute the inline path if we are tracing allocations.
  if (js::gc::gcTracer.traceEnabled()) {
    jump(fail);
  }

#ifdef JS_GC_ZEAL
  // Don't execute the inline path if gc zeal or tracing are active.
  const uint32_t* ptrZealModeBits =
      GetJitContext()->runtime->addressOfGCZealModeBits();
  branch32(Assembler::NotEqual, AbsoluteAddress(ptrZealModeBits), Imm32(0),
           fail);
#endif

  // Don't execute the inline path if the realm has an object metadata callback,
  // as the metadata to use for the object may vary between executions of the
  // op.
  if (GetJitContext()->realm()->hasAllocationMetadataBuilder()) {
    jump(fail);
  }
}

bool MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind,
                                           gc::InitialHeap initialHeap) {
  // Note that Ion elides barriers on writes to objects known to be in the
  // nursery, so any allocation that can be made into the nursery must be made
  // into the nursery, even if the nursery is disabled. At runtime these will
  // take the out-of-line path, which is required to insert a barrier for the
  // initializing writes.
  return IsNurseryAllocable(allocKind) && initialHeap != gc::TenuredHeap;
}

// Inline version of Nursery::allocateObject. If the object has dynamic slots,
// this fills in the slots_ pointer.
void MacroAssembler::nurseryAllocateObject(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           size_t nDynamicSlots, Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // We still need to allocate in the nursery, per the comment in
  // shouldNurseryAllocate; however, we need to insert into the
  // mallocedBuffers set, so bail to do the nursery allocation in the
  // interpreter.
  if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
    jump(fail);
    return;
  }

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.
  CompileZone* zone = GetJitContext()->realm()->zone();
  size_t thingSize = gc::Arena::thingSize(allocKind);
  size_t totalSize = thingSize + nDynamicSlots * sizeof(HeapSlot);
  MOZ_ASSERT(totalSize < INT32_MAX);
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  bumpPointerAllocate(result, temp, fail, zone->addressOfNurseryPosition(),
                      zone->addressOfNurseryCurrentEnd(), totalSize, totalSize);

  if (nDynamicSlots) {
    computeEffectiveAddress(Address(result, thingSize), temp);
    storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
  }
}

// Inlined version of FreeSpan::allocate. This does not fill in slots_.
void MacroAssembler::freeListAllocate(Register result, Register temp,
                                      gc::AllocKind allocKind, Label* fail) {
  CompileZone* zone = GetJitContext()->realm()->zone();
  int thingSize = int(gc::Arena::thingSize(allocKind));

  Label fallback;
  Label success;

  // Load the first and last offsets of |zone|'s free list for |allocKind|.
  // If there is no room remaining in the span, fall back to get the next one.
  gc::FreeSpan** ptrFreeList = zone->addressOfFreeList(allocKind);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
  branch32(Assembler::AboveOrEqual, result, temp, &fallback);

  // Bump the offset for the next allocation.
  add32(Imm32(thingSize), result);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  sub32(Imm32(thingSize), result);
  addPtr(temp, result);  // Turn the offset into a pointer.
  jump(&success);

  bind(&fallback);
  // If there are no free spans left, we bail to finish the allocation. The
  // interpreter will call the GC allocator to set up a new arena to allocate
  // from, after which we can resume allocating in the jit.
  branchTest32(Assembler::Zero, result, result, fail);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  addPtr(temp, result);  // Turn the offset into a pointer.
  Push(result);
  // Update the free list to point to the next span (which may be empty).
  load32(Address(result, 0), result);
  store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  Pop(result);

  bind(&success);

  if (GetJitContext()->runtime->geckoProfiler().enabled()) {
    uint32_t* countAddress =
        GetJitContext()->runtime->addressOfTenuredAllocCount();
    movePtr(ImmPtr(countAddress), temp);
    add32(Imm32(1), Address(temp, 0));
  }
}

void MacroAssembler::callFreeStub(Register slots) {
  // This register must match the one in JitRuntime::generateFreeStub.
  const Register regSlots = CallTempReg0;

  push(regSlots);
  movePtr(slots, regSlots);
  call(GetJitContext()->runtime->jitRuntime()->freeStub());
  pop(regSlots);
}

// Inlined equivalent of gc::AllocateObject, without failure case handling.
void MacroAssembler::allocateObject(Register result, Register temp,
                                    gc::AllocKind allocKind,
                                    uint32_t nDynamicSlots,
                                    gc::InitialHeap initialHeap, Label* fail) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

  checkAllocatorState(fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::DefaultHeap);
    return nurseryAllocateObject(result, temp, allocKind, nDynamicSlots, fail);
  }

  // Fall back to calling into the VM to allocate objects in the tenured heap
  // that have dynamic slots.
  if (nDynamicSlots) {
    jump(fail);
    return;
  }

  return freeListAllocate(result, temp, allocKind, fail);
}

void MacroAssembler::createGCObject(Register obj, Register temp,
                                    const TemplateObject& templateObj,
                                    gc::InitialHeap initialHeap, Label* fail,
                                    bool initContents) {
  gc::AllocKind allocKind = templateObj.getAllocKind();
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

  uint32_t nDynamicSlots = 0;
  if (templateObj.isNative()) {
    const NativeTemplateObject& ntemplate =
        templateObj.asNativeTemplateObject();
    nDynamicSlots = ntemplate.numDynamicSlots();

    // Arrays with copy on write elements do not need fixed space for an
    // elements header. The template object, which owns the original
    // elements, might have another allocation kind.
    if (ntemplate.denseElementsAreCopyOnWrite()) {
      allocKind = gc::AllocKind::OBJECT0_BACKGROUND;
    }
  }

  allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
  initGCThing(obj, temp, templateObj, initContents);
}

// Inlined equivalent of gc::AllocateNonObject, without failure case handling.
// Non-object allocation does not need to worry about slots, so can take a
// simpler path.
void MacroAssembler::allocateNonObject(Register result, Register temp,
                                       gc::AllocKind allocKind, Label* fail) {
  checkAllocatorState(fail);
  freeListAllocate(result, temp, allocKind, fail);
}

// Inline version of Nursery::allocateString.
void MacroAssembler::nurseryAllocateString(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  CompileZone* zone = GetJitContext()->realm()->zone();
  size_t thingSize = gc::Arena::thingSize(allocKind);
  size_t totalSize = js::Nursery::stringHeaderSize() + thingSize;
  MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  bumpPointerAllocate(
      result, temp, fail, zone->addressOfStringNurseryPosition(),
      zone->addressOfStringNurseryCurrentEnd(), totalSize, thingSize);
  storePtr(ImmPtr(zone), Address(result, -js::Nursery::stringHeaderSize()));
}

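// Bump-allocation helper used by the nursery allocators above: advances the
// position stored at |posAddr| by |totalSize|, jumps to |fail| if that moves
// past |curEndAddr|, and leaves |result| pointing |size| bytes before the new
// position, i.e. at the newly allocated thing (past any header included in
// |totalSize|).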
void MacroAssembler::bumpPointerAllocate(Register result, Register temp,
                                         Label* fail, void* posAddr,
                                         const void* curEndAddr,
                                         uint32_t totalSize, uint32_t size) {
  // The position (allocation pointer) and the end pointer are stored
  // very close to each other -- specifically, easily within a 32 bit offset.
  // Use relative offsets between them, to avoid 64-bit immediate loads.
  //
  // I tried to optimise this further by using an extra register to avoid
  // the final subtraction and hopefully get some more instruction
  // parallelism, but it made no difference.
  movePtr(ImmPtr(posAddr), temp);
  loadPtr(Address(temp, 0), result);
  addPtr(Imm32(totalSize), result);
  CheckedInt<int32_t> endOffset =
      (CheckedInt<uintptr_t>(uintptr_t(curEndAddr)) -
       CheckedInt<uintptr_t>(uintptr_t(posAddr)))
          .toChecked<int32_t>();
  MOZ_ASSERT(endOffset.isValid(), "Position and end pointers must be nearby");
  branchPtr(Assembler::Below, Address(temp, endOffset.value()), result, fail);
  storePtr(result, Address(temp, 0));
  subPtr(Imm32(size), result);

  if (GetJitContext()->runtime->geckoProfiler().enabled()) {
    CompileZone* zone = GetJitContext()->realm()->zone();
    uint32_t* countAddress = zone->addressOfNurseryAllocCount();
    CheckedInt<int32_t> counterOffset =
        (CheckedInt<uintptr_t>(uintptr_t(countAddress)) -
         CheckedInt<uintptr_t>(uintptr_t(posAddr)))
            .toChecked<int32_t>();
    if (counterOffset.isValid()) {
      add32(Imm32(1), Address(temp, counterOffset.value()));
    } else {
      movePtr(ImmPtr(countAddress), temp);
      add32(Imm32(1), Address(temp, 0));
    }
  }
}

// Inlined equivalent of gc::AllocateString, jumping to fail if nursery
// allocation requested but unsuccessful.
void MacroAssembler::allocateString(Register result, Register temp,
                                    gc::AllocKind allocKind,
                                    gc::InitialHeap initialHeap, Label* fail) {
  MOZ_ASSERT(allocKind == gc::AllocKind::STRING ||
             allocKind == gc::AllocKind::FAT_INLINE_STRING);

  checkAllocatorState(fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::DefaultHeap);
    return nurseryAllocateString(result, temp, allocKind, fail);
  }

  freeListAllocate(result, temp, allocKind, fail);
}

void MacroAssembler::newGCString(Register result, Register temp, Label* fail,
                                 bool attemptNursery) {
  allocateString(result, temp, js::gc::AllocKind::STRING,
                 attemptNursery ? gc::DefaultHeap : gc::TenuredHeap, fail);
}

void MacroAssembler::newGCFatInlineString(Register result, Register temp,
                                          Label* fail, bool attemptNursery) {
  allocateString(result, temp, js::gc::AllocKind::FAT_INLINE_STRING,
                 attemptNursery ? gc::DefaultHeap : gc::TenuredHeap, fail);
}

void MacroAssembler::copySlotsFromTemplate(
    Register obj, const NativeTemplateObject& templateObj, uint32_t start,
    uint32_t end) {
  uint32_t nfixed = Min(templateObj.numFixedSlots(), end);
  for (unsigned i = start; i < nfixed; i++) {
    // Template objects are not exposed to script and therefore immutable.
    // However, regexp template objects are sometimes used directly (when
    // the cloning is not observable), and therefore we can end up with a
    // non-zero lastIndex. Detect this case here and just substitute 0, to
    // avoid racing with the main thread updating this slot.
    Value v;
    if (templateObj.isRegExpObject() && i == RegExpObject::lastIndexSlot()) {
      v = Int32Value(0);
    } else {
      v = templateObj.getSlot(i);
    }
    storeValue(v, Address(obj, NativeObject::getFixedSlotOffset(i)));
  }
}

void MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
                                                uint32_t start, uint32_t end,
                                                const Value& v) {
  MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));

  if (start >= end) {
    return;
  }

#ifdef JS_NUNBOX32
  // We only have a single spare register, so do the initialization as two
  // strided writes of the tag and body.
  Address addr = base;
  move32(Imm32(v.toNunboxPayload()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtrValue)) {
    store32(temp, ToPayload(addr));
  }

  addr = base;
  move32(Imm32(v.toNunboxTag()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtrValue)) {
    store32(temp, ToType(addr));
  }
#else
  moveValue(v, ValueOperand(temp));
  for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtrValue)) {
    storePtr(temp, base);
  }
#endif
}

void MacroAssembler::fillSlotsWithUndefined(Address base, Register temp,
                                            uint32_t start, uint32_t end) {
  fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
}

void MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp,
                                                uint32_t start, uint32_t end) {
  fillSlotsWithConstantValue(base, temp, start, end,
                             MagicValue(JS_UNINITIALIZED_LEXICAL));
}

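// Scans the template object's slots from the end: |*startOfUndefined| is set
// to the first index of the trailing run of undefined slots, and
// |*startOfUninitialized| to the start of the run of uninitialized-lexical
// slots immediately before it (or to |*startOfUndefined| if there is none).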
static void FindStartOfUninitializedAndUndefinedSlots(
    const NativeTemplateObject& templateObj, uint32_t nslots,
    uint32_t* startOfUninitialized, uint32_t* startOfUndefined) {
  MOZ_ASSERT(nslots == templateObj.slotSpan());
  MOZ_ASSERT(nslots > 0);

  uint32_t first = nslots;
  for (; first != 0; --first) {
    if (templateObj.getSlot(first - 1) != UndefinedValue()) {
      break;
    }
  }
  *startOfUndefined = first;

  if (first != 0 && IsUninitializedLexical(templateObj.getSlot(first - 1))) {
    for (; first != 0; --first) {
      if (!IsUninitializedLexical(templateObj.getSlot(first - 1))) {
        break;
      }
    }
    *startOfUninitialized = first;
  } else {
    *startOfUninitialized = *startOfUndefined;
  }
}

static void AllocateAndInitTypedArrayBuffer(JSContext* cx,
                                            TypedArrayObject* obj,
                                            int32_t count) {
  AutoUnsafeCallWithABI unsafe;

  obj->initPrivate(nullptr);

  // Negative numbers or zero will bail out to the slow path, which in turn will
  // raise an invalid argument exception or create a correct object with zero
  // elements.
  if (count <= 0 || uint32_t(count) >= INT32_MAX / obj->bytesPerElement()) {
    obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, Int32Value(0));
    return;
  }

  obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, Int32Value(count));

  size_t nbytes = count * obj->bytesPerElement();
  MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid(),
             "JS_ROUNDUP must not overflow");

  nbytes = JS_ROUNDUP(nbytes, sizeof(Value));
  void* buf = cx->nursery().allocateZeroedBuffer(obj, nbytes,
                                                 js::ArrayBufferContentsArena);
  if (buf) {
    InitObjectPrivate(obj, buf, nbytes, MemoryUse::TypedArrayElements);
  }
}

void MacroAssembler::initTypedArraySlots(Register obj, Register temp,
                                         Register lengthReg,
                                         LiveRegisterSet liveRegs, Label* fail,
                                         TypedArrayObject* templateObj,
                                         TypedArrayLength lengthKind) {
  MOZ_ASSERT(templateObj->hasPrivate());
  MOZ_ASSERT(!templateObj->hasBuffer());

  constexpr size_t dataSlotOffset = TypedArrayObject::dataOffset();
  constexpr size_t dataOffset = dataSlotOffset + sizeof(HeapSlot);

  static_assert(
      TypedArrayObject::FIXED_DATA_START == TypedArrayObject::DATA_SLOT + 1,
      "fixed inline element data assumed to begin after the data slot");

  static_assert(
      TypedArrayObject::INLINE_BUFFER_LIMIT ==
          JSObject::MAX_BYTE_SIZE - dataOffset,
      "typed array inline buffer is limited by the maximum object byte size");

  // Initialise data elements to zero.
  int32_t length = templateObj->length();
  size_t nbytes = length * templateObj->bytesPerElement();

  if (lengthKind == TypedArrayLength::Fixed &&
      nbytes <= TypedArrayObject::INLINE_BUFFER_LIMIT) {
    MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());

    // Store data elements inside the remaining JSObject slots.
    computeEffectiveAddress(Address(obj, dataOffset), temp);
    storePtr(temp, Address(obj, dataSlotOffset));

    // Write enough zero pointers into fixed data to zero every
    // element. (This zeroes past the end of a byte count that's
    // not a multiple of pointer size. That's okay, because fixed
    // data is a count of 8-byte HeapSlots (i.e. <= pointer size),
    // and we won't inline unless the desired memory fits in that
    // space.)
    static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");

    size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char*);
    for (size_t i = 0; i < numZeroPointers; i++) {
      storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char*)));
    }
#ifdef DEBUG
    if (nbytes == 0) {
      store8(Imm32(TypedArrayObject::ZeroLengthArrayData),
             Address(obj, dataSlotOffset));
    }
#endif
  } else {
    if (lengthKind == TypedArrayLength::Fixed) {
      move32(Imm32(length), lengthReg);
    }

    // Allocate a buffer on the heap to store the data elements.
    liveRegs.addUnchecked(temp);
    liveRegs.addUnchecked(obj);
    liveRegs.addUnchecked(lengthReg);
    PushRegsInMask(liveRegs);
    setupUnalignedABICall(temp);
    loadJSContext(temp);
    passABIArg(temp);
    passABIArg(obj);
    passABIArg(lengthReg);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, AllocateAndInitTypedArrayBuffer));
    PopRegsInMask(liveRegs);

    // Fail when data elements is set to NULL.
    branchPtr(Assembler::Equal, Address(obj, dataSlotOffset), ImmWord(0), fail);
  }
}

void MacroAssembler::initGCSlots(Register obj, Register temp,
                                 const NativeTemplateObject& templateObj,
                                 bool initContents) {
  // Slots of non-array objects are required to be initialized.
  // Use the values currently in the template object.
  uint32_t nslots = templateObj.slotSpan();
  if (nslots == 0) {
    return;
  }

  uint32_t nfixed = templateObj.numUsedFixedSlots();
  uint32_t ndynamic = templateObj.numDynamicSlots();

  // Attempt to group slot writes such that we minimize the amount of
  // duplicated data we need to embed in code and load into registers. In
  // general, most template object slots will be undefined except for any
  // reserved slots. Since reserved slots come first, we split the object
  // logically into independent non-UndefinedValue writes to the head and
  // duplicated writes of UndefinedValue to the tail. For the majority of
  // objects, the "tail" will be the entire slot range.
  //
  // The template object may be a CallObject, in which case we need to
  // account for uninitialized lexical slots as well as undefined
  // slots. Uninitialized lexical slots appear in CallObjects if the function
  // has parameter expressions, in which case closed over parameters have
  // TDZ. Uninitialized slots come before undefined slots in CallObjects.
  uint32_t startOfUninitialized = nslots;
  uint32_t startOfUndefined = nslots;
  FindStartOfUninitializedAndUndefinedSlots(
      templateObj, nslots, &startOfUninitialized, &startOfUndefined);
  MOZ_ASSERT(startOfUninitialized <= nfixed);  // Reserved slots must be fixed.
  MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
  MOZ_ASSERT_IF(!templateObj.isCallObject(),
                startOfUninitialized == startOfUndefined);

  // Copy over any preserved reserved slots.
  copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);

  // Fill the rest of the fixed slots with undefined and uninitialized.
  if (initContents) {
    size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
    fillSlotsWithUninitialized(Address(obj, offset), temp, startOfUninitialized,
                               Min(startOfUndefined, nfixed));

    offset = NativeObject::getFixedSlotOffset(startOfUndefined);
    fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined,
                           nfixed);
  }

  if (ndynamic) {
    // We are short one register to do this elegantly. Borrow the obj
    // register briefly for our slots base address.
    push(obj);
    loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);

    // Fill uninitialized slots if necessary. Otherwise initialize all
    // slots to undefined.
    if (startOfUndefined > nfixed) {
      MOZ_ASSERT(startOfUninitialized != startOfUndefined);
      fillSlotsWithUninitialized(Address(obj, 0), temp, 0,
                                 startOfUndefined - nfixed);
      size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
      fillSlotsWithUndefined(Address(obj, offset), temp,
                             startOfUndefined - nfixed, ndynamic);
    } else {
      fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
    }

    pop(obj);
  }
}

#ifdef JS_GC_TRACE
static void TraceCreateObject(JSObject* obj) {
  AutoUnsafeCallWithABI unsafe;
  js::gc::gcTracer.traceCreateObject(obj);
}
#endif

void MacroAssembler::initGCThing(Register obj, Register temp,
                                 const TemplateObject& templateObj,
                                 bool initContents) {
  // Fast initialization of an empty object returned by allocateObject().

  storePtr(ImmGCPtr(templateObj.group()),
           Address(obj, JSObject::offsetOfGroup()));

  storePtr(ImmGCPtr(templateObj.shape()),
           Address(obj, JSObject::offsetOfShape()));

  if (templateObj.isNative()) {
    const NativeTemplateObject& ntemplate =
        templateObj.asNativeTemplateObject();
    MOZ_ASSERT_IF(!ntemplate.denseElementsAreCopyOnWrite(),
                  !ntemplate.hasDynamicElements());
    MOZ_ASSERT_IF(ntemplate.convertDoubleElements(), ntemplate.isArrayObject());

    // If the object has dynamic slots, the slots member has already been
    // filled in.
    if (!ntemplate.hasDynamicSlots()) {
      storePtr(ImmPtr(nullptr), Address(obj, NativeObject::offsetOfSlots()));
    }

    if (ntemplate.denseElementsAreCopyOnWrite()) {
      storePtr(ImmPtr(ntemplate.getDenseElements()),
               Address(obj, NativeObject::offsetOfElements()));
    } else if (ntemplate.isArrayObject()) {
      int elementsOffset = NativeObject::offsetOfFixedElements();

      computeEffectiveAddress(Address(obj, elementsOffset), temp);
      storePtr(temp, Address(obj, NativeObject::offsetOfElements()));

      // Fill in the elements header.
      store32(
          Imm32(ntemplate.getDenseCapacity()),
          Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
      store32(Imm32(ntemplate.getDenseInitializedLength()),
              Address(obj, elementsOffset +
                               ObjectElements::offsetOfInitializedLength()));
      store32(Imm32(ntemplate.getArrayLength()),
              Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
      store32(Imm32(ntemplate.convertDoubleElements()
                        ? ObjectElements::CONVERT_DOUBLE_ELEMENTS
                        : 0),
              Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
      MOZ_ASSERT(!ntemplate.hasPrivate());
    } else if (ntemplate.isArgumentsObject()) {
      // The caller will initialize the reserved slots.
      MOZ_ASSERT(!initContents);
      MOZ_ASSERT(!ntemplate.hasPrivate());
      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));
    } else {
      // If the target type could be a TypedArray that maps shared memory
      // then this would need to store emptyObjectElementsShared in that case.
      MOZ_ASSERT(!ntemplate.isSharedMemory());

      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));

      initGCSlots(obj, temp, ntemplate, initContents);

      if (ntemplate.hasPrivate() && !ntemplate.isTypedArrayObject()) {
        uint32_t nfixed = ntemplate.numFixedSlots();
        Address privateSlot(obj, NativeObject::getPrivateDataOffset(nfixed));
        if (ntemplate.isRegExpObject()) {
          // RegExpObject stores a GC thing (RegExpShared*) in its
          // private slot, so we have to use ImmGCPtr.
          storePtr(ImmGCPtr(ntemplate.regExpShared()), privateSlot);
        } else {
          storePtr(ImmPtr(ntemplate.getPrivate()), privateSlot);
        }
      }
    }
  } else if (templateObj.isInlineTypedObject()) {
    JS::AutoAssertNoGC nogc;  // off-thread, so cannot GC
    size_t nbytes = templateObj.getInlineTypedObjectSize();
    const uint8_t* memory = templateObj.getInlineTypedObjectMem(nogc);

    // Memcpy the contents of the template object to the new object.
    size_t offset = 0;
    while (nbytes) {
      uintptr_t value = *(uintptr_t*)(memory + offset);
      storePtr(ImmWord(value),
               Address(obj, InlineTypedObject::offsetOfDataStart() + offset));
      nbytes = (nbytes < sizeof(uintptr_t)) ? 0 : nbytes - sizeof(uintptr_t);
      offset += sizeof(uintptr_t);
    }
  } else {
    MOZ_CRASH("Unknown object");
  }

#ifdef JS_GC_TRACE
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  regs.takeUnchecked(obj);
  Register temp2 = regs.takeAnyGeneral();

  setupUnalignedABICall(temp2);
  passABIArg(obj);
  callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceCreateObject));

  PopRegsInMask(save);
#endif
}

void MacroAssembler::compareStrings(JSOp op, Register left, Register right,
                                    Register result, Label* fail) {
  MOZ_ASSERT(left != result);
  MOZ_ASSERT(right != result);
  MOZ_ASSERT(IsEqualityOp(op) || IsRelationalOp(op));

  Label notPointerEqual;
  // If operands point to the same instance, the strings are trivially equal.
  branchPtr(Assembler::NotEqual, left, right,
            IsEqualityOp(op) ? &notPointerEqual : fail);
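  // Pointer-equal strings compare as equal, so for relational ops <= and >=
  // are true here while < and > are false.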
  move32(Imm32(op == JSOP_EQ || op == JSOP_STRICTEQ || op == JSOP_LE ||
               op == JSOP_GE),
         result);

  if (IsEqualityOp(op)) {
    Label done;
    jump(&done);

    bind(&notPointerEqual);

    Label leftIsNotAtom;
    Label setNotEqualResult;
    // Atoms cannot be equal to each other if they point to different strings.
    Imm32 nonAtomBit(JSString::NON_ATOM_BIT);
    branchTest32(Assembler::NonZero, Address(left, JSString::offsetOfFlags()),
                 nonAtomBit, &leftIsNotAtom);
    branchTest32(Assembler::Zero, Address(right, JSString::offsetOfFlags()),
                 nonAtomBit, &setNotEqualResult);

    bind(&leftIsNotAtom);
    // Strings of different length can never be equal.
    loadStringLength(left, result);
    branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()),
             result, fail);

    bind(&setNotEqualResult);
    move32(Imm32(op == JSOP_NE || op == JSOP_STRICTNE), result);

    bind(&done);
  }
}

void MacroAssembler::loadStringChars(Register str, Register dest,
                                     CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    if (encoding == CharEncoding::Latin1) {
      // If the string is a rope, zero the |str| register. The code below
      // depends on str->flags so this should block speculative execution.
      movePtr(ImmWord(0), dest);
      test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::LINEAR_BIT), dest, str);
    } else {
      // If we're loading TwoByte chars, there's an additional risk:
      // if the string has Latin1 chars, we could read out-of-bounds. To
      // prevent this, we check both the Linear and Latin1 bits. We don't
      // have a scratch register, so we use these flags also to block
      // speculative execution, similar to the use of 0 above.
      MOZ_ASSERT(encoding == CharEncoding::TwoByte);
      static constexpr uint32_t Mask =
          JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT;
      static_assert(Mask < 1024,
                    "Mask should be a small, near-null value to ensure we "
                    "block speculative execution when it's used as string "
                    "pointer");
      move32(Imm32(Mask), dest);
      and32(Address(str, JSString::offsetOfFlags()), dest);
      cmp32MovePtr(Assembler::NotEqual, dest, Imm32(JSString::LINEAR_BIT), dest,
                   str);
    }
  }

  // Load the inline chars.
  computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
                          dest);

  // If it's not an inline string, load the non-inline chars. Use a
  // conditional move to prevent speculative execution.
  test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                Imm32(JSString::INLINE_CHARS_BIT),
                Address(str, JSString::offsetOfNonInlineChars()), dest);
}

void MacroAssembler::loadNonInlineStringChars(Register str, Register dest,
                                              CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is a rope, has inline chars, or has a different
    // character encoding, set str to a near-null value to prevent
    // speculative execution below (when reading str->nonInlineChars).

    static constexpr uint32_t Mask = JSString::LINEAR_BIT |
                                     JSString::INLINE_CHARS_BIT |
                                     JSString::LATIN1_CHARS_BIT;
    static_assert(Mask < 1024,
                  "Mask should be a small, near-null value to ensure we "
                  "block speculative execution when it's used as string "
                  "pointer");

    uint32_t expectedBits = JSString::LINEAR_BIT;
    if (encoding == CharEncoding::Latin1) {
      expectedBits |= JSString::LATIN1_CHARS_BIT;
    }

    move32(Imm32(Mask), dest);
    and32(Address(str, JSString::offsetOfFlags()), dest);

    cmp32MovePtr(Assembler::NotEqual, dest, Imm32(expectedBits), dest, str);
  }

  loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
}

void MacroAssembler::storeNonInlineStringChars(Register chars, Register str) {
  MOZ_ASSERT(chars != str);
  storePtr(chars, Address(str, JSString::offsetOfNonInlineChars()));
}

void MacroAssembler::loadInlineStringCharsForStore(Register str,
                                                   Register dest) {
  computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
                          dest);
}

void MacroAssembler::loadInlineStringChars(Register str, Register dest,
                                           CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Making this Spectre-safe is a bit complicated: using
    // computeEffectiveAddress and then zeroing the output register if
    // non-inline is not sufficient: when the index is very large, it would
    // allow reading |nullptr + index|. Just fall back to loadStringChars
    // for now.
    loadStringChars(str, dest, encoding);
  } else {
    computeEffectiveAddress(
        Address(str, JSInlineString::offsetOfInlineStorage()), dest);
  }
}

void MacroAssembler::loadRopeLeftChild(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Zero the output register if the input was not a rope.
    movePtr(ImmWord(0), dest);
    test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::LINEAR_BIT),
                  Address(str, JSRope::offsetOfLeft()), dest);
  } else {
    loadPtr(Address(str, JSRope::offsetOfLeft()), dest);
  }
}

void MacroAssembler::storeRopeChildren(Register left, Register right,
                                       Register str) {
  storePtr(left, Address(str, JSRope::offsetOfLeft()));
  storePtr(right, Address(str, JSRope::offsetOfRight()));
}

void MacroAssembler::loadDependentStringBase(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string does not have a base-string, zero the |str| register.
    // The code below loads str->base so this should block speculative
    // execution.
    movePtr(ImmWord(0), dest);
    test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::HAS_BASE_BIT), dest, str);
  }

  loadPtr(Address(str, JSDependentString::offsetOfBase()), dest);
}

void MacroAssembler::storeDependentStringBase(Register base, Register str) {
  storePtr(base, Address(str, JSDependentString::offsetOfBase()));
}

void MacroAssembler::loadStringChar(Register str, Register index,
                                    Register output, Register scratch,
                                    Label* fail) {
  MOZ_ASSERT(str != output);
  MOZ_ASSERT(str != index);
  MOZ_ASSERT(index != output);
  MOZ_ASSERT(output != scratch);

  movePtr(str, output);

  // This follows JSString::getChar.
  Label notRope;
  branchIfNotRope(str, &notRope);

  loadRopeLeftChild(str, output);

  // Check if the index is contained in the leftChild.
  // Todo: Handle index in the rightChild.
  spectreBoundsCheck32(index, Address(output, JSString::offsetOfLength()),
                       scratch, fail);

  // If the left side is another rope, give up.
  branchIfRope(output, fail);

  bind(&notRope);

  Label isLatin1, done;
  // We have to check the left/right side for ropes,
  // because a TwoByte rope might have a Latin1 child.
  branchLatin1String(output, &isLatin1);
  loadStringChars(output, scratch, CharEncoding::TwoByte);
  loadChar(scratch, index, output, CharEncoding::TwoByte);
  jump(&done);

  bind(&isLatin1);
  loadStringChars(output, scratch, CharEncoding::Latin1);
  loadChar(scratch, index, output, CharEncoding::Latin1);

  bind(&done);
}

void MacroAssembler::loadStringIndexValue(Register str, Register dest,
                                          Label* fail) {
  MOZ_ASSERT(str != dest);

  load32(Address(str, JSString::offsetOfFlags()), dest);

  // Does not have a cached index value.
  branchTest32(Assembler::Zero, dest, Imm32(JSString::INDEX_VALUE_BIT), fail);

  // Extract the index.
  rshift32(Imm32(JSString::INDEX_VALUE_SHIFT), dest);
}

void MacroAssembler::loadChar(Register chars, Register index, Register dest,
                              CharEncoding encoding, int32_t offset /* = 0 */) {
  if (encoding == CharEncoding::Latin1) {
    loadChar(BaseIndex(chars, index, TimesOne, offset), dest, encoding);
  } else {
    loadChar(BaseIndex(chars, index, TimesTwo, offset), dest, encoding);
  }
}

void MacroAssembler::addToCharPtr(Register chars, Register index,
                                  CharEncoding encoding) {
  if (encoding == CharEncoding::Latin1) {
    static_assert(sizeof(char) == 1,
                  "Latin-1 string index shouldn't need scaling");
    addPtr(index, chars);
  } else {
    computeEffectiveAddress(BaseIndex(chars, index, TimesTwo), chars);
  }
}

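// Jumps to |isObject|, |isCallable| or |isUndefined| based on the object's
// class; proxies take the |slow| path because emulate-undefined and
// callability cannot be decided from the class alone.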
void MacroAssembler::typeOfObject(Register obj, Register scratch, Label* slow,
                                  Label* isObject, Label* isCallable,
                                  Label* isUndefined) {
  loadObjClassUnsafe(obj, scratch);

  // Proxies can emulate undefined and have complex isCallable behavior.
  branchTestClassIsProxy(true, scratch, slow);

  // JSFunctions are always callable.
  branchPtr(Assembler::Equal, scratch, ImmPtr(&JSFunction::class_), isCallable);

  // Objects that emulate undefined.
  Address flags(scratch, JSClass::offsetOfFlags());
  branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_EMULATES_UNDEFINED),
               isUndefined);

  // Handle classes with a call hook.
  branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClass, cOps)),
            ImmPtr(nullptr), isObject);

  loadPtr(Address(scratch, offsetof(JSClass, cOps)), scratch);
  branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClassOps, call)),
            ImmPtr(nullptr), isObject);

  jump(isCallable);
}

void MacroAssembler::loadJSContext(Register dest) {
  JitContext* jcx = GetJitContext();
  movePtr(ImmPtr(jcx->runtime->mainContextPtr()), dest);
}

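// Address of the main-thread JSContext's current realm pointer, so the realm
// switches below can be done with a single absolute store.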
static const uint8_t* ContextRealmPtr() {
  return (
      static_cast<const uint8_t*>(GetJitContext()->runtime->mainContextPtr()) +
      JSContext::offsetOfRealm());
}

void MacroAssembler::switchToRealm(Register realm) {
  storePtr(realm, AbsoluteAddress(ContextRealmPtr()));
}

void MacroAssembler::switchToRealm(const void* realm, Register scratch) {
  MOZ_ASSERT(realm);

  movePtr(ImmPtr(realm), scratch);
  switchToRealm(scratch);
}

void MacroAssembler::switchToObjectRealm(Register obj, Register scratch) {
  loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
  loadPtr(Address(scratch, ObjectGroup::offsetOfRealm()), scratch);
  switchToRealm(scratch);
}

void MacroAssembler::switchToBaselineFrameRealm(Register scratch) {
  Address envChain(BaselineFrameReg,
                   BaselineFrame::reverseOffsetOfEnvironmentChain());
  loadPtr(envChain, scratch);
  switchToObjectRealm(scratch, scratch);
}

void MacroAssembler::switchToWasmTlsRealm(Register scratch1,
                                          Register scratch2) {
  loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, cx)), scratch1);
  loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, realm)), scratch2);
  storePtr(scratch2, Address(scratch1, JSContext::offsetOfRealm()));
}

void MacroAssembler::debugAssertContextRealm(const void* realm,
                                             Register scratch) {
#ifdef DEBUG
  Label ok;
  movePtr(ImmPtr(realm), scratch);
  branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr()), scratch, &ok);
  assumeUnreachable("Unexpected context realm");
  bind(&ok);
#endif
}

void MacroAssembler::guardGroupHasUnanalyzedNewScript(Register group,
                                                      Register scratch,
                                                      Label* fail) {
  Label noNewScript;
  load32(Address(group, ObjectGroup::offsetOfFlags()), scratch);
  and32(Imm32(OBJECT_FLAG_ADDENDUM_MASK), scratch);
  branch32(Assembler::NotEqual, scratch,
           Imm32(uint32_t(ObjectGroup::Addendum_NewScript)
                 << OBJECT_FLAG_ADDENDUM_SHIFT),
           &noNewScript);

  // Guard group->newScript()->preliminaryObjects is non-nullptr.
  loadPtr(Address(group, ObjectGroup::offsetOfAddendum()), scratch);
  branchPtr(Assembler::Equal,
            Address(scratch, TypeNewScript::offsetOfPreliminaryObjects()),
            ImmWord(0), fail);

  bind(&noNewScript);
}

void MacroAssembler::generateBailoutTail(Register scratch,
                                         Register bailoutInfo) {
  loadJSContext(scratch);
  enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);

  branchIfFalseBool(ReturnReg, exceptionLabel());

  // Finish bailing out to Baseline.
  {
    // Prepare a register set for use in this case.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    MOZ_ASSERT_IF(!IsHiddenSP(getStackPointer()),
                  !regs.has(AsRegister(getStackPointer())));
    regs.take(bailoutInfo);

    // Reset SP to the point where clobbering starts.
    loadStackPtr(
        Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)));

    Register copyCur = regs.takeAny();
    Register copyEnd = regs.takeAny();
    Register temp = regs.takeAny();

    // Copy data onto stack.
    loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)),
            copyCur);
    loadPtr(
        Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)),
        copyEnd);
    {
      Label copyLoop;
      Label endOfCopy;
      bind(&copyLoop);
      branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
      subPtr(Imm32(4), copyCur);
      subFromStackPtr(Imm32(4));
      load32(Address(copyCur, 0), temp);
      store32(temp, Address(getStackPointer(), 0));
      jump(&copyLoop);
      bind(&endOfCopy);
    }

    // Enter exit frame for the FinishBailoutToBaseline call.
    loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)),
            temp);
    load32(Address(temp, BaselineFrame::reverseOffsetOfFrameSize()), temp);
    makeFrameDescriptor(temp, FrameType::BaselineJS, ExitFrameLayout::Size());
    push(temp);
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
    // No GC things to mark on the stack, push a bare token.
    loadJSContext(scratch);
    enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);

    // Save needed values onto stack temporarily.
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
    push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));

    // Call a stub to free allocated memory and create arguments objects.