Source code

Revision control

Other Tools

1
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2
* vim: set ts=8 sts=2 et sw=2 tw=80:
3
* This Source Code Form is subject to the terms of the Mozilla Public
4
* License, v. 2.0. If a copy of the MPL was not distributed with this
5
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6
7
#include "jit/MacroAssembler-inl.h"
8
9
#include "mozilla/CheckedInt.h"
10
#include "mozilla/MathAlgorithms.h"
11
12
#include <algorithm>
13
14
#include "jsfriendapi.h"
15
16
#include "builtin/TypedObject.h"
17
#include "gc/GCTrace.h"
18
#include "jit/AtomicOp.h"
19
#include "jit/Bailouts.h"
20
#include "jit/BaselineFrame.h"
21
#include "jit/BaselineIC.h"
22
#include "jit/BaselineJIT.h"
23
#include "jit/JitOptions.h"
24
#include "jit/Lowering.h"
25
#include "jit/MIR.h"
26
#include "jit/MoveEmitter.h"
27
#include "jit/Simulator.h"
28
#include "js/Conversions.h"
29
#include "js/Printf.h"
30
#include "vm/TraceLogging.h"
31
32
#include "gc/Nursery-inl.h"
33
#include "jit/shared/Lowering-shared-inl.h"
34
#include "jit/TemplateObject-inl.h"
35
#include "vm/Interpreter-inl.h"
36
#include "vm/JSObject-inl.h"
37
#include "vm/TypeInference-inl.h"
38
39
using namespace js;
40
using namespace js::jit;
41
42
using JS::GenericNaN;
43
using JS::ToInt32;
44
45
using mozilla::CheckedUint32;
46
47
// Emit a single type-tag test against |src|, branching to |label| when the
// tag matches (cond == Equal) or does not match (cond == NotEqual) |type|.
// |src| may be anything the branchTest* family accepts (a tag register,
// Address, or ValueOperand).
template <typename T>
static void EmitTypeCheck(MacroAssembler& masm, Assembler::Condition cond,
                          const T& src, TypeSet::Type type, Label* label) {
  if (type.isAnyObject()) {
    masm.branchTestObject(cond, src, label);
    return;
  }
  switch (type.primitive()) {
    case ValueType::Double:
      // TI double type includes int32, so test for "number" rather than
      // "double" here.
      masm.branchTestNumber(cond, src, label);
      break;
    case ValueType::Int32:
      masm.branchTestInt32(cond, src, label);
      break;
    case ValueType::Boolean:
      masm.branchTestBoolean(cond, src, label);
      break;
    case ValueType::String:
      masm.branchTestString(cond, src, label);
      break;
    case ValueType::Symbol:
      masm.branchTestSymbol(cond, src, label);
      break;
    case ValueType::BigInt:
      masm.branchTestBigInt(cond, src, label);
      break;
    case ValueType::Null:
      masm.branchTestNull(cond, src, label);
      break;
    case ValueType::Undefined:
      masm.branchTestUndefined(cond, src, label);
      break;
    case ValueType::Magic:
      masm.branchTestMagic(cond, src, label);
      break;
    case ValueType::PrivateGCThing:
    case ValueType::Object:
      // Objects are handled by the isAnyObject() path above; PrivateGCThing
      // never appears in a TypeSet guard.
      MOZ_CRASH("Unexpected type");
  }
}
88
89
// Guard that the value at |address| is a member of |types|, jumping to |miss|
// otherwise. Emits one tag test per primitive type present in the set, plus
// (for BarrierKind::TypeSet) a guard on the specific object groups/singletons.
template <typename Source>
void MacroAssembler::guardTypeSet(const Source& address, const TypeSet* types,
                                  BarrierKind kind, Register unboxScratch,
                                  Register objScratch,
                                  Register spectreRegToZero, Label* miss) {
  // unboxScratch may be InvalidReg on 32-bit platforms. It should only be
  // used for extracting the Value tag or payload.
  //
  // objScratch may be InvalidReg if the TypeSet does not contain specific
  // objects to guard on. It should only be used for guardObjectType.
  //
  // spectreRegToZero is a register that will be zeroed by guardObjectType on
  // speculatively executed paths.

  MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
  MOZ_ASSERT(!types->unknown());

  Label matched;
  // Every primitive type a TypeSet can contain. AnyObject is last so that,
  // when objects are the final branch, the object-tag test falls through to
  // the specific-object guard below.
  TypeSet::Type tests[] = {TypeSet::Int32Type(), TypeSet::UndefinedType(),
                           TypeSet::BooleanType(), TypeSet::StringType(),
                           TypeSet::SymbolType(), TypeSet::BigIntType(),
                           TypeSet::NullType(), TypeSet::MagicArgType(),
                           TypeSet::AnyObjectType()};

  // The double type also implies Int32.
  // So replace the int32 test with the double one.
  if (types->hasType(TypeSet::DoubleType())) {
    MOZ_ASSERT(types->hasType(TypeSet::Int32Type()));
    tests[0] = TypeSet::DoubleType();
  }

  // Count the branches we will emit so the final one can be inverted to
  // jump straight to |miss| instead of jumping over it.
  unsigned numBranches = 0;
  for (size_t i = 0; i < mozilla::ArrayLength(tests); i++) {
    if (types->hasType(tests[i])) {
      numBranches++;
    }
  }

  if (!types->unknownObject() && types->getObjectCount() > 0) {
    numBranches++;
  }

  if (numBranches == 0) {
    // Empty type set: nothing can match.
    MOZ_ASSERT(types->empty());
    jump(miss);
    return;
  }

  Register tag = extractTag(address, unboxScratch);

  // Emit all typed tests.
  for (size_t i = 0; i < mozilla::ArrayLength(tests); i++) {
    if (!types->hasType(tests[i])) {
      continue;
    }

    if (--numBranches > 0) {
      // Not the last test: a match jumps forward to |matched|.
      EmitTypeCheck(*this, Equal, tag, tests[i], &matched);
    } else {
      // Last test: invert the condition and miss directly.
      EmitTypeCheck(*this, NotEqual, tag, tests[i], miss);
    }
  }

  // If we don't have specific objects to check for, we're done.
  if (numBranches == 0) {
    MOZ_ASSERT(types->unknownObject() || types->getObjectCount() == 0);
    bind(&matched);
    return;
  }

  // Test specific objects.
  MOZ_ASSERT(objScratch != InvalidReg);
  MOZ_ASSERT(objScratch != unboxScratch);

  MOZ_ASSERT(numBranches == 1);
  branchTestObject(NotEqual, tag, miss);

  if (kind != BarrierKind::TypeTagOnly) {
    Register obj = extractObject(address, unboxScratch);
    guardObjectType(obj, types, objScratch, spectreRegToZero, miss);
  } else {
#ifdef DEBUG
    // TypeTagOnly skips the object guard, but in debug builds assert the
    // object really would have matched (modulo in-flight group changes).
    Label fail;
    Register obj = extractObject(address, unboxScratch);
    guardObjectType(obj, types, objScratch, spectreRegToZero, &fail);
    jump(&matched);

    bind(&fail);
    guardTypeSetMightBeIncomplete(types, obj, objScratch, &matched);
    assumeUnreachable("Unexpected object type");
#endif
  }

  bind(&matched);
}
184
185
#ifdef DEBUG
186
// guardTypeSetMightBeIncomplete is only used in DEBUG builds. If this ever
187
// changes, we need to make sure it's Spectre-safe.
188
void MacroAssembler::guardTypeSetMightBeIncomplete(const TypeSet* types,
189
Register obj,
190
Register scratch,
191
Label* label) {
192
// Type set guards might miss when an object's group changes. In this case
193
// either its old group's properties will become unknown, or it will change
194
// to a native object with an original unboxed group. Jump to label if this
195
// might have happened for the input object.
196
197
if (types->unknownObject()) {
198
jump(label);
199
return;
200
}
201
202
for (size_t i = 0; i < types->getObjectCount(); i++) {
203
if (JSObject* singleton = getSingletonAndDelayBarrier(types, i)) {
204
movePtr(ImmGCPtr(singleton), scratch);
205
loadPtr(Address(scratch, JSObject::offsetOfGroup()), scratch);
206
} else if (ObjectGroup* group = getGroupAndDelayBarrier(types, i)) {
207
movePtr(ImmGCPtr(group), scratch);
208
} else {
209
continue;
210
}
211
branchTest32(Assembler::NonZero,
212
Address(scratch, ObjectGroup::offsetOfFlags()),
213
Imm32(OBJECT_FLAG_UNKNOWN_PROPERTIES), label);
214
}
215
}
216
#endif
217
218
// Guard that |obj| is one of the specific singletons or object groups in
// |types|, jumping to |miss| otherwise. When Spectre mitigations are enabled
// the comparisons are emitted as NotEqual branches followed by a conditional
// zeroing of |spectreRegToZero|, so a mispredicted match cannot leak data.
void MacroAssembler::guardObjectType(Register obj, const TypeSet* types,
                                     Register scratch,
                                     Register spectreRegToZero, Label* miss) {
  MOZ_ASSERT(obj != scratch);
  MOZ_ASSERT(!types->unknown());
  MOZ_ASSERT(!types->hasType(TypeSet::AnyObjectType()));
  MOZ_ASSERT_IF(types->getObjectCount() > 0, scratch != InvalidReg);

  // Note: this method elides read barriers on values read from type sets, as
  // this may be called off thread during Ion compilation. This is
  // safe to do as the final JitCode object will be allocated during the
  // incremental GC (or the compilation canceled before we start sweeping),
  // see CodeGenerator::link. Other callers should use TypeSet::readBarrier
  // to trigger the barrier on the contents of type sets passed in here.
  Label matched;

  bool hasSingletons = false;
  bool hasObjectGroups = false;
  unsigned numBranches = 0;

  unsigned count = types->getObjectCount();
  for (unsigned i = 0; i < count; i++) {
    if (types->hasGroup(i)) {
      hasObjectGroups = true;
      numBranches++;
    } else if (types->hasSingleton(i)) {
      hasSingletons = true;
      numBranches++;
    }
  }

  if (numBranches == 0) {
    // Only cleared entries: nothing can match.
    jump(miss);
    return;
  }

  if (JitOptions.spectreObjectMitigationsBarriers) {
    // Pre-load zero; spectreMovePtr below conditionally moves it into
    // spectreRegToZero on speculative mismatch paths.
    move32(Imm32(0), scratch);
  }

  if (hasSingletons) {
    for (unsigned i = 0; i < count; i++) {
      JSObject* singleton = getSingletonAndDelayBarrier(types, i);
      if (!singleton) {
        continue;
      }

      if (JitOptions.spectreObjectMitigationsBarriers) {
        if (--numBranches > 0) {
          Label next;
          branchPtr(NotEqual, obj, ImmGCPtr(singleton), &next);
          spectreMovePtr(NotEqual, scratch, spectreRegToZero);
          jump(&matched);
          bind(&next);
        } else {
          branchPtr(NotEqual, obj, ImmGCPtr(singleton), miss);
          spectreMovePtr(NotEqual, scratch, spectreRegToZero);
        }
      } else {
        if (--numBranches > 0) {
          branchPtr(Equal, obj, ImmGCPtr(singleton), &matched);
        } else {
          // Last branch: invert so a mismatch jumps straight to |miss|.
          branchPtr(NotEqual, obj, ImmGCPtr(singleton), miss);
        }
      }
    }
  }

  if (hasObjectGroups) {
    comment("has object groups");

    // If Spectre mitigations are enabled, we use the scratch register as
    // zero register. Without mitigations we can use it to store the group.
    Address groupAddr(obj, JSObject::offsetOfGroup());
    if (!JitOptions.spectreObjectMitigationsBarriers) {
      loadPtr(groupAddr, scratch);
    }

    for (unsigned i = 0; i < count; i++) {
      ObjectGroup* group = getGroupAndDelayBarrier(types, i);
      if (!group) {
        continue;
      }

      // Record the elided read barrier so it can be performed at link time.
      if (!pendingObjectGroupReadBarriers_.append(group)) {
        setOOM();
        return;
      }

      if (JitOptions.spectreObjectMitigationsBarriers) {
        if (--numBranches > 0) {
          Label next;
          branchPtr(NotEqual, groupAddr, ImmGCPtr(group), &next);
          spectreMovePtr(NotEqual, scratch, spectreRegToZero);
          jump(&matched);
          bind(&next);
        } else {
          branchPtr(NotEqual, groupAddr, ImmGCPtr(group), miss);
          spectreMovePtr(NotEqual, scratch, spectreRegToZero);
        }
      } else {
        if (--numBranches > 0) {
          branchPtr(Equal, scratch, ImmGCPtr(group), &matched);
        } else {
          branchPtr(NotEqual, scratch, ImmGCPtr(group), miss);
        }
      }
    }
  }

  // Every counted branch must have been emitted.
  MOZ_ASSERT(numBranches == 0);

  bind(&matched);
}
332
333
// Explicit instantiations of guardTypeSet for the three value-source kinds
// used by the JIT back-ends.
template void MacroAssembler::guardTypeSet(
    const Address& address, const TypeSet* types, BarrierKind kind,
    Register unboxScratch, Register objScratch, Register spectreRegToZero,
    Label* miss);
template void MacroAssembler::guardTypeSet(
    const ValueOperand& value, const TypeSet* types, BarrierKind kind,
    Register unboxScratch, Register objScratch, Register spectreRegToZero,
    Label* miss);
template void MacroAssembler::guardTypeSet(
    const TypedOrValueRegister& value, const TypeSet* types, BarrierKind kind,
    Register unboxScratch, Register objScratch, Register spectreRegToZero,
    Label* miss);
345
346
template <typename S, typename T>
347
static void StoreToTypedFloatArray(MacroAssembler& masm, int arrayType,
348
const S& value, const T& dest) {
349
switch (arrayType) {
350
case Scalar::Float32:
351
masm.storeFloat32(value, dest);
352
break;
353
case Scalar::Float64:
354
masm.storeDouble(value, dest);
355
break;
356
default:
357
MOZ_CRASH("Invalid typed array type");
358
}
359
}
360
361
// Store a float register into a Float32/Float64 typed-array element addressed
// by base+index.
void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const BaseIndex& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}
// Same, for an element at a fixed address.
void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const Address& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}
371
372
// Shared helper: store a 64-bit value into a BigInt64/BigUint64 typed-array
// element. Both array types store raw 64-bit payloads, so one store64
// suffices.
template <typename S, typename T>
static void StoreToTypedBigIntArray(MacroAssembler& masm,
                                    Scalar::Type arrayType, const S& value,
                                    const T& dest) {
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));
  masm.store64(value, dest);
}

// Store a 64-bit register pair into a BigInt typed-array element at
// base+index.
void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
                                             Register64 value,
                                             const BaseIndex& dest) {
  StoreToTypedBigIntArray(*this, arrayType, value, dest);
}
// Same, for an element at a fixed address.
void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
                                             Register64 value,
                                             const Address& dest) {
  StoreToTypedBigIntArray(*this, arrayType, value, dest);
}
390
391
// Load a typed-array element from |src| into |dest| with the appropriate
// extension/conversion for |arrayType|. |temp| is only used for Uint32 loads
// into a float register. Jumps to |fail| when a Uint32 value does not fit in
// a signed int32 and |dest| is a GPR. BigInt element types are not handled
// here (see loadFromTypedBigIntArray).
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        AnyRegister dest, Register temp,
                                        Label* fail, bool canonicalizeDoubles) {
  switch (arrayType) {
    case Scalar::Int8:
      load8SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
      load8ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int16:
      load16SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint16:
      load16ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int32:
      load32(src, dest.gpr());
      break;
    case Scalar::Uint32:
      if (dest.isFloat()) {
        // The full uint32 range is representable as a double.
        load32(src, temp);
        convertUInt32ToDouble(temp, dest.fpu());
      } else {
        load32(src, dest.gpr());

        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadUnboxedScalar to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
      }
      break;
    case Scalar::Float32:
      loadFloat32(src, dest.fpu());
      canonicalizeFloat(dest.fpu());
      break;
    case Scalar::Float64:
      loadDouble(src, dest.fpu());
      if (canonicalizeDoubles) {
        canonicalizeDouble(dest.fpu());
      }
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
441
442
// Explicit instantiations of the AnyRegister loadFromTypedArray for the two
// address kinds used by code generation.
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail,
                                                 bool canonicalizeDoubles);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail,
                                                 bool canonicalizeDoubles);
452
453
// Load a typed-array element from |src| and box it into |dest| as a Value.
// Uint32 values that exceed INT32_MAX are boxed as doubles when |allowDouble|
// is true, otherwise they jump to |fail|. |temp| must not alias |dest| for
// Uint32 loads. BigInt element types are not handled here.
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        const ValueOperand& dest,
                                        bool allowDouble, Register temp,
                                        Label* fail) {
  switch (arrayType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      // These all fit in an int32, so load into the scratch register and
      // tag. The inner load cannot fail (fail label is nullptr).
      loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()),
                         InvalidReg, nullptr);
      tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
      break;
    case Scalar::Uint32:
      // Don't clobber dest when we could fail, instead use temp.
      load32(src, temp);
      if (allowDouble) {
        // If the value fits in an int32, store an int32 type tag.
        // Else, convert the value to double and box it.
        Label done, isDouble;
        branchTest32(Assembler::Signed, temp, temp, &isDouble);
        {
          tagValue(JSVAL_TYPE_INT32, temp, dest);
          jump(&done);
        }
        bind(&isDouble);
        {
          ScratchDoubleScope fpscratch(*this);
          convertUInt32ToDouble(temp, fpscratch);
          boxDouble(fpscratch, dest, fpscratch);
        }
        bind(&done);
      } else {
        // Bailout if the value does not fit in an int32.
        branchTest32(Assembler::Signed, temp, temp, fail);
        tagValue(JSVAL_TYPE_INT32, temp, dest);
      }
      break;
    case Scalar::Float32: {
      // Load as float, widen to double, box. Uses the single-precision view
      // of the scratch double register.
      ScratchDoubleScope dscratch(*this);
      FloatRegister fscratch = dscratch.asSingle();
      loadFromTypedArray(arrayType, src, AnyRegister(fscratch),
                         dest.scratchReg(), nullptr);
      convertFloat32ToDouble(fscratch, dscratch);
      boxDouble(dscratch, dest, dscratch);
      break;
    }
    case Scalar::Float64: {
      ScratchDoubleScope fpscratch(*this);
      loadFromTypedArray(arrayType, src, AnyRegister(fpscratch),
                         dest.scratchReg(), nullptr);
      boxDouble(fpscratch, dest, fpscratch);
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
516
517
// Explicit instantiations of the Value-boxing loadFromTypedArray for the two
// address kinds used by code generation.
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 const ValueOperand& dest,
                                                 bool allowDouble,
                                                 Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 const ValueOperand& dest,
                                                 bool allowDouble,
                                                 Register temp, Label* fail);
527
528
// Load a BigInt64/BigUint64 typed-array element from |src| into |temp| and
// initialize the preallocated BigInt object |bigInt| with that 64-bit value.
template <typename T>
void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                              const T& src, Register bigInt,
                                              Register64 temp) {
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));

  load64(src, temp);
  initializeBigInt64(arrayType, bigInt, temp);
}

// Explicit instantiations for the two address kinds used by code generation.
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                                       const Address& src,
                                                       Register bigInt,
                                                       Register64 temp);
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                                       const BaseIndex& src,
                                                       Register bigInt,
                                                       Register64 temp);
546
547
// Inlined version of gc::CheckAllocatorState that checks the bare essentials
// and bails for anything that cannot be handled with our jit allocators.
// |fail| receives a jump whenever the inline allocation fast path must not
// be used for this compilation.
void MacroAssembler::checkAllocatorState(Label* fail) {
  // Don't execute the inline path if we are tracing allocations.
  if (js::gc::gcTracer.traceEnabled()) {
    jump(fail);
  }

#ifdef JS_GC_ZEAL
  // Don't execute the inline path if gc zeal or tracing are active.
  // The zeal mode bits are checked at runtime since they can change after
  // compilation.
  const uint32_t* ptrZealModeBits =
      GetJitContext()->runtime->addressOfGCZealModeBits();
  branch32(Assembler::NotEqual, AbsoluteAddress(ptrZealModeBits), Imm32(0),
           fail);
#endif

  // Don't execute the inline path if the realm has an object metadata callback,
  // as the metadata to use for the object may vary between executions of the
  // op.
  if (GetJitContext()->realm()->hasAllocationMetadataBuilder()) {
    jump(fail);
  }
}
570
571
// Decide whether an allocation of |allocKind| destined for |initialHeap|
// must take the nursery fast path.
bool MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind,
                                           gc::InitialHeap initialHeap) {
  // Note that Ion elides barriers on writes to objects known to be in the
  // nursery, so any allocation that can be made into the nursery must be made
  // into the nursery, even if the nursery is disabled. At runtime these will
  // take the out-of-line path, which is required to insert a barrier for the
  // initializing writes.
  if (!IsNurseryAllocable(allocKind)) {
    return false;
  }
  return initialHeap != gc::TenuredHeap;
}
580
581
// Inline version of Nursery::allocateObject. If the object has dynamic slots,
// this fills in the slots_ pointer. On success |result| holds the new cell;
// |temp| is clobbered. Jumps to |fail| when the nursery is full or disabled,
// or when the dynamic-slot buffer is too large for the nursery.
void MacroAssembler::nurseryAllocateObject(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           size_t nDynamicSlots, Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // We still need to allocate in the nursery, per the comment in
  // shouldNurseryAllocate; however, we need to insert into the
  // mallocedBuffers set, so bail to do the nursery allocation in the
  // interpreter.
  if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
    jump(fail);
    return;
  }

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.
  CompileZone* zone = GetJitContext()->realm()->zone();
  size_t thingSize = gc::Arena::thingSize(allocKind);
  // The dynamic slots are bump-allocated immediately after the object.
  size_t totalSize = thingSize + nDynamicSlots * sizeof(HeapSlot);
  MOZ_ASSERT(totalSize < INT32_MAX);
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  bumpPointerAllocate(result, temp, fail, zone->addressOfNurseryPosition(),
                      zone->addressOfNurseryCurrentEnd(), totalSize, totalSize);

  if (nDynamicSlots) {
    // Point slots_ at the trailing slot buffer.
    computeEffectiveAddress(Address(result, thingSize), temp);
    storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
  }
}
613
614
// Inlined version of FreeSpan::allocate. This does not fill in slots_.
// Allocates a tenured cell of |allocKind| from the zone's free list into
// |result|; |temp| is clobbered. Jumps to |fail| when the current span is
// empty and there is no next span.
void MacroAssembler::freeListAllocate(Register result, Register temp,
                                      gc::AllocKind allocKind, Label* fail) {
  CompileZone* zone = GetJitContext()->realm()->zone();
  int thingSize = int(gc::Arena::thingSize(allocKind));

  Label fallback;
  Label success;

  // Load the first and last offsets of |zone|'s free list for |allocKind|.
  // If there is no room remaining in the span, fall back to get the next one.
  gc::FreeSpan** ptrFreeList = zone->addressOfFreeList(allocKind);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
  branch32(Assembler::AboveOrEqual, result, temp, &fallback);

  // Bump the offset for the next allocation.
  add32(Imm32(thingSize), result);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  sub32(Imm32(thingSize), result);
  addPtr(temp, result);  // Turn the offset into a pointer.
  jump(&success);

  bind(&fallback);
  // If there are no free spans left, we bail to finish the allocation. The
  // interpreter will call the GC allocator to set up a new arena to allocate
  // from, after which we can resume allocating in the jit.
  branchTest32(Assembler::Zero, result, result, fail);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  addPtr(temp, result);  // Turn the offset into a pointer.
  Push(result);
  // Update the free list to point to the next span (which may be empty).
  // The next span's offsets are stored at the start of the current span's
  // last cell, which |result| now points at.
  load32(Address(result, 0), result);
  store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  Pop(result);

  bind(&success);

  if (GetJitContext()->runtime->geckoProfiler().enabled()) {
    // Keep the profiler's tenured-allocation counter in sync with the
    // inline path.
    uint32_t* countAddress =
        GetJitContext()->runtime->addressOfTenuredAllocCount();
    movePtr(ImmPtr(countAddress), temp);
    add32(Imm32(1), Address(temp, 0));
  }
}
661
662
// Call the JIT runtime's free stub to release a dynamic-slots buffer.
// Preserves all registers except the stub's fixed argument register.
void MacroAssembler::callFreeStub(Register slots) {
  // This register must match the one in JitRuntime::generateFreeStub.
  const Register regSlots = CallTempReg0;

  // Save/restore the argument register around the call since the caller may
  // still need its value.
  push(regSlots);
  movePtr(slots, regSlots);
  call(GetJitContext()->runtime->jitRuntime()->freeStub());
  pop(regSlots);
}
671
672
// Inlined equivalent of gc::AllocateObject, without failure case handling.
// Chooses between the nursery bump allocator and the tenured free list based
// on |allocKind| and |initialHeap|. Jumps to |fail| when the inline path
// cannot be taken.
void MacroAssembler::allocateObject(Register result, Register temp,
                                    gc::AllocKind allocKind,
                                    uint32_t nDynamicSlots,
                                    gc::InitialHeap initialHeap, Label* fail) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

  checkAllocatorState(fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::DefaultHeap);
    return nurseryAllocateObject(result, temp, allocKind, nDynamicSlots, fail);
  }

  // Fall back to calling into the VM to allocate objects in the tenured heap
  // that have dynamic slots.
  if (nDynamicSlots) {
    jump(fail);
    return;
  }

  return freeListAllocate(result, temp, allocKind, fail);
}
695
696
// Allocate and initialize a GC object modeled on |templateObj|. |obj| holds
// the result; |temp| is clobbered. Jumps to |fail| when the inline allocation
// path cannot be used.
void MacroAssembler::createGCObject(Register obj, Register temp,
                                    const TemplateObject& templateObj,
                                    gc::InitialHeap initialHeap, Label* fail,
                                    bool initContents) {
  gc::AllocKind allocKind = templateObj.getAllocKind();
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

  uint32_t nDynamicSlots = 0;
  if (templateObj.isNative()) {
    const NativeTemplateObject& ntemplate =
        templateObj.asNativeTemplateObject();
    nDynamicSlots = ntemplate.numDynamicSlots();

    // Arrays with copy on write elements do not need fixed space for an
    // elements header. The template object, which owns the original
    // elements, might have another allocation kind.
    if (ntemplate.denseElementsAreCopyOnWrite()) {
      allocKind = gc::AllocKind::OBJECT0_BACKGROUND;
    }
  }

  allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
  initGCThing(obj, temp, templateObj, initContents);
}
720
721
// Inlined equivalent of gc::AllocateNonObject, without failure case handling.
// Non-object allocation does not need to worry about slots, so can take a
// simpler path. Always allocates from the tenured free list.
void MacroAssembler::allocateNonObject(Register result, Register temp,
                                       gc::AllocKind allocKind, Label* fail) {
  checkAllocatorState(fail);
  freeListAllocate(result, temp, allocKind, fail);
}
729
730
// Inline version of Nursery::allocateString. Allocates the string cell plus
// its nursery header, and stores the zone pointer in the header so the GC can
// identify the cell's zone.
void MacroAssembler::nurseryAllocateString(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  CompileZone* zone = GetJitContext()->realm()->zone();
  size_t thingSize = gc::Arena::thingSize(allocKind);
  size_t totalSize = js::Nursery::stringHeaderSize() + thingSize;
  MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  // |result| points past the header (note the size/totalSize distinction in
  // bumpPointerAllocate), so the zone is stored at a negative offset.
  bumpPointerAllocate(
      result, temp, fail, zone->addressOfStringNurseryPosition(),
      zone->addressOfStringNurseryCurrentEnd(), totalSize, thingSize);
  storePtr(ImmPtr(zone), Address(result, -js::Nursery::stringHeaderSize()));
}
750
751
// Inline version of Nursery::allocateBigInt. Mirrors nurseryAllocateString:
// allocates the BigInt cell plus its nursery header and stores the zone
// pointer in the header.
void MacroAssembler::nurseryAllocateBigInt(Register result, Register temp,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(gc::AllocKind::BIGINT));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  CompileZone* zone = GetJitContext()->realm()->zone();
  size_t thingSize = gc::Arena::thingSize(gc::AllocKind::BIGINT);
  size_t totalSize = js::Nursery::bigIntHeaderSize() + thingSize;
  MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  // |result| points past the header, so the zone is stored at a negative
  // offset.
  bumpPointerAllocate(
      result, temp, fail, zone->addressOfBigIntNurseryPosition(),
      zone->addressOfBigIntNurseryCurrentEnd(), totalSize, thingSize);
  storePtr(ImmPtr(zone), Address(result, -js::Nursery::bigIntHeaderSize()));
}
770
771
// Core nursery bump-pointer allocation. Reserves |totalSize| bytes starting
// at the position stored at |posAddr|, jumping to |fail| when that would pass
// the end pointer stored at |curEndAddr|. On success |result| points |size|
// bytes before the new position (i.e. past any header when size < totalSize);
// |temp| holds |posAddr|.
void MacroAssembler::bumpPointerAllocate(Register result, Register temp,
                                         Label* fail, void* posAddr,
                                         const void* curEndAddr,
                                         uint32_t totalSize, uint32_t size) {
  // The position (allocation pointer) and the end pointer are stored
  // very close to each other -- specifically, easily within a 32 bit offset.
  // Use relative offsets between them, to avoid 64-bit immediate loads.
  //
  // I tried to optimise this further by using an extra register to avoid
  // the final subtraction and hopefully get some more instruction
  // parallelism, but it made no difference.
  movePtr(ImmPtr(posAddr), temp);
  loadPtr(Address(temp, 0), result);
  addPtr(Imm32(totalSize), result);
  CheckedInt<int32_t> endOffset =
      (CheckedInt<uintptr_t>(uintptr_t(curEndAddr)) -
       CheckedInt<uintptr_t>(uintptr_t(posAddr)))
          .toChecked<int32_t>();
  MOZ_ASSERT(endOffset.isValid(), "Position and end pointers must be nearby");
  branchPtr(Assembler::Below, Address(temp, endOffset.value()), result, fail);
  storePtr(result, Address(temp, 0));
  subPtr(Imm32(size), result);

  if (GetJitContext()->runtime->geckoProfiler().enabled()) {
    // Bump the profiler's nursery allocation counter. Prefer addressing it
    // relative to |temp| (which still holds posAddr) when the counter is
    // within a 32-bit offset; otherwise load its address directly.
    CompileZone* zone = GetJitContext()->realm()->zone();
    uint32_t* countAddress = zone->addressOfNurseryAllocCount();
    CheckedInt<int32_t> counterOffset =
        (CheckedInt<uintptr_t>(uintptr_t(countAddress)) -
         CheckedInt<uintptr_t>(uintptr_t(posAddr)))
            .toChecked<int32_t>();
    if (counterOffset.isValid()) {
      add32(Imm32(1), Address(temp, counterOffset.value()));
    } else {
      movePtr(ImmPtr(countAddress), temp);
      add32(Imm32(1), Address(temp, 0));
    }
  }
}
809
810
// Inlined equivalent of gc::AllocateString, jumping to fail if nursery
// allocation requested but unsuccessful. Only the two string alloc kinds are
// supported.
void MacroAssembler::allocateString(Register result, Register temp,
                                    gc::AllocKind allocKind,
                                    gc::InitialHeap initialHeap, Label* fail) {
  MOZ_ASSERT(allocKind == gc::AllocKind::STRING ||
             allocKind == gc::AllocKind::FAT_INLINE_STRING);

  checkAllocatorState(fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::DefaultHeap);
    return nurseryAllocateString(result, temp, allocKind, fail);
  }

  freeListAllocate(result, temp, allocKind, fail);
}
827
828
void MacroAssembler::newGCString(Register result, Register temp, Label* fail,
829
bool attemptNursery) {
830
allocateString(result, temp, js::gc::AllocKind::STRING,
831
attemptNursery ? gc::DefaultHeap : gc::TenuredHeap, fail);
832
}
833
834
void MacroAssembler::newGCFatInlineString(Register result, Register temp,
835
Label* fail, bool attemptNursery) {
836
allocateString(result, temp, js::gc::AllocKind::FAT_INLINE_STRING,
837
attemptNursery ? gc::DefaultHeap : gc::TenuredHeap, fail);
838
}
839
840
// Allocate a BigInt cell, preferring the nursery when |attemptNursery| is
// true. Jumps to |fail| when the inline allocation path cannot be used.
void MacroAssembler::newGCBigInt(Register result, Register temp, Label* fail,
                                 bool attemptNursery) {
  checkAllocatorState(fail);

  gc::InitialHeap initialHeap =
      attemptNursery ? gc::DefaultHeap : gc::TenuredHeap;
  if (shouldNurseryAllocate(gc::AllocKind::BIGINT, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::DefaultHeap);
    return nurseryAllocateBigInt(result, temp, fail);
  }

  freeListAllocate(result, temp, gc::AllocKind::BIGINT, fail);
}
853
854
// Copy the fixed-slot values in [start, min(end, numFixedSlots)) from
// |templateObj| into the freshly allocated object |obj| as immediate stores.
void MacroAssembler::copySlotsFromTemplate(
    Register obj, const NativeTemplateObject& templateObj, uint32_t start,
    uint32_t end) {
  uint32_t nfixed = std::min(templateObj.numFixedSlots(), end);
  for (unsigned i = start; i < nfixed; i++) {
    // Template objects are not exposed to script and therefore immutable.
    // However, regexp template objects are sometimes used directly (when
    // the cloning is not observable), and therefore we can end up with a
    // non-zero lastIndex. Detect this case here and just substitute 0, to
    // avoid racing with the main thread updating this slot.
    Value v;
    if (templateObj.isRegExpObject() && i == RegExpObject::lastIndexSlot()) {
      v = Int32Value(0);
    } else {
      v = templateObj.getSlot(i);
    }
    storeValue(v, Address(obj, NativeObject::getFixedSlotOffset(i)));
  }
}
873
874
// Store the constant |v| into slots [start, end) beginning at |base|. Only
// undefined and the uninitialized-lexical magic value are supported, since
// neither requires a GC barrier. |temp| is clobbered.
void MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
                                                uint32_t start, uint32_t end,
                                                const Value& v) {
  MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));

  if (start >= end) {
    return;
  }

#ifdef JS_NUNBOX32
  // We only have a single spare register, so do the initialization as two
  // strided writes of the tag and body.
  Address addr = base;
  move32(Imm32(v.toNunboxPayload()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtrValue)) {
    store32(temp, ToPayload(addr));
  }

  addr = base;
  move32(Imm32(v.toNunboxTag()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtrValue)) {
    store32(temp, ToType(addr));
  }
#else
  // On 64-bit platforms the whole boxed Value fits in one register, so a
  // single strided write suffices.
  moveValue(v, ValueOperand(temp));
  for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtrValue)) {
    storePtr(temp, base);
  }
#endif
}
904
905
// Fill slots [start, end) at |base| with the undefined value.
void MacroAssembler::fillSlotsWithUndefined(Address base, Register temp,
                                            uint32_t start, uint32_t end) {
  fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
}
909
910
// Fill slots [start, end) at |base| with the uninitialized-lexical magic
// value.
void MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp,
                                                uint32_t start, uint32_t end) {
  fillSlotsWithConstantValue(base, temp, start, end,
                             MagicValue(JS_UNINITIALIZED_LEXICAL));
}
915
916
// Scan the template object's slots backwards to find where its trailing runs
// of uninitialized-lexical and undefined values begin. On return:
//   [*startOfUndefined, nslots) holds UndefinedValue, and
//   [*startOfUninitialized, *startOfUndefined) holds uninitialized lexicals.
// Slots before *startOfUninitialized hold arbitrary (reserved) values that
// must be copied from the template verbatim.
static void FindStartOfUninitializedAndUndefinedSlots(
    const NativeTemplateObject& templateObj, uint32_t nslots,
    uint32_t* startOfUninitialized, uint32_t* startOfUndefined) {
  MOZ_ASSERT(nslots == templateObj.slotSpan());
  MOZ_ASSERT(nslots > 0);

  // Walk backwards over the trailing run of undefined slots.
  uint32_t first = nslots;
  for (; first != 0; --first) {
    if (templateObj.getSlot(first - 1) != UndefinedValue()) {
      break;
    }
  }
  *startOfUndefined = first;

  // If the slot just before the undefined run is an uninitialized lexical,
  // walk backwards over that run too; otherwise the two runs coincide.
  if (first != 0 && IsUninitializedLexical(templateObj.getSlot(first - 1))) {
    for (; first != 0; --first) {
      if (!IsUninitializedLexical(templateObj.getSlot(first - 1))) {
        break;
      }
    }
    *startOfUninitialized = first;
  } else {
    *startOfUninitialized = *startOfUndefined;
  }
}
941
942
// ABI-callable helper used by MacroAssembler::initTypedArraySlots: allocates
// a zeroed nursery buffer of |count| elements and attaches it to |obj|. The
// private is first reset to nullptr, and is only set when allocation
// succeeds, so the jitted caller can detect failure by checking for a null
// data pointer afterwards.
static void AllocateAndInitTypedArrayBuffer(JSContext* cx,
                                            TypedArrayObject* obj,
                                            int32_t count) {
  AutoUnsafeCallWithABI unsafe;

  obj->initPrivate(nullptr);

  // Negative numbers or zero will bail out to the slow path, which in turn will
  // raise an invalid argument exception or create a correct object with zero
  // elements.
  if (count <= 0 || uint32_t(count) >= INT32_MAX / obj->bytesPerElement()) {
    obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, Int32Value(0));
    return;
  }

  obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, Int32Value(count));

  size_t nbytes = count * obj->bytesPerElement();
  MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid(),
             "RoundUp must not overflow");

  // Round the byte count up to a Value-size boundary before allocating.
  nbytes = RoundUp(nbytes, sizeof(Value));
  void* buf = cx->nursery().allocateZeroedBuffer(obj, nbytes,
                                                 js::ArrayBufferContentsArena);
  if (buf) {
    InitObjectPrivate(obj, buf, nbytes, MemoryUse::TypedArrayElements);
  }
}
970
971
// Initialize the data elements of a freshly allocated typed array |obj|.
// Small fixed-length arrays store their elements inline after the object's
// slots; otherwise an out-of-line buffer is allocated by calling
// AllocateAndInitTypedArrayBuffer through the ABI, and |fail| is taken when
// that allocation left the data pointer null.
void MacroAssembler::initTypedArraySlots(Register obj, Register temp,
                                         Register lengthReg,
                                         LiveRegisterSet liveRegs, Label* fail,
                                         TypedArrayObject* templateObj,
                                         TypedArrayLength lengthKind) {
  MOZ_ASSERT(templateObj->hasPrivate());
  MOZ_ASSERT(!templateObj->hasBuffer());

  constexpr size_t dataSlotOffset = TypedArrayObject::dataOffset();
  constexpr size_t dataOffset = dataSlotOffset + sizeof(HeapSlot);

  static_assert(
      TypedArrayObject::FIXED_DATA_START == TypedArrayObject::DATA_SLOT + 1,
      "fixed inline element data assumed to begin after the data slot");

  static_assert(
      TypedArrayObject::INLINE_BUFFER_LIMIT ==
          JSObject::MAX_BYTE_SIZE - dataOffset,
      "typed array inline buffer is limited by the maximum object byte size");

  // Initialise data elements to zero.
  int32_t length = templateObj->length();
  size_t nbytes = length * templateObj->bytesPerElement();

  if (lengthKind == TypedArrayLength::Fixed &&
      nbytes <= TypedArrayObject::INLINE_BUFFER_LIMIT) {
    MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());

    // Store data elements inside the remaining JSObject slots.
    computeEffectiveAddress(Address(obj, dataOffset), temp);
    storePtr(temp, Address(obj, dataSlotOffset));

    // Write enough zero pointers into fixed data to zero every
    // element. (This zeroes past the end of a byte count that's
    // not a multiple of pointer size. That's okay, because fixed
    // data is a count of 8-byte HeapSlots (i.e. <= pointer size),
    // and we won't inline unless the desired memory fits in that
    // space.)
    static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");

    size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char*);
    for (size_t i = 0; i < numZeroPointers; i++) {
      storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char*)));
    }
#ifdef DEBUG
    if (nbytes == 0) {
      store8(Imm32(TypedArrayObject::ZeroLengthArrayData),
             Address(obj, dataSlotOffset));
    }
#endif
  } else {
    // Runtime-length arrays already have the length in lengthReg; for the
    // fixed case materialize it here for the ABI call below.
    if (lengthKind == TypedArrayLength::Fixed) {
      move32(Imm32(length), lengthReg);
    }

    // Allocate a buffer on the heap to store the data elements.
    liveRegs.addUnchecked(temp);
    liveRegs.addUnchecked(obj);
    liveRegs.addUnchecked(lengthReg);
    PushRegsInMask(liveRegs);
    setupUnalignedABICall(temp);
    loadJSContext(temp);
    passABIArg(temp);
    passABIArg(obj);
    passABIArg(lengthReg);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, AllocateAndInitTypedArrayBuffer));
    PopRegsInMask(liveRegs);

    // Fail when data elements is set to NULL.
    branchPtr(Assembler::Equal, Address(obj, dataSlotOffset), ImmWord(0), fail);
  }
}
1043
1044
// Initialize the fixed and dynamic value slots of a freshly allocated native
// object |obj| from |templateObj|: reserved slots are copied verbatim, and
// the remaining slots are filled with uninitialized-lexical and/or undefined
// values as determined by the template's trailing slot runs.
void MacroAssembler::initGCSlots(Register obj, Register temp,
                                 const NativeTemplateObject& templateObj,
                                 bool initContents) {
  // Slots of non-array objects are required to be initialized.
  // Use the values currently in the template object.
  uint32_t nslots = templateObj.slotSpan();
  if (nslots == 0) {
    return;
  }

  uint32_t nfixed = templateObj.numUsedFixedSlots();
  uint32_t ndynamic = templateObj.numDynamicSlots();

  // Attempt to group slot writes such that we minimize the amount of
  // duplicated data we need to embed in code and load into registers. In
  // general, most template object slots will be undefined except for any
  // reserved slots. Since reserved slots come first, we split the object
  // logically into independent non-UndefinedValue writes to the head and
  // duplicated writes of UndefinedValue to the tail. For the majority of
  // objects, the "tail" will be the entire slot range.
  //
  // The template object may be a CallObject, in which case we need to
  // account for uninitialized lexical slots as well as undefined
  // slots. Unitialized lexical slots appears in CallObjects if the function
  // has parameter expressions, in which case closed over parameters have
  // TDZ. Uninitialized slots come before undefined slots in CallObjects.
  uint32_t startOfUninitialized = nslots;
  uint32_t startOfUndefined = nslots;
  FindStartOfUninitializedAndUndefinedSlots(
      templateObj, nslots, &startOfUninitialized, &startOfUndefined);
  MOZ_ASSERT(startOfUninitialized <= nfixed);  // Reserved slots must be fixed.
  MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
  MOZ_ASSERT_IF(!templateObj.isCallObject(),
                startOfUninitialized == startOfUndefined);

  // Copy over any preserved reserved slots.
  copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);

  // Fill the rest of the fixed slots with undefined and uninitialized.
  if (initContents) {
    size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
    fillSlotsWithUninitialized(Address(obj, offset), temp, startOfUninitialized,
                               std::min(startOfUndefined, nfixed));

    offset = NativeObject::getFixedSlotOffset(startOfUndefined);
    fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined,
                           nfixed);
  }

  if (ndynamic) {
    // We are short one register to do this elegantly. Borrow the obj
    // register briefly for our slots base address.
    push(obj);
    loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);

    // Fill uninitialized slots if necessary. Otherwise initialize all
    // slots to undefined.
    if (startOfUndefined > nfixed) {
      MOZ_ASSERT(startOfUninitialized != startOfUndefined);
      fillSlotsWithUninitialized(Address(obj, 0), temp, 0,
                                 startOfUndefined - nfixed);
      size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
      fillSlotsWithUndefined(Address(obj, offset), temp,
                             startOfUndefined - nfixed, ndynamic);
    } else {
      fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
    }

    pop(obj);
  }
}
1115
1116
#ifdef JS_GC_TRACE
1117
static void TraceCreateObject(JSObject* obj) {
1118
AutoUnsafeCallWithABI unsafe;
1119
js::gc::gcTracer.traceCreateObject(obj);
1120
}
1121
#endif
1122
1123
// Initialize a freshly allocated GC thing |obj| from |templateObj|: group,
// shape, slots/elements pointers, elements header (for arrays), slot
// contents (via initGCSlots) and, where present, the private slot. Inline
// typed objects are instead initialized by copying the template's raw data.
void MacroAssembler::initGCThing(Register obj, Register temp,
                                 const TemplateObject& templateObj,
                                 bool initContents) {
  // Fast initialization of an empty object returned by allocateObject().

  storePtr(ImmGCPtr(templateObj.group()),
           Address(obj, JSObject::offsetOfGroup()));

  storePtr(ImmGCPtr(templateObj.shape()),
           Address(obj, JSObject::offsetOfShape()));

  if (templateObj.isNative()) {
    const NativeTemplateObject& ntemplate =
        templateObj.asNativeTemplateObject();
    MOZ_ASSERT_IF(!ntemplate.denseElementsAreCopyOnWrite(),
                  !ntemplate.hasDynamicElements());
    MOZ_ASSERT_IF(ntemplate.convertDoubleElements(), ntemplate.isArrayObject());

    // If the object has dynamic slots, the slots member has already been
    // filled in.
    if (!ntemplate.hasDynamicSlots()) {
      storePtr(ImmPtr(nullptr), Address(obj, NativeObject::offsetOfSlots()));
    }

    if (ntemplate.denseElementsAreCopyOnWrite()) {
      // Copy-on-write arrays share the template's elements.
      storePtr(ImmPtr(ntemplate.getDenseElements()),
               Address(obj, NativeObject::offsetOfElements()));
    } else if (ntemplate.isArrayObject()) {
      int elementsOffset = NativeObject::offsetOfFixedElements();

      computeEffectiveAddress(Address(obj, elementsOffset), temp);
      storePtr(temp, Address(obj, NativeObject::offsetOfElements()));

      // Fill in the elements header.
      store32(
          Imm32(ntemplate.getDenseCapacity()),
          Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
      store32(Imm32(ntemplate.getDenseInitializedLength()),
              Address(obj, elementsOffset +
                               ObjectElements::offsetOfInitializedLength()));
      store32(Imm32(ntemplate.getArrayLength()),
              Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
      store32(Imm32(ntemplate.convertDoubleElements()
                        ? ObjectElements::CONVERT_DOUBLE_ELEMENTS
                        : 0),
              Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
      MOZ_ASSERT(!ntemplate.hasPrivate());
    } else if (ntemplate.isArgumentsObject()) {
      // The caller will initialize the reserved slots.
      MOZ_ASSERT(!initContents);
      MOZ_ASSERT(!ntemplate.hasPrivate());
      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));
    } else {
      // If the target type could be a TypedArray that maps shared memory
      // then this would need to store emptyObjectElementsShared in that case.
      MOZ_ASSERT(!ntemplate.isSharedMemory());

      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));

      initGCSlots(obj, temp, ntemplate, initContents);

      if (ntemplate.hasPrivate() && !ntemplate.isTypedArrayObject()) {
        uint32_t nfixed = ntemplate.numFixedSlots();
        Address privateSlot(obj, NativeObject::getPrivateDataOffset(nfixed));
        if (ntemplate.isRegExpObject()) {
          // RegExpObject stores a GC thing (RegExpShared*) in its
          // private slot, so we have to use ImmGCPtr.
          storePtr(ImmGCPtr(ntemplate.regExpShared()), privateSlot);
        } else {
          storePtr(ImmPtr(ntemplate.getPrivate()), privateSlot);
        }
      }
    }
  } else if (templateObj.isInlineTypedObject()) {
    JS::AutoAssertNoGC nogc;  // off-thread, so cannot GC
    size_t nbytes = templateObj.getInlineTypedObjectSize();
    const uint8_t* memory = templateObj.getInlineTypedObjectMem(nogc);

    // Memcpy the contents of the template object to the new object.
    size_t offset = 0;
    while (nbytes) {
      uintptr_t value = *(uintptr_t*)(memory + offset);
      storePtr(ImmWord(value),
               Address(obj, InlineTypedObject::offsetOfDataStart() + offset));
      nbytes = (nbytes < sizeof(uintptr_t)) ? 0 : nbytes - sizeof(uintptr_t);
      offset += sizeof(uintptr_t);
    }
  } else {
    MOZ_CRASH("Unknown object");
  }

#ifdef JS_GC_TRACE
  // Report the new object to the GC tracer, preserving all volatile
  // registers around the ABI call.
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  regs.takeUnchecked(obj);
  Register temp2 = regs.takeAnyGeneral();

  setupUnalignedABICall(temp2);
  passABIArg(obj);
  callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceCreateObject));

  PopRegsInMask(save);
#endif
}
1231
1232
// Emit the fast paths for comparing two strings with |op|, writing a boolean
// into |result|. Cases that need the full runtime comparison (relational ops
// on distinct pointers; possibly-equal non-atom strings of the same length)
// jump to |fail|.
void MacroAssembler::compareStrings(JSOp op, Register left, Register right,
                                    Register result, Label* fail) {
  MOZ_ASSERT(left != result);
  MOZ_ASSERT(right != result);
  MOZ_ASSERT(IsEqualityOp(op) || IsRelationalOp(op));

  Label notPointerEqual;
  // If operands point to the same instance, the strings are trivially equal.
  branchPtr(Assembler::NotEqual, left, right,
            IsEqualityOp(op) ? &notPointerEqual : fail);
  // Pointer-equal strings compare equal, so ops implied by equality (==,
  // ===, <=, >=) produce true and the rest produce false.
  move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
               op == JSOp::Ge),
         result);

  if (IsEqualityOp(op)) {
    Label done;
    jump(&done);

    bind(&notPointerEqual);

    Label leftIsNotAtom;
    Label setNotEqualResult;
    // Atoms cannot be equal to each other if they point to different strings.
    Imm32 nonAtomBit(JSString::NON_ATOM_BIT);
    branchTest32(Assembler::NonZero, Address(left, JSString::offsetOfFlags()),
                 nonAtomBit, &leftIsNotAtom);
    branchTest32(Assembler::Zero, Address(right, JSString::offsetOfFlags()),
                 nonAtomBit, &setNotEqualResult);

    bind(&leftIsNotAtom);
    // Strings of different length can never be equal.
    loadStringLength(left, result);
    branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()),
             result, fail);

    bind(&setNotEqualResult);
    move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), result);

    bind(&done);
  }
}
1273
1274
// Load the character-data pointer of linear string |str| into |dest|,
// handling both inline and non-inline storage. With Spectre mitigations
// enabled, ropes (and, for TwoByte loads, Latin1 strings) first poison the
// pointer so speculative out-of-bounds reads are blocked.
void MacroAssembler::loadStringChars(Register str, Register dest,
                                     CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    if (encoding == CharEncoding::Latin1) {
      // If the string is a rope, zero the |str| register. The code below
      // depends on str->flags so this should block speculative execution.
      movePtr(ImmWord(0), dest);
      test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::LINEAR_BIT), dest, str);
    } else {
      // If we're loading TwoByte chars, there's an additional risk:
      // if the string has Latin1 chars, we could read out-of-bounds. To
      // prevent this, we check both the Linear and Latin1 bits. We don't
      // have a scratch register, so we use these flags also to block
      // speculative execution, similar to the use of 0 above.
      MOZ_ASSERT(encoding == CharEncoding::TwoByte);
      static constexpr uint32_t Mask =
          JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT;
      static_assert(Mask < 1024,
                    "Mask should be a small, near-null value to ensure we "
                    "block speculative execution when it's used as string "
                    "pointer");
      move32(Imm32(Mask), dest);
      and32(Address(str, JSString::offsetOfFlags()), dest);
      cmp32MovePtr(Assembler::NotEqual, dest, Imm32(JSString::LINEAR_BIT), dest,
                   str);
    }
  }

  // Load the inline chars.
  computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
                          dest);

  // If it's not an inline string, load the non-inline chars. Use a
  // conditional move to prevent speculative execution.
  test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                Imm32(JSString::INLINE_CHARS_BIT),
                Address(str, JSString::offsetOfNonInlineChars()), dest);
}
1315
1316
// Load the non-inline character-data pointer of |str| into |dest|. The
// caller must know the string stores its chars out of line; with Spectre
// mitigations enabled, a string of the wrong kind or encoding is first
// poisoned to a near-null value to block speculative reads.
void MacroAssembler::loadNonInlineStringChars(Register str, Register dest,
                                              CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is a rope, has inline chars, or has a different
    // character encoding, set str to a near-null value to prevent
    // speculative execution below (when reading str->nonInlineChars).

    static constexpr uint32_t Mask = JSString::LINEAR_BIT |
                                     JSString::INLINE_CHARS_BIT |
                                     JSString::LATIN1_CHARS_BIT;
    static_assert(Mask < 1024,
                  "Mask should be a small, near-null value to ensure we "
                  "block speculative execution when it's used as string "
                  "pointer");

    uint32_t expectedBits = JSString::LINEAR_BIT;
    if (encoding == CharEncoding::Latin1) {
      expectedBits |= JSString::LATIN1_CHARS_BIT;
    }

    move32(Imm32(Mask), dest);
    and32(Address(str, JSString::offsetOfFlags()), dest);

    cmp32MovePtr(Assembler::NotEqual, dest, Imm32(expectedBits), dest, str);
  }

  loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
}
1346
1347
// Write |chars| into |str|'s non-inline character pointer field.
void MacroAssembler::storeNonInlineStringChars(Register chars, Register str) {
  MOZ_ASSERT(chars != str);
  Address charsField(str, JSString::offsetOfNonInlineChars());
  storePtr(chars, charsField);
}
1351
1352
// Compute the address of |str|'s inline character storage into |dest|, for
// subsequently writing characters into it.
void MacroAssembler::loadInlineStringCharsForStore(Register str,
                                                   Register dest) {
  Address inlineStorage(str, JSInlineString::offsetOfInlineStorage());
  computeEffectiveAddress(inlineStorage, dest);
}
1357
1358
// Load the address of |str|'s inline character storage into |dest|.
void MacroAssembler::loadInlineStringChars(Register str, Register dest,
                                           CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Making this Spectre-safe is a bit complicated: using
    // computeEffectiveAddress and then zeroing the output register if
    // non-inline is not sufficient: when the index is very large, it would
    // allow reading |nullptr + index|. Just fall back to loadStringChars
    // for now.
    loadStringChars(str, dest, encoding);
  } else {
    computeEffectiveAddress(
        Address(str, JSInlineString::offsetOfInlineStorage()), dest);
  }
}
1374
1375
// Load the left child of the rope |str| into |dest|.
void MacroAssembler::loadRopeLeftChild(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // Zero the output register if the input was not a rope.
    movePtr(ImmWord(0), dest);
    test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::LINEAR_BIT),
                  Address(str, JSRope::offsetOfLeft()), dest);
  } else {
    loadPtr(Address(str, JSRope::offsetOfLeft()), dest);
  }
}
1388
1389
// Initialize the left and right child fields of the rope |str|.
void MacroAssembler::storeRopeChildren(Register left, Register right,
                                       Register str) {
  Address leftField(str, JSRope::offsetOfLeft());
  Address rightField(str, JSRope::offsetOfRight());
  storePtr(left, leftField);
  storePtr(right, rightField);
}
1394
1395
// Load the base string of the dependent string |str| into |dest|.
void MacroAssembler::loadDependentStringBase(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is not a dependent string, zero the |str| register.
    // The code below loads str->base so this should block speculative
    // execution.
    movePtr(ImmWord(0), dest);
    test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::DEPENDENT_BIT), dest, str);
  }

  loadPtr(Address(str, JSDependentString::offsetOfBase()), dest);
}
1409
1410
// Write |base| into the dependent string |str|'s base field.
void MacroAssembler::storeDependentStringBase(Register base, Register str) {
  Address baseField(str, JSDependentString::offsetOfBase());
  storePtr(base, baseField);
}
1413
1414
// Load the character at str[index] into |output|, handling both encodings
// and the simple rope case (index within a linear left child). Jumps to
// |fail| when the rope structure is too complex for this inline path.
void MacroAssembler::loadStringChar(Register str, Register index,
                                    Register output, Register scratch,
                                    Label* fail) {
  MOZ_ASSERT(str != output);
  MOZ_ASSERT(str != index);
  MOZ_ASSERT(index != output);
  MOZ_ASSERT(output != scratch);

  movePtr(str, output);

  // This follows JSString::getChar.
  Label notRope;
  branchIfNotRope(str, &notRope);

  loadRopeLeftChild(str, output);

  // Check if the index is contained in the leftChild.
  // Todo: Handle index in the rightChild.
  spectreBoundsCheck32(index, Address(output, JSString::offsetOfLength()),
                       scratch, fail);

  // If the left side is another rope, give up.
  branchIfRope(output, fail);

  bind(&notRope);

  Label isLatin1, done;
  // We have to check the left/right side for ropes,
  // because a TwoByte rope might have a Latin1 child.
  branchLatin1String(output, &isLatin1);
  loadStringChars(output, scratch, CharEncoding::TwoByte);
  loadChar(scratch, index, output, CharEncoding::TwoByte);
  jump(&done);

  bind(&isLatin1);
  loadStringChars(output, scratch, CharEncoding::Latin1);
  loadChar(scratch, index, output, CharEncoding::Latin1);

  bind(&done);
}
1454
1455
// Load the cached integer index value of |str| into |dest|, or jump to
// |fail| if the string has no cached index.
void MacroAssembler::loadStringIndexValue(Register str, Register dest,
                                          Label* fail) {
  MOZ_ASSERT(str != dest);

  load32(Address(str, JSString::offsetOfFlags()), dest);

  // Does not have a cached index value.
  branchTest32(Assembler::Zero, dest, Imm32(JSString::INDEX_VALUE_BIT), fail);

  // Extract the index.
  rshift32(Imm32(JSString::INDEX_VALUE_SHIFT), dest);
}
1467
1468
void MacroAssembler::loadChar(Register chars, Register index, Register dest,
1469
CharEncoding encoding, int32_t offset /* = 0 */) {
1470
if (encoding == CharEncoding::Latin1) {
1471
loadChar(BaseIndex(chars, index, TimesOne, offset), dest, encoding);
1472
} else {
1473
loadChar(BaseIndex(chars, index, TimesTwo, offset), dest, encoding);
1474
}
1475
}
1476
1477
// Advance the character pointer |chars| by |index| characters of the given
// encoding.
void MacroAssembler::addToCharPtr(Register chars, Register index,
                                  CharEncoding encoding) {
  if (encoding == CharEncoding::Latin1) {
    static_assert(sizeof(char) == 1,
                  "Latin-1 string index shouldn't need scaling");
    addPtr(index, chars);
  } else {
    // Two-byte characters: scale the index by two via an effective address.
    computeEffectiveAddress(BaseIndex(chars, index, TimesTwo), chars);
  }
}
1487
1488
// Load a pointer to |bigInt|'s digit array into |digits|, selecting between
// inline and heap digits based on the digit length.
void MacroAssembler::loadBigIntDigits(Register bigInt, Register digits) {
  MOZ_ASSERT(digits != bigInt);

  // Load the inline digits.
  computeEffectiveAddress(Address(bigInt, BigInt::offsetOfInlineDigits()),
                          digits);

  // If inline digits aren't used, load the heap digits. Use a conditional move
  // to prevent speculative execution.
  cmp32LoadPtr(Assembler::GreaterThan,
               Address(bigInt, BigInt::offsetOfLength()),
               Imm32(int32_t(BigInt::inlineDigitsLength())),
               Address(bigInt, BigInt::offsetOfHeapDigits()), digits);
}
1502
1503
// Load the int64 value of |bigInt| into |dest|, following the logic of
// BigInt::toUint64(): take up to the first one (64-bit) or two (32-bit)
// digits and negate the result when the sign bit is set. A zero-length
// BigInt yields 0.
void MacroAssembler::loadBigInt64(Register bigInt, Register64 dest) {
  // This code follows the implementation of |BigInt::toUint64()|. We're also
  // using it for inline callers of |BigInt::toInt64()|, which works, because
  // all supported Jit architectures use a two's complement representation for
  // int64 values, which means the WrapToSigned call in toInt64() is a no-op.

  Label done, nonZero;

  branch32(Assembler::NotEqual, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(0), &nonZero);
  {
    move64(Imm64(0), dest);
    jump(&done);
  }
  bind(&nonZero);

#ifdef JS_PUNBOX64
  Register digits = dest.reg;
#else
  Register digits = dest.high;
#endif

  loadBigIntDigits(bigInt, digits);

// Fix: use #ifdef to match the conditional above. A bare #if fails to
// compile when JS_PUNBOX64 is defined without a value, and mixing the two
// forms for the same macro is error-prone.
#ifdef JS_PUNBOX64
  // Load the first digit into the destination register.
  load64(Address(digits, 0), dest);
#else
  // Load the first digit into the destination register's low value.
  load32(Address(digits, 0), dest.low);

  // And conditionally load the second digit into the high value register.
  Label twoDigits, digitsDone;
  branch32(Assembler::GreaterThan, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(1), &twoDigits);
  {
    move32(Imm32(0), dest.high);
    jump(&digitsDone);
  }
  {
    bind(&twoDigits);
    load32(Address(digits, sizeof(BigInt::Digit)), dest.high);
  }
  bind(&digitsDone);
#endif

  // Negate the magnitude when the BigInt's sign bit is set.
  branchTest32(Assembler::Zero, Address(bigInt, BigInt::offsetOfFlags()),
               Imm32(BigInt::signBitMask()), &done);
  neg64(dest);

  bind(&done);
}
1555
1556
// Load |bigInt|'s first digit into |dest|, or 0 when the BigInt has no
// digits (i.e. represents zero).
void MacroAssembler::loadFirstBigIntDigitOrZero(Register bigInt,
                                                Register dest) {
  Label done, nonZero;
  branch32(Assembler::NotEqual, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(0), &nonZero);
  {
    movePtr(ImmWord(0), dest);
    jump(&done);
  }
  bind(&nonZero);

  loadBigIntDigits(bigInt, dest);

  // Load the first digit into the destination register.
  loadPtr(Address(dest, 0), dest);

  bind(&done);
}
1574
1575
void MacroAssembler::initializeBigInt64(Scalar::Type type, Register bigInt,
1576
Register64 val) {
1577
MOZ_ASSERT(Scalar::isBigIntType(type));
1578
1579
uint32_t flags = BigInt::TYPE_FLAGS;
1580
1581
store32(Imm32(flags), Address(bigInt, BigInt::offsetOfFlags()));
1582
1583
Label done, nonZero;
1584
branch64(Assembler::NotEqual, val, Imm64(0), &nonZero);
1585
{
1586
store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
1587
jump(&done);
1588
}
1589
bind(&nonZero);
1590
1591
if (type == Scalar::BigInt64) {
1592
// Set the sign-bit for negative values and then continue with the two's
1593
// complement.
1594
Label isPositive;
1595
branch64(Assembler::GreaterThan, val, Imm64(0), &isPositive);
1596
{
1597
store32(Imm32(BigInt::signBitMask() | flags),
1598
Address(bigInt, BigInt::offsetOfFlags()));
1599
neg64(val);
1600
}
1601
bind(&isPositive);
1602
}
1603
1604
store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));
1605
1606
static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
1607
"BigInt Digit size matches uintptr_t, so there's a single "
1608
"store on 64-bit and up to two stores on 32-bit");
1609
1610