/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/MacroAssembler-inl.h"
#include "mozilla/FloatingPoint.h"
#include "mozilla/Latin1.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/XorShift128PlusRNG.h"
#include <algorithm>
#include <limits>
#include <utility>
#include "jit/AtomicOp.h"
#include "jit/AtomicOperations.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineJIT.h"
#include "jit/JitFrames.h"
#include "jit/JitOptions.h"
#include "jit/JitRuntime.h"
#include "jit/JitScript.h"
#include "jit/MoveEmitter.h"
#include "jit/ReciprocalMulConstants.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/Simulator.h"
#include "jit/VMFunctions.h"
#include "js/Conversions.h"
#include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
#include "js/GCAPI.h" // JS::AutoCheckCannotGC
#include "js/ScalarType.h" // js::Scalar::Type
#include "util/Unicode.h"
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/BoundFunctionObject.h"
#include "vm/DateObject.h"
#include "vm/DateTime.h"
#include "vm/Float16.h"
#include "vm/FunctionFlags.h" // js::FunctionFlags
#include "vm/Iteration.h"
#include "vm/JSContext.h"
#include "vm/JSFunction.h"
#include "vm/StringType.h"
#include "vm/TypedArrayObject.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCodegenConstants.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmInstanceData.h"
#include "wasm/WasmMemory.h"
#include "wasm/WasmTypeDef.h"
#include "wasm/WasmValidate.h"
#include "jit/TemplateObject-inl.h"
#include "vm/BytecodeUtil-inl.h"
#include "vm/Interpreter-inl.h"
#include "vm/JSObject-inl.h"
#include "wasm/WasmGcObject-inl.h"
using namespace js;
using namespace js::jit;
using JS::GenericNaN;
using mozilla::CheckedInt;
TrampolinePtr MacroAssembler::preBarrierTrampoline(MIRType type) {
const JitRuntime* rt = runtime()->jitRuntime();
return rt->preBarrier(type);
}
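// Shared helper for the Address and BaseIndex overloads of
// storeToTypedFloatArray below.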
template <typename T>
static void StoreToTypedFloatArray(MacroAssembler& masm, Scalar::Type arrayType,
FloatRegister value, const T& dest,
Register temp,
LiveRegisterSet volatileLiveRegs) {
switch (arrayType) {
case Scalar::Float16:
masm.storeFloat16(value, dest, temp, volatileLiveRegs);
break;
case Scalar::Float32: {
if (value.isDouble()) {
ScratchFloat32Scope fpscratch(masm);
masm.convertDoubleToFloat32(value, fpscratch);
masm.storeFloat32(fpscratch, dest);
} else {
MOZ_ASSERT(value.isSingle());
masm.storeFloat32(value, dest);
}
break;
}
case Scalar::Float64:
MOZ_ASSERT(value.isDouble());
masm.storeDouble(value, dest);
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
FloatRegister value,
const BaseIndex& dest,
Register temp,
LiveRegisterSet volatileLiveRegs) {
StoreToTypedFloatArray(*this, arrayType, value, dest, temp, volatileLiveRegs);
}
void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
FloatRegister value,
const Address& dest, Register temp,
LiveRegisterSet volatileLiveRegs) {
StoreToTypedFloatArray(*this, arrayType, value, dest, temp, volatileLiveRegs);
}
template <typename S, typename T>
static void StoreToTypedBigIntArray(MacroAssembler& masm,
Scalar::Type arrayType, const S& value,
const T& dest) {
MOZ_ASSERT(Scalar::isBigIntType(arrayType));
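// BigInt64 and BigUint64 elements have identical 64-bit storage; the
// signedness only matters when the value is read back.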
masm.store64(value, dest);
}
void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
Register64 value,
const BaseIndex& dest) {
StoreToTypedBigIntArray(*this, arrayType, value, dest);
}
void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
Register64 value,
const Address& dest) {
StoreToTypedBigIntArray(*this, arrayType, value, dest);
}
void MacroAssembler::boxUint32(Register source, ValueOperand dest,
Uint32Mode mode, Label* fail) {
switch (mode) {
// Fail if the value does not fit in an int32.
case Uint32Mode::FailOnDouble: {
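// A uint32 with its high bit set does not fit in an int32, so a signed
// test on the value catches exactly the cases that would need a double.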
branchTest32(Assembler::Signed, source, source, fail);
tagValue(JSVAL_TYPE_INT32, source, dest);
break;
}
case Uint32Mode::ForceDouble: {
// Always convert the value to double.
ScratchDoubleScope fpscratch(*this);
convertUInt32ToDouble(source, fpscratch);
boxDouble(fpscratch, dest, fpscratch);
break;
}
}
}
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
AnyRegister dest, Register temp1,
Register temp2, Label* fail,
LiveRegisterSet volatileLiveRegs) {
switch (arrayType) {
case Scalar::Int8:
load8SignExtend(src, dest.gpr());
break;
case Scalar::Uint8:
case Scalar::Uint8Clamped:
load8ZeroExtend(src, dest.gpr());
break;
case Scalar::Int16:
load16SignExtend(src, dest.gpr());
break;
case Scalar::Uint16:
load16ZeroExtend(src, dest.gpr());
break;
case Scalar::Int32:
load32(src, dest.gpr());
break;
case Scalar::Uint32:
if (dest.isFloat()) {
load32(src, temp1);
convertUInt32ToDouble(temp1, dest.fpu());
} else {
load32(src, dest.gpr());
// Bail out if the value doesn't fit into a signed int32 value. This
// is what allows MLoadUnboxedScalar to have a type() of
// MIRType::Int32 for UInt32 array loads.
branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
}
break;
case Scalar::Float16:
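// loadFloat16 widens the half-precision element to a float32 in
// dest.fpu(), so it is canonicalized like a Float32 load.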
loadFloat16(src, dest.fpu(), temp1, temp2, volatileLiveRegs);
canonicalizeFloat(dest.fpu());
break;
case Scalar::Float32:
loadFloat32(src, dest.fpu());
canonicalizeFloat(dest.fpu());
break;
case Scalar::Float64:
loadDouble(src, dest.fpu());
canonicalizeDouble(dest.fpu());
break;
case Scalar::BigInt64:
case Scalar::BigUint64:
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void MacroAssembler::loadFromTypedArray(
Scalar::Type arrayType, const Address& src, AnyRegister dest,
Register temp1, Register temp2, Label* fail,
LiveRegisterSet volatileLiveRegs);
template void MacroAssembler::loadFromTypedArray(
Scalar::Type arrayType, const BaseIndex& src, AnyRegister dest,
Register temp1, Register temp2, Label* fail,
LiveRegisterSet volatileLiveRegs);
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
const ValueOperand& dest,
Uint32Mode uint32Mode, Register temp,
Label* fail,
LiveRegisterSet volatileLiveRegs) {
switch (arrayType) {
case Scalar::Int8:
case Scalar::Uint8:
case Scalar::Uint8Clamped:
case Scalar::Int16:
case Scalar::Uint16:
case Scalar::Int32:
loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()),
InvalidReg, InvalidReg, nullptr, LiveRegisterSet{});
tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
break;
case Scalar::Uint32:
load32(src, dest.scratchReg());
boxUint32(dest.scratchReg(), dest, uint32Mode, fail);
break;
case Scalar::Float16: {
ScratchDoubleScope dscratch(*this);
FloatRegister fscratch = dscratch.asSingle();
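// fscratch aliases dscratch, so the float32 result can be widened to a
// double in place before boxing.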
loadFromTypedArray(arrayType, src, AnyRegister(fscratch),
dest.scratchReg(), temp, nullptr, volatileLiveRegs);
convertFloat32ToDouble(fscratch, dscratch);
boxDouble(dscratch, dest, dscratch);
break;
}
case Scalar::Float32: {
ScratchDoubleScope dscratch(*this);
FloatRegister fscratch = dscratch.asSingle();
loadFromTypedArray(arrayType, src, AnyRegister(fscratch), InvalidReg,
InvalidReg, nullptr, LiveRegisterSet{});
convertFloat32ToDouble(fscratch, dscratch);
boxDouble(dscratch, dest, dscratch);
break;
}
case Scalar::Float64: {
ScratchDoubleScope fpscratch(*this);
loadFromTypedArray(arrayType, src, AnyRegister(fpscratch), InvalidReg,
InvalidReg, nullptr, LiveRegisterSet{});
boxDouble(fpscratch, dest, fpscratch);
break;
}
case Scalar::BigInt64:
case Scalar::BigUint64:
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void MacroAssembler::loadFromTypedArray(
Scalar::Type arrayType, const Address& src, const ValueOperand& dest,
Uint32Mode uint32Mode, Register temp, Label* fail,
LiveRegisterSet volatileLiveRegs);
template void MacroAssembler::loadFromTypedArray(
Scalar::Type arrayType, const BaseIndex& src, const ValueOperand& dest,
Uint32Mode uint32Mode, Register temp, Label* fail,
LiveRegisterSet volatileLiveRegs);
template <typename T>
void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
const T& src, Register bigInt,
Register64 temp) {
MOZ_ASSERT(Scalar::isBigIntType(arrayType));
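// Load the raw 64-bit element, then initialize the preallocated |bigInt|
// from it. initializeBigInt64 applies the signed or unsigned
// interpretation implied by |arrayType|.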
load64(src, temp);
initializeBigInt64(arrayType, bigInt, temp);
}
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
const Address& src,
Register bigInt,
Register64 temp);
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
const BaseIndex& src,
Register bigInt,
Register64 temp);
// Inlined version of gc::CheckAllocatorState that checks the bare essentials
// and bails for anything that cannot be handled with our jit allocators.
void MacroAssembler::checkAllocatorState(Register temp, gc::AllocKind allocKind,
Label* fail) {
// Don't execute the inline path if GC probes are built in.
#ifdef JS_GC_PROBES
jump(fail);
#endif
#ifdef JS_GC_ZEAL
// Don't execute the inline path if gc zeal or tracing are active.
const uint32_t* ptrZealModeBits = runtime()->addressOfGCZealModeBits();
branch32(Assembler::NotEqual, AbsoluteAddress(ptrZealModeBits), Imm32(0),
fail);
#endif
// If the zone has a realm with an object allocation metadata hook, emit a
// guard for this. Note that IC stubs and some other trampolines can be shared
// across realms, so we don't bake in a realm pointer.
if (gc::IsObjectAllocKind(allocKind) &&
realm()->zone()->hasRealmWithAllocMetadataBuilder()) {
loadJSContext(temp);
loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
branchPtr(Assembler::NotEqual,
Address(temp, Realm::offsetOfAllocationMetadataBuilder()),
ImmWord(0), fail);
}
}
bool MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind,
gc::Heap initialHeap) {
// Note that Ion elides barriers on writes to objects known to be in the
// nursery, so any allocation that can be made into the nursery must be made
// into the nursery, even if the nursery is disabled. At runtime these will
// take the out-of-line path, which is required to insert a barrier for the
// initializing writes.
return IsNurseryAllocable(allocKind) && initialHeap != gc::Heap::Tenured;
}
// Inline version of Nursery::allocateObject. If the object has dynamic slots,
// this fills in the slots_ pointer.
void MacroAssembler::nurseryAllocateObject(Register result, Register temp,
gc::AllocKind allocKind,
size_t nDynamicSlots, Label* fail,
const AllocSiteInput& allocSite) {
MOZ_ASSERT(IsNurseryAllocable(allocKind));
// Currently the JIT does not nursery allocate foreground finalized
// objects. This is allowed for objects that support this and have the
// JSCLASS_SKIP_NURSERY_FINALIZE class flag set. It's hard to assert that
// here, though, so disallow all foreground finalized objects for now.
MOZ_ASSERT(!IsForegroundFinalized(allocKind));
// We still need to allocate in the nursery, per the comment in
// shouldNurseryAllocate; however, we need to insert into the
// mallocedBuffers set, so bail to do the nursery allocation in the
// interpreter.
if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
jump(fail);
return;
}
// Check whether this allocation site needs pretenuring. This dynamic check
// only happens for baseline code.
if (allocSite.is<Register>()) {
Register site = allocSite.as<Register>();
branchTestPtr(Assembler::NonZero,
Address(site, gc::AllocSite::offsetOfScriptAndState()),
Imm32(gc::AllocSite::LONG_LIVED_BIT), fail);
}
// No explicit check for nursery.isEnabled() is needed, as the comparison
// with the nursery's end will always fail in such cases.
CompileZone* zone = realm()->zone();
size_t thingSize = gc::Arena::thingSize(allocKind);
size_t totalSize = thingSize;
if (nDynamicSlots) {
totalSize += ObjectSlots::allocSize(nDynamicSlots);
}
MOZ_ASSERT(totalSize < INT32_MAX);
MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::Object,
totalSize, allocSite);
if (nDynamicSlots) {
store32(Imm32(nDynamicSlots),
Address(result, thingSize + ObjectSlots::offsetOfCapacity()));
store32(
Imm32(0),
Address(result, thingSize + ObjectSlots::offsetOfDictionarySlotSpan()));
store64(Imm64(ObjectSlots::NoUniqueIdInDynamicSlots),
Address(result, thingSize + ObjectSlots::offsetOfMaybeUniqueId()));
computeEffectiveAddress(
Address(result, thingSize + ObjectSlots::offsetOfSlots()), temp);
storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
}
}
// Inlined version of FreeSpan::allocate. This does not fill in slots_.
void MacroAssembler::freeListAllocate(Register result, Register temp,
gc::AllocKind allocKind, Label* fail) {
CompileZone* zone = realm()->zone();
int thingSize = int(gc::Arena::thingSize(allocKind));
Label fallback;
Label success;
// Load the first and last offsets of |zone|'s free list for |allocKind|.
// If there is no room remaining in the span, fall back to get the next one.
gc::FreeSpan** ptrFreeList = zone->addressOfFreeList(allocKind);
loadPtr(AbsoluteAddress(ptrFreeList), temp);
load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
branch32(Assembler::AboveOrEqual, result, temp, &fallback);
// Bump the offset for the next allocation.
add32(Imm32(thingSize), result);
loadPtr(AbsoluteAddress(ptrFreeList), temp);
store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
sub32(Imm32(thingSize), result);
addPtr(temp, result); // Turn the offset into a pointer.
jump(&success);
bind(&fallback);
// If there are no free spans left, we bail to finish the allocation. The
// interpreter will call the GC allocator to set up a new arena to allocate
// from, after which we can resume allocating in the jit.
branchTest32(Assembler::Zero, result, result, fail);
loadPtr(AbsoluteAddress(ptrFreeList), temp);
addPtr(temp, result); // Turn the offset into a pointer.
Push(result);
// Update the free list to point to the next span (which may be empty).
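// The next span's header lives in the last free cell; its first and last
// fields are adjacent uint16_t values, so a single 32-bit load/store
// copies both at once.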
load32(Address(result, 0), result);
store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
Pop(result);
bind(&success);
if (runtime()->geckoProfiler().enabled()) {
uint32_t* countAddress = zone->addressOfTenuredAllocCount();
movePtr(ImmPtr(countAddress), temp);
add32(Imm32(1), Address(temp, 0));
}
}
void MacroAssembler::callFreeStub(Register slots) {
// This register must match the one in JitRuntime::generateFreeStub.
const Register regSlots = CallTempReg0;
push(regSlots);
movePtr(slots, regSlots);
call(runtime()->jitRuntime()->freeStub());
pop(regSlots);
}
// Inlined equivalent of gc::AllocateObject, without failure case handling.
void MacroAssembler::allocateObject(Register result, Register temp,
gc::AllocKind allocKind,
uint32_t nDynamicSlots,
gc::Heap initialHeap, Label* fail,
const AllocSiteInput& allocSite) {
MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
checkAllocatorState(temp, allocKind, fail);
if (shouldNurseryAllocate(allocKind, initialHeap)) {
MOZ_ASSERT(initialHeap == gc::Heap::Default);
return nurseryAllocateObject(result, temp, allocKind, nDynamicSlots, fail,
allocSite);
}
// Fall back to calling into the VM to allocate objects in the tenured heap
// that have dynamic slots.
if (nDynamicSlots) {
jump(fail);
return;
}
return freeListAllocate(result, temp, allocKind, fail);
}
void MacroAssembler::createGCObject(Register obj, Register temp,
const TemplateObject& templateObj,
gc::Heap initialHeap, Label* fail,
bool initContents /* = true */) {
gc::AllocKind allocKind = templateObj.getAllocKind();
MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
uint32_t nDynamicSlots = 0;
if (templateObj.isNativeObject()) {
const TemplateNativeObject& ntemplate =
templateObj.asTemplateNativeObject();
nDynamicSlots = ntemplate.numDynamicSlots();
}
allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
initGCThing(obj, temp, templateObj, initContents);
}
void MacroAssembler::createPlainGCObject(
Register result, Register shape, Register temp, Register temp2,
uint32_t numFixedSlots, uint32_t numDynamicSlots, gc::AllocKind allocKind,
gc::Heap initialHeap, Label* fail, const AllocSiteInput& allocSite,
bool initContents /* = true */) {
MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");
// Allocate object.
allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
allocSite);
// Initialize shape field.
storePtr(shape, Address(result, JSObject::offsetOfShape()));
// If the object has dynamic slots, allocateObject will initialize
// the slots field. If not, we must initialize it now.
if (numDynamicSlots == 0) {
storePtr(ImmPtr(emptyObjectSlots),
Address(result, NativeObject::offsetOfSlots()));
}
// Initialize elements field.
storePtr(ImmPtr(emptyObjectElements),
Address(result, NativeObject::offsetOfElements()));
// Initialize fixed slots.
if (initContents) {
fillSlotsWithUndefined(Address(result, NativeObject::getFixedSlotOffset(0)),
temp, 0, numFixedSlots);
}
// Initialize dynamic slots.
if (numDynamicSlots > 0) {
loadPtr(Address(result, NativeObject::offsetOfSlots()), temp2);
fillSlotsWithUndefined(Address(temp2, 0), temp, 0, numDynamicSlots);
}
}
void MacroAssembler::createArrayWithFixedElements(
Register result, Register shape, Register temp, Register dynamicSlotsTemp,
uint32_t arrayLength, uint32_t arrayCapacity, uint32_t numUsedDynamicSlots,
uint32_t numDynamicSlots, gc::AllocKind allocKind, gc::Heap initialHeap,
Label* fail, const AllocSiteInput& allocSite) {
MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
MOZ_ASSERT(shape != temp, "shape can overlap with dynamicSlotsTemp, but not temp");
MOZ_ASSERT(result != temp);
// This only supports allocating arrays with fixed (inline) elements;
// dynamic elements are not supported.
MOZ_ASSERT(arrayCapacity >= arrayLength);
MOZ_ASSERT(gc::GetGCKindSlots(allocKind) >=
arrayCapacity + ObjectElements::VALUES_PER_HEADER);
MOZ_ASSERT(numUsedDynamicSlots <= numDynamicSlots);
// Allocate object.
allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
allocSite);
// Initialize shape field.
storePtr(shape, Address(result, JSObject::offsetOfShape()));
// If the object has dynamic slots, allocateObject will initialize
// the slots field. If not, we must initialize it now.
if (numDynamicSlots == 0) {
storePtr(ImmPtr(emptyObjectSlots),
Address(result, NativeObject::offsetOfSlots()));
}
// Initialize elements pointer for fixed (inline) elements.
computeEffectiveAddress(
Address(result, NativeObject::offsetOfFixedElements()), temp);
storePtr(temp, Address(result, NativeObject::offsetOfElements()));
// Initialize elements header.
store32(Imm32(ObjectElements::FIXED),
Address(temp, ObjectElements::offsetOfFlags()));
store32(Imm32(0), Address(temp, ObjectElements::offsetOfInitializedLength()));
store32(Imm32(arrayCapacity),
Address(temp, ObjectElements::offsetOfCapacity()));
store32(Imm32(arrayLength), Address(temp, ObjectElements::offsetOfLength()));
// Initialize dynamic slots.
if (numUsedDynamicSlots > 0) {
MOZ_ASSERT(dynamicSlotsTemp != temp);
MOZ_ASSERT(dynamicSlotsTemp != InvalidReg);
loadPtr(Address(result, NativeObject::offsetOfSlots()), dynamicSlotsTemp);
fillSlotsWithUndefined(Address(dynamicSlotsTemp, 0), temp, 0,
numUsedDynamicSlots);
}
}
// Inline version of Nursery::allocateString.
void MacroAssembler::nurseryAllocateString(Register result, Register temp,
gc::AllocKind allocKind,
Label* fail) {
MOZ_ASSERT(IsNurseryAllocable(allocKind));
// No explicit check for nursery.isEnabled() is needed, as the comparison
// with the nursery's end will always fail in such cases.
CompileZone* zone = realm()->zone();
size_t thingSize = gc::Arena::thingSize(allocKind);
bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::String,
thingSize);
}
// Inline version of Nursery::allocateBigInt.
void MacroAssembler::nurseryAllocateBigInt(Register result, Register temp,
Label* fail) {
MOZ_ASSERT(IsNurseryAllocable(gc::AllocKind::BIGINT));
// No explicit check for nursery.isEnabled() is needed, as the comparison
// with the nursery's end will always fail in such cases.
CompileZone* zone = realm()->zone();
size_t thingSize = gc::Arena::thingSize(gc::AllocKind::BIGINT);
bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::BigInt,
thingSize);
}
static bool IsNurseryAllocEnabled(CompileZone* zone, JS::TraceKind kind) {
switch (kind) {
case JS::TraceKind::Object:
return zone->allocNurseryObjects();
case JS::TraceKind::String:
return zone->allocNurseryStrings();
case JS::TraceKind::BigInt:
return zone->allocNurseryBigInts();
default:
MOZ_CRASH("Bad nursery allocation kind");
}
}
// This function handles nursery allocations for JS. For wasm, see
// MacroAssembler::wasmBumpPointerAllocate.
void MacroAssembler::bumpPointerAllocate(Register result, Register temp,
Label* fail, CompileZone* zone,
JS::TraceKind traceKind, uint32_t size,
const AllocSiteInput& allocSite) {
MOZ_ASSERT(size >= gc::MinCellSize);
uint32_t totalSize = size + Nursery::nurseryCellHeaderSize();
MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
// We know statically whether nursery allocation is enabled for a particular
// kind because we discard JIT code when this changes.
if (!IsNurseryAllocEnabled(zone, traceKind)) {
jump(fail);
return;
}
// Use a 32-bit offset from the nursery's position_ field to reach
// currentEnd_, to avoid 64-bit immediate loads.
void* posAddr = zone->addressOfNurseryPosition();
int32_t endOffset = Nursery::offsetOfCurrentEndFromPosition();
movePtr(ImmPtr(posAddr), temp);
loadPtr(Address(temp, 0), result);
addPtr(Imm32(totalSize), result);
branchPtr(Assembler::Below, Address(temp, endOffset), result, fail);
storePtr(result, Address(temp, 0));
subPtr(Imm32(size), result);
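// |result| now points just past the cell header, at the start of the new
// cell; the header word itself is written below.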
if (allocSite.is<gc::CatchAllAllocSite>()) {
// No allocation site supplied. This is the case when called from Warp, or
// from places that don't support pretenuring.
gc::CatchAllAllocSite siteKind = allocSite.as<gc::CatchAllAllocSite>();
gc::AllocSite* site = zone->catchAllAllocSite(traceKind, siteKind);
uintptr_t headerWord = gc::NurseryCellHeader::MakeValue(site, traceKind);
storePtr(ImmWord(headerWord),
Address(result, -js::Nursery::nurseryCellHeaderSize()));
if (traceKind != JS::TraceKind::Object ||
runtime()->geckoProfiler().enabled()) {
// Update the catch all allocation site, which is used to calculate
// nursery allocation counts so we can determine whether to disable
// nursery allocation of strings and bigints.
uint32_t* countAddress = site->nurseryAllocCountAddress();
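// |temp| still holds |posAddr|, so if the counter lies within a 32-bit
// displacement of it we can bump the counter relative to |temp| and avoid
// loading another 64-bit immediate.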
CheckedInt<int32_t> counterOffset =
(CheckedInt<uintptr_t>(uintptr_t(countAddress)) -
CheckedInt<uintptr_t>(uintptr_t(posAddr)))
.toChecked<int32_t>();
if (counterOffset.isValid()) {
add32(Imm32(1), Address(temp, counterOffset.value()));
} else {
movePtr(ImmPtr(countAddress), temp);
add32(Imm32(1), Address(temp, 0));
}
}
} else {
// Update allocation site and store pointer in the nursery cell header. This
// is only used from baseline.
Register site = allocSite.as<Register>();
updateAllocSite(temp, result, zone, site);
// See NurseryCellHeader::MakeValue.
orPtr(Imm32(int32_t(traceKind)), site);
storePtr(site, Address(result, -js::Nursery::nurseryCellHeaderSize()));
}
}
// Update the allocation site in the same way as Nursery::allocateCell.
void MacroAssembler::updateAllocSite(Register temp, Register result,
CompileZone* zone, Register site) {
Label done;
add32(Imm32(1), Address(site, gc::AllocSite::offsetOfNurseryAllocCount()));
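// Once the bumped count reaches the attention threshold, prepend the site
// to the zone's list of nursery-allocated sites so it gets processed for
// pretenuring.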
branch32(Assembler::NotEqual,
Address(site, gc::AllocSite::offsetOfNurseryAllocCount()),
Imm32(js::gc::NormalSiteAttentionThreshold), &done);
loadPtr(AbsoluteAddress(zone->addressOfNurseryAllocatedSites()), temp);
storePtr(temp, Address(site, gc::AllocSite::offsetOfNextNurseryAllocated()));
storePtr(site, AbsoluteAddress(zone->addressOfNurseryAllocatedSites()));
bind(&done);
}
// Inlined equivalent of gc::AllocateString, jumping to fail if nursery
// allocation requested but unsuccessful.
void MacroAssembler::allocateString(Register result, Register temp,
gc::AllocKind allocKind,
gc::Heap initialHeap, Label* fail) {
MOZ_ASSERT(allocKind == gc::AllocKind::STRING ||
allocKind == gc::AllocKind::FAT_INLINE_STRING);
checkAllocatorState(temp, allocKind, fail);
if (shouldNurseryAllocate(allocKind, initialHeap)) {
MOZ_ASSERT(initialHeap == gc::Heap::Default);
return nurseryAllocateString(result, temp, allocKind, fail);
}
freeListAllocate(result, temp, allocKind, fail);
}
void MacroAssembler::newGCString(Register result, Register temp,
gc::Heap initialHeap, Label* fail) {
allocateString(result, temp, js::gc::AllocKind::STRING, initialHeap, fail);
}
void MacroAssembler::newGCFatInlineString(Register result, Register temp,
gc::Heap initialHeap, Label* fail) {
allocateString(result, temp, js::gc::AllocKind::FAT_INLINE_STRING,
initialHeap, fail);
}
void MacroAssembler::newGCBigInt(Register result, Register temp,
gc::Heap initialHeap, Label* fail) {
constexpr gc::AllocKind allocKind = gc::AllocKind::BIGINT;
checkAllocatorState(temp, allocKind, fail);
if (shouldNurseryAllocate(allocKind, initialHeap)) {
MOZ_ASSERT(initialHeap == gc::Heap::Default);
return nurseryAllocateBigInt(result, temp, fail);
}
freeListAllocate(result, temp, allocKind, fail);
}
void MacroAssembler::copySlotsFromTemplate(
Register obj, const TemplateNativeObject& templateObj, uint32_t start,
uint32_t end) {
uint32_t nfixed = std::min(templateObj.numFixedSlots(), end);
for (unsigned i = start; i < nfixed; i++) {
// Template objects are not exposed to script and therefore immutable.
// However, regexp template objects are sometimes used directly (when
// the cloning is not observable), and therefore we can end up with a
// non-zero lastIndex. Detect this case here and just substitute 0, to
// avoid racing with the main thread updating this slot.
Value v;
if (templateObj.isRegExpObject() && i == RegExpObject::lastIndexSlot()) {
v = Int32Value(0);
} else {
v = templateObj.getSlot(i);
}
storeValue(v, Address(obj, NativeObject::getFixedSlotOffset(i)));
}
}
void MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
uint32_t start, uint32_t end,
const Value& v) {
MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));
if (start >= end) {
return;
}
#ifdef JS_NUNBOX32
// We only have a single spare register, so do the initialization as two
// strided writes of the tag and body.
Address addr = base;
move32(Imm32(v.toNunboxPayload()), temp);
for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
store32(temp, ToPayload(addr));
}
addr = base;
move32(Imm32(v.toNunboxTag()), temp);
for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
store32(temp, ToType(addr));
}
#else
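// On 64-bit platforms the boxed Value fits in a single register, so each
// slot is initialized with one store.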
moveValue(v, ValueOperand(temp));
for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtr<Value>)) {
storePtr(temp, base);
}
#endif
}
void MacroAssembler::fillSlotsWithUndefined(Address base, Register temp,
uint32_t start, uint32_t end) {
fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
}
void MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp,
uint32_t start, uint32_t end) {
fillSlotsWithConstantValue(base, temp, start, end,
MagicValue(JS_UNINITIALIZED_LEXICAL));
}
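// Scan backwards over the template object's slots to find the start of the
// trailing run of undefined slots and, immediately before it, the start of
// any run of uninitialized-lexical slots. Returns the pair
// {startOfUninitialized, startOfUndefined}.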
static std::pair<uint32_t, uint32_t> FindStartOfUninitializedAndUndefinedSlots(
const TemplateNativeObject& templateObj, uint32_t nslots) {
MOZ_ASSERT(nslots == templateObj.slotSpan());
MOZ_ASSERT(nslots > 0);
uint32_t first = nslots;
for (; first != 0; --first) {
if (templateObj.getSlot(first - 1) != UndefinedValue()) {
break;
}
}
uint32_t startOfUndefined = first;
if (first != 0 && IsUninitializedLexical(templateObj.getSlot(first - 1))) {
for (; first != 0; --first) {
if (!IsUninitializedLexical(templateObj.getSlot(first - 1))) {
break;
}
}
}
uint32_t startOfUninitialized = first;
return {startOfUninitialized, startOfUndefined};
}
void MacroAssembler::initTypedArraySlots(
Register obj, Register temp, Register lengthReg, LiveRegisterSet liveRegs,
Label* fail, FixedLengthTypedArrayObject* templateObj,
TypedArrayLength lengthKind) {
MOZ_ASSERT(!templateObj->hasBuffer());
constexpr size_t dataSlotOffset = ArrayBufferViewObject::dataOffset();
constexpr size_t dataOffset = dataSlotOffset + sizeof(HeapSlot);
static_assert(
FixedLengthTypedArrayObject::FIXED_DATA_START ==
FixedLengthTypedArrayObject::DATA_SLOT + 1,
"fixed inline element data assumed to begin after the data slot");
static_assert(
FixedLengthTypedArrayObject::INLINE_BUFFER_LIMIT ==
JSObject::MAX_BYTE_SIZE - dataOffset,
"typed array inline buffer is limited by the maximum object byte size");
// Initialise data elements to zero.
size_t length = templateObj->length();
MOZ_ASSERT(length <= INT32_MAX,
"Template objects are only created for int32 lengths");
size_t nbytes = length * templateObj->bytesPerElement();
if (lengthKind == TypedArrayLength::Fixed &&
nbytes <= FixedLengthTypedArrayObject::INLINE_BUFFER_LIMIT) {
MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());
// Store data elements inside the remaining JSObject slots.
computeEffectiveAddress(Address(obj, dataOffset), temp);
storePrivateValue(temp, Address(obj, dataSlotOffset));
// Write enough zero pointers into fixed data to zero every
// element. (This zeroes past the end of a byte count that's
// not a multiple of pointer size. That's okay, because fixed
// data is a count of 8-byte HeapSlots (i.e. <= pointer size),
// and we won't inline unless the desired memory fits in that
// space.)
static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");
size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char*);
for (size_t i = 0; i < numZeroPointers; i++) {
storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char*)));
}
MOZ_ASSERT(nbytes > 0, "Zero-length TypedArrays need ZeroLengthArrayData");
} else {
if (lengthKind == TypedArrayLength::Fixed) {
move32(Imm32(length), lengthReg);
}
// Ensure volatile |obj| is saved across the call.
if (obj.volatile_()) {
liveRegs.addUnchecked(obj);
}
// Allocate a buffer on the heap to store the data elements.
PushRegsInMask(liveRegs);
using Fn = void (*)(JSContext* cx, TypedArrayObject* obj, int32_t count);
setupUnalignedABICall(temp);
loadJSContext(temp);
passABIArg(temp);
passABIArg(obj);
passABIArg(lengthReg);
callWithABI<Fn, AllocateAndInitTypedArrayBuffer>();
PopRegsInMask(liveRegs);
// Fail when data slot is UndefinedValue.
branchTestUndefined(Assembler::Equal, Address(obj, dataSlotOffset), fail);
}
}
void MacroAssembler::initGCSlots(Register obj, Register temp,
const TemplateNativeObject& templateObj) {
MOZ_ASSERT(!templateObj.isArrayObject());
// Slots of non-array objects are required to be initialized.
// Use the values currently in the template object.
uint32_t nslots = templateObj.slotSpan();
if (nslots == 0) {
return;
}
uint32_t nfixed = templateObj.numUsedFixedSlots();
uint32_t ndynamic = templateObj.numDynamicSlots();
// Attempt to group slot writes such that we minimize the amount of
// duplicated data we need to embed in code and load into registers. In
// general, most template object slots will be undefined except for any
// reserved slots. Since reserved slots come first, we split the object
// logically into independent non-UndefinedValue writes to the head and
// duplicated writes of UndefinedValue to the tail. For the majority of
// objects, the "tail" will be the entire slot range.
//
// The template object may be a CallObject, in which case we need to
// account for uninitialized lexical slots as well as undefined
// slots. Uninitialized lexical slots appear in CallObjects if the function
// has parameter expressions, in which case closed over parameters have
// TDZ. Uninitialized slots come before undefined slots in CallObjects.
auto [startOfUninitialized, startOfUndefined] =
FindStartOfUninitializedAndUndefinedSlots(templateObj, nslots);
MOZ_ASSERT(startOfUninitialized <= nfixed); // Reserved slots must be fixed.
MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
MOZ_ASSERT_IF(!templateObj.isCallObject() &&
!templateObj.isBlockLexicalEnvironmentObject(),
startOfUninitialized == startOfUndefined);
// Copy over any preserved reserved slots.
copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);
// Fill the rest of the fixed slots with undefined and uninitialized.
size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
fillSlotsWithUninitialized(Address(obj, offset), temp, startOfUninitialized,
std::min(startOfUndefined, nfixed));
if (startOfUndefined < nfixed) {
offset = NativeObject::getFixedSlotOffset(startOfUndefined);
fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined,
nfixed);
}
if (ndynamic) {
// We are short one register to do this elegantly. Borrow the obj
// register briefly for our slots base address.
push(obj);
loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);
// Fill uninitialized slots if necessary. Otherwise initialize all
// slots to undefined.
if (startOfUndefined > nfixed) {
MOZ_ASSERT(startOfUninitialized != startOfUndefined);
fillSlotsWithUninitialized(Address(obj, 0), temp, 0,
startOfUndefined - nfixed);
size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
fillSlotsWithUndefined(Address(obj, offset), temp,
startOfUndefined - nfixed, ndynamic);
} else {
fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
}
pop(obj);
}
}
void MacroAssembler::initGCThing(Register obj, Register temp,
const TemplateObject& templateObj,
bool initContents) {
// Fast initialization of an empty object returned by allocateObject().
storePtr(ImmGCPtr(templateObj.shape()),
Address(obj, JSObject::offsetOfShape()));
if (templateObj.isNativeObject()) {
const TemplateNativeObject& ntemplate =
templateObj.asTemplateNativeObject();
MOZ_ASSERT(!ntemplate.hasDynamicElements());
// If the object has dynamic slots, the slots member has already been
// filled in.
if (ntemplate.numDynamicSlots() == 0) {
storePtr(ImmPtr(emptyObjectSlots),
Address(obj, NativeObject::offsetOfSlots()));
}
if (ntemplate.isArrayObject()) {
// Can't skip initializing reserved slots.
MOZ_ASSERT(initContents);
int elementsOffset = NativeObject::offsetOfFixedElements();
computeEffectiveAddress(Address(obj, elementsOffset), temp);
storePtr(temp, Address(obj, NativeObject::offsetOfElements()));
// Fill in the elements header.
store32(
Imm32(ntemplate.getDenseCapacity()),
Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
store32(Imm32(ntemplate.getDenseInitializedLength()),
Address(obj, elementsOffset +
ObjectElements::offsetOfInitializedLength()));
store32(Imm32(ntemplate.getArrayLength()),
Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
store32(Imm32(ObjectElements::FIXED),
Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
} else if (ntemplate.isArgumentsObject()) {
// The caller will initialize the reserved slots.
MOZ_ASSERT(!initContents);
storePtr(ImmPtr(emptyObjectElements),
Address(obj, NativeObject::offsetOfElements()));
} else {
// If the target type could be a TypedArray that maps shared memory
// then this would need to store emptyObjectElementsShared in that case.
MOZ_ASSERT(!ntemplate.isSharedMemory());
// Can't skip initializing reserved slots.
MOZ_ASSERT(initContents);
storePtr(ImmPtr(emptyObjectElements),
Address(obj, NativeObject::offsetOfElements()));
initGCSlots(obj, temp, ntemplate);
}
} else {