/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/MacroAssembler-inl.h"
#include "mozilla/FloatingPoint.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/XorShift128PlusRNG.h"
#include <algorithm>
#include <utility>
#include "jit/AtomicOp.h"
#include "jit/AtomicOperations.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineJIT.h"
#include "jit/JitFrames.h"
#include "jit/JitOptions.h"
#include "jit/JitRuntime.h"
#include "jit/JitScript.h"
#include "jit/MoveEmitter.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/Simulator.h"
#include "jit/VMFunctions.h"
#include "js/Conversions.h"
#include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
#include "js/ScalarType.h" // js::Scalar::Type
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/FunctionFlags.h" // js::FunctionFlags
#include "vm/Iteration.h"
#include "vm/JSContext.h"
#include "vm/TypedArrayObject.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmInstanceData.h"
#include "wasm/WasmMemory.h"
#include "wasm/WasmValidate.h"
#include "jit/TemplateObject-inl.h"
#include "vm/BytecodeUtil-inl.h"
#include "vm/Interpreter-inl.h"
using namespace js;
using namespace js::jit;
using JS::GenericNaN;
using JS::ToInt32;
using mozilla::CheckedInt;
// Return the pre-write-barrier trampoline for values of |type|, as registered
// with the JIT runtime.
TrampolinePtr MacroAssembler::preBarrierTrampoline(MIRType type) {
  const JitRuntime* jitRt = runtime()->jitRuntime();
  return jitRt->preBarrier(type);
}
// Emit a store of the floating-point |value| into the Float32/Float64 typed
// array slot |dest|. Crashes on any non-float array type.
template <typename S, typename T>
static void StoreToTypedFloatArray(MacroAssembler& masm, int arrayType,
                                   const S& value, const T& dest) {
  if (arrayType == Scalar::Float32) {
    masm.storeFloat32(value, dest);
  } else if (arrayType == Scalar::Float64) {
    masm.storeDouble(value, dest);
  } else {
    MOZ_CRASH("Invalid typed array type");
  }
}
// Store |value| into the float typed-array element addressed by base+index.
void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const BaseIndex& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}
// Store |value| into the float typed-array element at a fixed address.
void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
                                            FloatRegister value,
                                            const Address& dest) {
  StoreToTypedFloatArray(*this, arrayType, value, dest);
}
// Emit a store of the 64-bit |value| into a BigInt64/BigUint64 typed array
// slot. Both signednesses use the same plain 64-bit store; the distinction
// only matters when reading elements back.
template <typename S, typename T>
static void StoreToTypedBigIntArray(MacroAssembler& masm,
                                    Scalar::Type arrayType, const S& value,
                                    const T& dest) {
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));
  masm.store64(value, dest);
}
// Store |value| into the BigInt typed-array element addressed by base+index.
void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
                                             Register64 value,
                                             const BaseIndex& dest) {
  StoreToTypedBigIntArray(*this, arrayType, value, dest);
}
// Store |value| into the BigInt typed-array element at a fixed address.
void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
                                             Register64 value,
                                             const Address& dest) {
  StoreToTypedBigIntArray(*this, arrayType, value, dest);
}
// Box the uint32 in |source| into |dest|. Depending on |mode|, either require
// that the value fits in an int32 (jumping to |fail| otherwise) or always box
// it as a double.
void MacroAssembler::boxUint32(Register source, ValueOperand dest,
                               Uint32Mode mode, Label* fail) {
  if (mode == Uint32Mode::FailOnDouble) {
    // The uint32 must be representable as an int32: bail if the sign bit of
    // the 32-bit value is set.
    branchTest32(Assembler::Signed, source, source, fail);
    tagValue(JSVAL_TYPE_INT32, source, dest);
  } else if (mode == Uint32Mode::ForceDouble) {
    // Unconditionally convert to double and box that.
    ScratchDoubleScope fpscratch(*this);
    convertUInt32ToDouble(source, fpscratch);
    boxDouble(fpscratch, dest, fpscratch);
  }
}
// Load a typed-array element of type |arrayType| from |src| into |dest|,
// extending/converting as appropriate. |temp| is only used for Uint32 loads
// into a float register; |fail| is only taken for Uint32 loads into a GPR
// whose value does not fit in an int32. BigInt element types are not
// supported here (see loadFromTypedBigIntArray).
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        AnyRegister dest, Register temp,
                                        Label* fail) {
  switch (arrayType) {
    case Scalar::Int8:
      load8SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
      load8ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int16:
      load16SignExtend(src, dest.gpr());
      break;
    case Scalar::Uint16:
      load16ZeroExtend(src, dest.gpr());
      break;
    case Scalar::Int32:
      load32(src, dest.gpr());
      break;
    case Scalar::Uint32:
      if (dest.isFloat()) {
        // A uint32 may exceed INT32_MAX, so go through a double register.
        load32(src, temp);
        convertUInt32ToDouble(temp, dest.fpu());
      } else {
        load32(src, dest.gpr());

        // Bail out if the value doesn't fit into a signed int32 value. This
        // is what allows MLoadUnboxedScalar to have a type() of
        // MIRType::Int32 for UInt32 array loads.
        branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
      }
      break;
    case Scalar::Float32:
      // Canonicalize NaNs so they never leak bit patterns into JS values.
      loadFloat32(src, dest.fpu());
      canonicalizeFloat(dest.fpu());
      break;
    case Scalar::Float64:
      loadDouble(src, dest.fpu());
      canonicalizeDouble(dest.fpu());
      break;
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
// Explicit instantiations for the two addressing modes callers use.
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 AnyRegister dest,
                                                 Register temp, Label* fail);
// Load a typed-array element from |src| and box it into the Value register
// |dest|. |uint32Mode| controls whether Uint32 values that exceed INT32_MAX
// fail (jump to |fail|) or are boxed as doubles. |temp| is used so that
// |dest| is not clobbered on the failing Uint32 path. BigInt element types
// are not supported.
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
                                        const ValueOperand& dest,
                                        Uint32Mode uint32Mode, Register temp,
                                        Label* fail) {
  switch (arrayType) {
    case Scalar::Int8:
    case Scalar::Uint8:
    case Scalar::Uint8Clamped:
    case Scalar::Int16:
    case Scalar::Uint16:
    case Scalar::Int32:
      // These always fit in an int32, so no temp and no failure path needed.
      loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()),
                         InvalidReg, nullptr);
      tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
      break;
    case Scalar::Uint32:
      // Don't clobber dest when we could fail, instead use temp.
      load32(src, temp);
      boxUint32(temp, dest, uint32Mode, fail);
      break;
    case Scalar::Float32: {
      // Load as float, widen to double, then box the double.
      ScratchDoubleScope dscratch(*this);
      FloatRegister fscratch = dscratch.asSingle();
      loadFromTypedArray(arrayType, src, AnyRegister(fscratch),
                         dest.scratchReg(), nullptr);
      convertFloat32ToDouble(fscratch, dscratch);
      boxDouble(dscratch, dest, dscratch);
      break;
    }
    case Scalar::Float64: {
      ScratchDoubleScope fpscratch(*this);
      loadFromTypedArray(arrayType, src, AnyRegister(fpscratch),
                         dest.scratchReg(), nullptr);
      boxDouble(fpscratch, dest, fpscratch);
      break;
    }
    case Scalar::BigInt64:
    case Scalar::BigUint64:
    default:
      MOZ_CRASH("Invalid typed array type");
  }
}
// Explicit instantiations for the two addressing modes callers use.
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const Address& src,
                                                 const ValueOperand& dest,
                                                 Uint32Mode uint32Mode,
                                                 Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
                                                 const BaseIndex& src,
                                                 const ValueOperand& dest,
                                                 Uint32Mode uint32Mode,
                                                 Register temp, Label* fail);
// Load a 64-bit element from a BigInt64/BigUint64 typed array into |temp| and
// initialize the (already-allocated) BigInt object |bigInt| from it.
template <typename T>
void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                              const T& src, Register bigInt,
                                              Register64 temp) {
  MOZ_ASSERT(Scalar::isBigIntType(arrayType));
  load64(src, temp);
  initializeBigInt64(arrayType, bigInt, temp);
}
// Explicit instantiations for the two addressing modes callers use.
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                                       const Address& src,
                                                       Register bigInt,
                                                       Register64 temp);
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
                                                       const BaseIndex& src,
                                                       Register bigInt,
                                                       Register64 temp);
// Inlined version of gc::CheckAllocatorState that checks the bare essentials
// and bails for anything that cannot be handled with our jit allocators.
// On any disqualifying condition, control jumps to |fail| (the out-of-line
// allocation path); otherwise execution falls through to the inline path.
void MacroAssembler::checkAllocatorState(Label* fail) {
  // Don't execute the inline path if GC probes are built in.
#ifdef JS_GC_PROBES
  jump(fail);
#endif

#ifdef JS_GC_ZEAL
  // Don't execute the inline path if gc zeal or tracing are active.
  const uint32_t* ptrZealModeBits = runtime()->addressOfGCZealModeBits();
  branch32(Assembler::NotEqual, AbsoluteAddress(ptrZealModeBits), Imm32(0),
           fail);
#endif

  // Don't execute the inline path if the realm has an object metadata callback,
  // as the metadata to use for the object may vary between executions of the
  // op. (This is a compile-time check, so it emits an unconditional jump.)
  if (realm()->hasAllocationMetadataBuilder()) {
    jump(fail);
  }
}
// Decide at compile time whether an allocation of |allocKind| destined for
// |initialHeap| should take the nursery path.
bool MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind,
                                           gc::InitialHeap initialHeap) {
  // Note that Ion elides barriers on writes to objects known to be in the
  // nursery, so any allocation that can be made into the nursery must be made
  // into the nursery, even if the nursery is disabled. At runtime these will
  // take the out-of-line path, which is required to insert a barrier for the
  // initializing writes.
  if (!IsNurseryAllocable(allocKind)) {
    return false;
  }
  return initialHeap != gc::TenuredHeap;
}
// Inline version of Nursery::allocateObject. If the object has dynamic slots,
// this fills in the slots_ pointer. On success |result| holds the new object;
// otherwise control transfers to |fail|.
void MacroAssembler::nurseryAllocateObject(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           size_t nDynamicSlots, Label* fail,
                                           const AllocSiteInput& allocSite) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // We still need to allocate in the nursery, per the comment in
  // shouldNurseryAllocate; however, we need to insert into the
  // mallocedBuffers set, so bail to do the nursery allocation in the
  // interpreter.
  if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
    jump(fail);
    return;
  }

  // Check whether this allocation site needs pretenuring. This dynamic check
  // only happens for baseline code.
  if (allocSite.is<Register>()) {
    Register site = allocSite.as<Register>();
    branchTestPtr(Assembler::NonZero,
                  Address(site, gc::AllocSite::offsetOfScriptAndState()),
                  Imm32(gc::AllocSite::LONG_LIVED_BIT), fail);
  }

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.
  CompileZone* zone = realm()->zone();
  size_t thingSize = gc::Arena::thingSize(allocKind);
  // The cell and its dynamic-slots header (if any) are allocated contiguously.
  size_t totalSize = thingSize;
  if (nDynamicSlots) {
    totalSize += ObjectSlots::allocSize(nDynamicSlots);
  }
  MOZ_ASSERT(totalSize < INT32_MAX);
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  bumpPointerAllocate(result, temp, fail, zone,
                      zone->addressOfNurseryPosition(),
                      zone->addressOfNurseryCurrentEnd(), JS::TraceKind::Object,
                      totalSize, allocSite);

  if (nDynamicSlots) {
    // Initialize the ObjectSlots header that sits between the object and its
    // dynamic slot storage, then point the object's slots_ at that storage.
    store32(Imm32(nDynamicSlots),
            Address(result, thingSize + ObjectSlots::offsetOfCapacity()));
    store32(
        Imm32(0),
        Address(result, thingSize + ObjectSlots::offsetOfDictionarySlotSpan()));
    computeEffectiveAddress(
        Address(result, thingSize + ObjectSlots::offsetOfSlots()), temp);
    storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
  }
}
// Inlined version of FreeSpan::allocate. This does not fill in slots_.
// Allocates a tenured cell of |allocKind| into |result|, jumping to |fail| if
// the free list is exhausted. |temp| is clobbered.
void MacroAssembler::freeListAllocate(Register result, Register temp,
                                      gc::AllocKind allocKind, Label* fail) {
  CompileZone* zone = realm()->zone();
  int thingSize = int(gc::Arena::thingSize(allocKind));

  Label fallback;
  Label success;

  // Load the first and last offsets of |zone|'s free list for |allocKind|.
  // If there is no room remaining in the span, fall back to get the next one.
  gc::FreeSpan** ptrFreeList = zone->addressOfFreeList(allocKind);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
  load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
  branch32(Assembler::AboveOrEqual, result, temp, &fallback);

  // Bump the offset for the next allocation.
  add32(Imm32(thingSize), result);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  sub32(Imm32(thingSize), result);
  addPtr(temp, result);  // Turn the offset into a pointer.
  jump(&success);

  bind(&fallback);
  // If there are no free spans left, we bail to finish the allocation. The
  // interpreter will call the GC allocator to set up a new arena to allocate
  // from, after which we can resume allocating in the jit.
  branchTest32(Assembler::Zero, result, result, fail);
  loadPtr(AbsoluteAddress(ptrFreeList), temp);
  addPtr(temp, result);  // Turn the offset into a pointer.
  Push(result);
  // Update the free list to point to the next span (which may be empty).
  load32(Address(result, 0), result);
  store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
  Pop(result);

  bind(&success);

  // Keep the profiler's tenured-allocation counter up to date.
  if (runtime()->geckoProfiler().enabled()) {
    uint32_t* countAddress = zone->addressOfTenuredAllocCount();
    movePtr(ImmPtr(countAddress), temp);
    add32(Imm32(1), Address(temp, 0));
  }
}
// Call the runtime's free stub to release the malloced buffer in |slots|.
// Preserves CallTempReg0 around the call.
void MacroAssembler::callFreeStub(Register slots) {
  // This register must match the one in JitRuntime::generateFreeStub.
  const Register regSlots = CallTempReg0;
  push(regSlots);
  movePtr(slots, regSlots);
  call(runtime()->jitRuntime()->freeStub());
  pop(regSlots);
}
// Inlined equivalent of gc::AllocateObject, without failure case handling.
// Dispatches between the nursery bump allocator and the tenured free-list
// allocator; tenured allocations with dynamic slots always bail to the VM.
void MacroAssembler::allocateObject(Register result, Register temp,
                                    gc::AllocKind allocKind,
                                    uint32_t nDynamicSlots,
                                    gc::InitialHeap initialHeap, Label* fail,
                                    const AllocSiteInput& allocSite) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

  checkAllocatorState(fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::DefaultHeap);
    nurseryAllocateObject(result, temp, allocKind, nDynamicSlots, fail,
                          allocSite);
    return;
  }

  // Fall back to calling into the VM to allocate objects in the tenured heap
  // that have dynamic slots.
  if (nDynamicSlots) {
    jump(fail);
    return;
  }

  freeListAllocate(result, temp, allocKind, fail);
}
// Allocate and initialize an object matching |templateObj|. On failure,
// control transfers to |fail|.
void MacroAssembler::createGCObject(Register obj, Register temp,
                                    const TemplateObject& templateObj,
                                    gc::InitialHeap initialHeap, Label* fail,
                                    bool initContents /* = true */) {
  gc::AllocKind allocKind = templateObj.getAllocKind();
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

  // Only native template objects can carry dynamic slots.
  uint32_t dynamicSlotCount =
      templateObj.isNativeObject()
          ? templateObj.asTemplateNativeObject().numDynamicSlots()
          : 0;

  allocateObject(obj, temp, allocKind, dynamicSlotCount, initialHeap, fail);
  initGCThing(obj, temp, templateObj, initContents);
}
// Allocate and fully initialize a plain native object with the given |shape|,
// |numFixedSlots| and |numDynamicSlots|. On failure, control transfers to
// |fail|. |temp| and |temp2| are clobbered.
void MacroAssembler::createPlainGCObject(
    Register result, Register shape, Register temp, Register temp2,
    uint32_t numFixedSlots, uint32_t numDynamicSlots, gc::AllocKind allocKind,
    gc::InitialHeap initialHeap, Label* fail, const AllocSiteInput& allocSite,
    bool initContents /* = true */) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
  MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");

  // Allocate object.
  allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
                 allocSite);

  // Initialize shape field.
  storePtr(shape, Address(result, JSObject::offsetOfShape()));

  // If the object has dynamic slots, allocateObject will initialize
  // the slots field. If not, we must initialize it now.
  if (numDynamicSlots == 0) {
    storePtr(ImmPtr(emptyObjectSlots),
             Address(result, NativeObject::offsetOfSlots()));
  }

  // Initialize elements field.
  storePtr(ImmPtr(emptyObjectElements),
           Address(result, NativeObject::offsetOfElements()));

  // Initialize fixed slots.
  if (initContents) {
    fillSlotsWithUndefined(Address(result, NativeObject::getFixedSlotOffset(0)),
                           temp, 0, numFixedSlots);
  }

  // Initialize dynamic slots.
  if (numDynamicSlots > 0) {
    loadPtr(Address(result, NativeObject::offsetOfSlots()), temp2);
    fillSlotsWithUndefined(Address(temp2, 0), temp, 0, numDynamicSlots);
  }
}
// Allocate and initialize an ArrayObject whose elements are stored inline
// (fixed), with the given |shape|, |arrayLength| and |arrayCapacity|. On
// failure, control transfers to |fail|. |temp| is clobbered.
//
// This only supports fixed (inline) elements; dynamic slots and dynamic
// elements are not supported.
void MacroAssembler::createArrayWithFixedElements(
    Register result, Register shape, Register temp, uint32_t arrayLength,
    uint32_t arrayCapacity, gc::AllocKind allocKind,
    gc::InitialHeap initialHeap, Label* fail, const AllocSiteInput& allocSite) {
  MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
  // Fix the assertion message: unlike createPlainGCObject, this function has
  // no |temp2|; the requirement is simply that |shape| and |temp| differ.
  MOZ_ASSERT(shape != temp, "shape and temp must be distinct registers");
  MOZ_ASSERT(result != temp);

  // This only supports allocating arrays with fixed elements and does not
  // support any dynamic slots or elements.
  MOZ_ASSERT(arrayCapacity >= arrayLength);
  MOZ_ASSERT(gc::GetGCKindSlots(allocKind) >=
             arrayCapacity + ObjectElements::VALUES_PER_HEADER);

  // Allocate object.
  allocateObject(result, temp, allocKind, 0, initialHeap, fail, allocSite);

  // Initialize shape field.
  storePtr(shape, Address(result, JSObject::offsetOfShape()));

  // There are no dynamic slots.
  storePtr(ImmPtr(emptyObjectSlots),
           Address(result, NativeObject::offsetOfSlots()));

  // Initialize elements pointer for fixed (inline) elements.
  computeEffectiveAddress(
      Address(result, NativeObject::offsetOfFixedElements()), temp);
  storePtr(temp, Address(result, NativeObject::offsetOfElements()));

  // Initialize elements header.
  store32(Imm32(ObjectElements::FIXED),
          Address(temp, ObjectElements::offsetOfFlags()));
  store32(Imm32(0), Address(temp, ObjectElements::offsetOfInitializedLength()));
  store32(Imm32(arrayCapacity),
          Address(temp, ObjectElements::offsetOfCapacity()));
  store32(Imm32(arrayLength), Address(temp, ObjectElements::offsetOfLength()));
}
// Inline version of Nursery::allocateString.
void MacroAssembler::nurseryAllocateString(Register result, Register temp,
                                           gc::AllocKind allocKind,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(allocKind));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  CompileZone* zone = realm()->zone();

  // Bump the zone's count of nursery-allocated strings.
  uint64_t* allocStrsPtr = &zone->zone()->nurseryAllocatedStrings.ref();
  inc64(AbsoluteAddress(allocStrsPtr));

  size_t cellSize = gc::Arena::thingSize(allocKind);
  bumpPointerAllocate(result, temp, fail, zone,
                      zone->addressOfStringNurseryPosition(),
                      zone->addressOfStringNurseryCurrentEnd(),
                      JS::TraceKind::String, cellSize);
}
// Inline version of Nursery::allocateBigInt.
void MacroAssembler::nurseryAllocateBigInt(Register result, Register temp,
                                           Label* fail) {
  MOZ_ASSERT(IsNurseryAllocable(gc::AllocKind::BIGINT));

  // No explicit check for nursery.isEnabled() is needed, as the comparison
  // with the nursery's end will always fail in such cases.

  CompileZone* zone = realm()->zone();
  size_t cellSize = gc::Arena::thingSize(gc::AllocKind::BIGINT);

  bumpPointerAllocate(result, temp, fail, zone,
                      zone->addressOfBigIntNurseryPosition(),
                      zone->addressOfBigIntNurseryCurrentEnd(),
                      JS::TraceKind::BigInt, cellSize);
}
// Shared nursery bump-pointer allocation. Reserves |size| bytes (plus the
// nursery cell header) at the current position for the nursery identified by
// |posAddr|/|curEndAddr|, writes the cell header, and leaves a pointer to the
// cell (just past the header) in |result|. Jumps to |fail| when the nursery
// chunk is exhausted. |temp| is clobbered.
void MacroAssembler::bumpPointerAllocate(Register result, Register temp,
                                         Label* fail, CompileZone* zone,
                                         void* posAddr, const void* curEndAddr,
                                         JS::TraceKind traceKind, uint32_t size,
                                         const AllocSiteInput& allocSite) {
  MOZ_ASSERT(size >= gc::MinCellSize);

  uint32_t totalSize = size + Nursery::nurseryCellHeaderSize();
  MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
  MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);

  // The position (allocation pointer) and the end pointer are stored
  // very close to each other -- specifically, easily within a 32 bit offset.
  // Use relative offsets between them, to avoid 64-bit immediate loads.
  //
  // I tried to optimise this further by using an extra register to avoid
  // the final subtraction and hopefully get some more instruction
  // parallelism, but it made no difference.
  movePtr(ImmPtr(posAddr), temp);
  loadPtr(Address(temp, 0), result);
  addPtr(Imm32(totalSize), result);
  CheckedInt<int32_t> endOffset =
      (CheckedInt<uintptr_t>(uintptr_t(curEndAddr)) -
       CheckedInt<uintptr_t>(uintptr_t(posAddr)))
          .toChecked<int32_t>();
  MOZ_ASSERT(endOffset.isValid(), "Position and end pointers must be nearby");
  branchPtr(Assembler::Below, Address(temp, endOffset.value()), result, fail);
  // Commit the new position, then step |result| back to the start of the
  // reserved cell (just past the header).
  storePtr(result, Address(temp, 0));
  subPtr(Imm32(size), result);

  // Profiler bookkeeping: bump the nursery allocation count, preferring a
  // position-relative store when the counter is close enough to |posAddr|.
  if (runtime()->geckoProfiler().enabled()) {
    uint32_t* countAddress = zone->addressOfNurseryAllocCount();
    CheckedInt<int32_t> counterOffset =
        (CheckedInt<uintptr_t>(uintptr_t(countAddress)) -
         CheckedInt<uintptr_t>(uintptr_t(posAddr)))
            .toChecked<int32_t>();
    if (counterOffset.isValid()) {
      add32(Imm32(1), Address(temp, counterOffset.value()));
    } else {
      movePtr(ImmPtr(countAddress), temp);
      add32(Imm32(1), Address(temp, 0));
    }
  }

  if (allocSite.is<gc::CatchAllAllocSite>()) {
    // No allocation site supplied. This is the case when called from Warp, or
    // from places that don't support pretenuring.
    gc::CatchAllAllocSite siteKind = allocSite.as<gc::CatchAllAllocSite>();
    storePtr(ImmWord(zone->nurseryCellHeader(traceKind, siteKind)),
             Address(result, -js::Nursery::nurseryCellHeaderSize()));
  } else {
    // Update allocation site and store pointer in the nursery cell header. This
    // is only used from baseline.
    Register site = allocSite.as<Register>();
    updateAllocSite(temp, result, zone, site);
    // See NurseryCellHeader::MakeValue.
    orPtr(Imm32(int32_t(traceKind)), site);
    storePtr(site, Address(result, -js::Nursery::nurseryCellHeaderSize()));
  }
}
// Update the allocation site in the same way as Nursery::allocateCell.
// Bumps the site's nursery allocation count and, if the site is not yet on
// the zone's list of sites with nursery allocations, prepends it.
void MacroAssembler::updateAllocSite(Register temp, Register result,
                                     CompileZone* zone, Register site) {
  Label done;

  add32(Imm32(1), Address(site, gc::AllocSite::offsetOfNurseryAllocCount()));

  // A non-null next pointer means the site is already linked in.
  branchPtr(Assembler::NotEqual,
            Address(site, gc::AllocSite::offsetOfNextNurseryAllocated()),
            ImmPtr(nullptr), &done);

  // Prepend |site| to the zone's singly-linked list of allocated sites.
  loadPtr(AbsoluteAddress(zone->addressOfNurseryAllocatedSites()), temp);
  storePtr(temp, Address(site, gc::AllocSite::offsetOfNextNurseryAllocated()));
  storePtr(site, AbsoluteAddress(zone->addressOfNurseryAllocatedSites()));

  bind(&done);
}
// Inlined equivalent of gc::AllocateString, jumping to fail if nursery
// allocation requested but unsuccessful.
void MacroAssembler::allocateString(Register result, Register temp,
                                    gc::AllocKind allocKind,
                                    gc::InitialHeap initialHeap, Label* fail) {
  MOZ_ASSERT(allocKind == gc::AllocKind::STRING ||
             allocKind == gc::AllocKind::FAT_INLINE_STRING);

  checkAllocatorState(fail);

  if (shouldNurseryAllocate(allocKind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::DefaultHeap);
    nurseryAllocateString(result, temp, allocKind, fail);
    return;
  }

  freeListAllocate(result, temp, allocKind, fail);
}
// Allocate a regular (non-fat-inline) JSString cell.
void MacroAssembler::newGCString(Register result, Register temp,
                                 gc::InitialHeap initialHeap, Label* fail) {
  allocateString(result, temp, js::gc::AllocKind::STRING, initialHeap, fail);
}
// Allocate a fat-inline JSString cell.
void MacroAssembler::newGCFatInlineString(Register result, Register temp,
                                          gc::InitialHeap initialHeap,
                                          Label* fail) {
  allocateString(result, temp, js::gc::AllocKind::FAT_INLINE_STRING,
                 initialHeap, fail);
}
// Allocate a BigInt cell, preferring the nursery when permitted.
void MacroAssembler::newGCBigInt(Register result, Register temp,
                                 gc::InitialHeap initialHeap, Label* fail) {
  constexpr gc::AllocKind kind = gc::AllocKind::BIGINT;

  checkAllocatorState(fail);

  if (shouldNurseryAllocate(kind, initialHeap)) {
    MOZ_ASSERT(initialHeap == gc::DefaultHeap);
    nurseryAllocateBigInt(result, temp, fail);
    return;
  }

  freeListAllocate(result, temp, kind, fail);
}
// Copy the fixed-slot values in [start, end) from |templateObj| into the
// corresponding fixed slots of |obj|.
void MacroAssembler::copySlotsFromTemplate(
    Register obj, const TemplateNativeObject& templateObj, uint32_t start,
    uint32_t end) {
  uint32_t limit = std::min(templateObj.numFixedSlots(), end);
  for (uint32_t slot = start; slot < limit; slot++) {
    // Template objects are not exposed to script and therefore immutable.
    // However, regexp template objects are sometimes used directly (when
    // the cloning is not observable), and therefore we can end up with a
    // non-zero lastIndex. Detect this case here and just substitute 0, to
    // avoid racing with the main thread updating this slot.
    bool isRegExpLastIndex =
        templateObj.isRegExpObject() && slot == RegExpObject::lastIndexSlot();
    Value v = isRegExpLastIndex ? Int32Value(0) : templateObj.getSlot(slot);
    storeValue(v, Address(obj, NativeObject::getFixedSlotOffset(slot)));
  }
}
// Store the constant |v| (undefined or the uninitialized-lexical magic value)
// into the Value slots [start, end) beginning at |base|. |temp| is clobbered.
void MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
                                                uint32_t start, uint32_t end,
                                                const Value& v) {
  MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));

  if (start >= end) {
    return;
  }

#ifdef JS_NUNBOX32
  // We only have a single spare register, so do the initialization as two
  // strided writes of the tag and body.
  Address addr = base;
  move32(Imm32(v.toNunboxPayload()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
    store32(temp, ToPayload(addr));
  }

  addr = base;
  move32(Imm32(v.toNunboxTag()), temp);
  for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
    store32(temp, ToType(addr));
  }
#else
  // On 64-bit targets one full-width store per slot suffices.
  moveValue(v, ValueOperand(temp));
  for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtr<Value>)) {
    storePtr(temp, base);
  }
#endif
}
// Fill the Value slots [start, end) at |base| with UndefinedValue.
void MacroAssembler::fillSlotsWithUndefined(Address base, Register temp,
                                            uint32_t start, uint32_t end) {
  fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
}
// Fill the Value slots [start, end) at |base| with the uninitialized-lexical
// (TDZ) magic value.
void MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp,
                                                uint32_t start, uint32_t end) {
  fillSlotsWithConstantValue(base, temp, start, end,
                             MagicValue(JS_UNINITIALIZED_LEXICAL));
}
static std::pair<uint32_t, uint32_t> FindStartOfUninitializedAndUndefinedSlots(
const TemplateNativeObject& templateObj, uint32_t nslots) {
MOZ_ASSERT(nslots == templateObj.slotSpan());
MOZ_ASSERT(nslots > 0);
uint32_t first = nslots;
for (; first != 0; --first) {
if (templateObj.getSlot(first - 1) != UndefinedValue()) {
break;
}
}
uint32_t startOfUndefined = first;
if (first != 0 && IsUninitializedLexical(templateObj.getSlot(first - 1))) {
for (; first != 0; --first) {
if (!IsUninitializedLexical(templateObj.getSlot(first - 1))) {
break;
}
}
}
uint32_t startOfUninitialized = first;
return {startOfUninitialized, startOfUndefined};
}
// Initialize the data storage of a freshly allocated TypedArray matching
// |templateObj|: either zero the inline (fixed) storage and point the data
// slot at it, or call into the VM to allocate a heap buffer. On VM-allocation
// failure, control transfers to |fail|.
void MacroAssembler::initTypedArraySlots(Register obj, Register temp,
                                         Register lengthReg,
                                         LiveRegisterSet liveRegs, Label* fail,
                                         TypedArrayObject* templateObj,
                                         TypedArrayLength lengthKind) {
  MOZ_ASSERT(!templateObj->hasBuffer());

  constexpr size_t dataSlotOffset = ArrayBufferViewObject::dataOffset();
  constexpr size_t dataOffset = dataSlotOffset + sizeof(HeapSlot);

  static_assert(
      TypedArrayObject::FIXED_DATA_START == TypedArrayObject::DATA_SLOT + 1,
      "fixed inline element data assumed to begin after the data slot");

  static_assert(
      TypedArrayObject::INLINE_BUFFER_LIMIT ==
          JSObject::MAX_BYTE_SIZE - dataOffset,
      "typed array inline buffer is limited by the maximum object byte size");

  // Initialise data elements to zero.
  size_t length = templateObj->length();
  MOZ_ASSERT(length <= INT32_MAX,
             "Template objects are only created for int32 lengths");
  size_t nbytes = length * templateObj->bytesPerElement();

  if (lengthKind == TypedArrayLength::Fixed &&
      nbytes <= TypedArrayObject::INLINE_BUFFER_LIMIT) {
    MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());

    // Store data elements inside the remaining JSObject slots.
    computeEffectiveAddress(Address(obj, dataOffset), temp);
    storePrivateValue(temp, Address(obj, dataSlotOffset));

    // Write enough zero pointers into fixed data to zero every
    // element. (This zeroes past the end of a byte count that's
    // not a multiple of pointer size. That's okay, because fixed
    // data is a count of 8-byte HeapSlots (i.e. <= pointer size),
    // and we won't inline unless the desired memory fits in that
    // space.)
    static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");

    size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char*);
    for (size_t i = 0; i < numZeroPointers; i++) {
      storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char*)));
    }
    MOZ_ASSERT(nbytes > 0, "Zero-length TypedArrays need ZeroLengthArrayData");
  } else {
    if (lengthKind == TypedArrayLength::Fixed) {
      move32(Imm32(length), lengthReg);
    }

    // Allocate a buffer on the heap to store the data elements. The registers
    // the callee might clobber are saved and restored around the ABI call.
    liveRegs.addUnchecked(temp);
    liveRegs.addUnchecked(obj);
    liveRegs.addUnchecked(lengthReg);
    PushRegsInMask(liveRegs);
    using Fn = void (*)(JSContext * cx, TypedArrayObject * obj, int32_t count);
    setupUnalignedABICall(temp);
    loadJSContext(temp);
    passABIArg(temp);
    passABIArg(obj);
    passABIArg(lengthReg);
    callWithABI<Fn, AllocateAndInitTypedArrayBuffer>();
    PopRegsInMask(liveRegs);

    // Fail when data slot is UndefinedValue.
    branchTestUndefined(Assembler::Equal, Address(obj, dataSlotOffset), fail);
  }
}
// Initialize all (fixed and dynamic) slots of a freshly allocated non-array
// native object from |templateObj|, batching runs of undefined/uninitialized
// values. |temp| is clobbered; |obj| is preserved.
void MacroAssembler::initGCSlots(Register obj, Register temp,
                                 const TemplateNativeObject& templateObj) {
  MOZ_ASSERT(!templateObj.isArrayObject());

  // Slots of non-array objects are required to be initialized.
  // Use the values currently in the template object.
  uint32_t nslots = templateObj.slotSpan();
  if (nslots == 0) {
    return;
  }

  uint32_t nfixed = templateObj.numUsedFixedSlots();
  uint32_t ndynamic = templateObj.numDynamicSlots();

  // Attempt to group slot writes such that we minimize the amount of
  // duplicated data we need to embed in code and load into registers. In
  // general, most template object slots will be undefined except for any
  // reserved slots. Since reserved slots come first, we split the object
  // logically into independent non-UndefinedValue writes to the head and
  // duplicated writes of UndefinedValue to the tail. For the majority of
  // objects, the "tail" will be the entire slot range.
  //
  // The template object may be a CallObject, in which case we need to
  // account for uninitialized lexical slots as well as undefined
  // slots. Uninitialized lexical slots appears in CallObjects if the function
  // has parameter expressions, in which case closed over parameters have
  // TDZ. Uninitialized slots come before undefined slots in CallObjects.
  auto [startOfUninitialized, startOfUndefined] =
      FindStartOfUninitializedAndUndefinedSlots(templateObj, nslots);
  MOZ_ASSERT(startOfUninitialized <= nfixed);  // Reserved slots must be fixed.
  MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
  MOZ_ASSERT_IF(!templateObj.isCallObject() &&
                    !templateObj.isBlockLexicalEnvironmentObject(),
                startOfUninitialized == startOfUndefined);

  // Copy over any preserved reserved slots.
  copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);

  // Fill the rest of the fixed slots with undefined and uninitialized.
  size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
  fillSlotsWithUninitialized(Address(obj, offset), temp, startOfUninitialized,
                             std::min(startOfUndefined, nfixed));

  if (startOfUndefined < nfixed) {
    offset = NativeObject::getFixedSlotOffset(startOfUndefined);
    fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined,
                           nfixed);
  }

  if (ndynamic) {
    // We are short one register to do this elegantly. Borrow the obj
    // register briefly for our slots base address.
    push(obj);
    loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);

    // Fill uninitialized slots if necessary. Otherwise initialize all
    // slots to undefined.
    if (startOfUndefined > nfixed) {
      MOZ_ASSERT(startOfUninitialized != startOfUndefined);
      fillSlotsWithUninitialized(Address(obj, 0), temp, 0,
                                 startOfUndefined - nfixed);
      size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
      fillSlotsWithUndefined(Address(obj, offset), temp,
                             startOfUndefined - nfixed, ndynamic);
    } else {
      fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
    }

    pop(obj);
  }
}
// Fast initialization of an empty object returned by allocateObject(): write
// the shape, set up the slots/elements pointers, and (unless the caller opts
// out via |initContents|) initialize the slot contents from |templateObj|.
void MacroAssembler::initGCThing(Register obj, Register temp,
                                 const TemplateObject& templateObj,
                                 bool initContents) {
  storePtr(ImmGCPtr(templateObj.shape()),
           Address(obj, JSObject::offsetOfShape()));

  if (templateObj.isNativeObject()) {
    const TemplateNativeObject& ntemplate =
        templateObj.asTemplateNativeObject();
    MOZ_ASSERT(!ntemplate.hasDynamicElements());

    // If the object has dynamic slots, the slots member has already been
    // filled in.
    if (!ntemplate.hasDynamicSlots()) {
      storePtr(ImmPtr(emptyObjectSlots),
               Address(obj, NativeObject::offsetOfSlots()));
    }

    if (ntemplate.isArrayObject()) {
      // Can't skip initializing reserved slots.
      MOZ_ASSERT(initContents);

      // Arrays use their fixed (inline) storage as the elements array.
      int elementsOffset = NativeObject::offsetOfFixedElements();
      computeEffectiveAddress(Address(obj, elementsOffset), temp);
      storePtr(temp, Address(obj, NativeObject::offsetOfElements()));

      // Fill in the elements header.
      store32(
          Imm32(ntemplate.getDenseCapacity()),
          Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
      store32(Imm32(ntemplate.getDenseInitializedLength()),
              Address(obj, elementsOffset +
                               ObjectElements::offsetOfInitializedLength()));
      store32(Imm32(ntemplate.getArrayLength()),
              Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
      store32(Imm32(ObjectElements::FIXED),
              Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
    } else if (ntemplate.isArgumentsObject()) {
      // The caller will initialize the reserved slots.
      MOZ_ASSERT(!initContents);
      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));
    } else {
      // If the target type could be a TypedArray that maps shared memory
      // then this would need to store emptyObjectElementsShared in that case.
      MOZ_ASSERT(!ntemplate.isSharedMemory());

      // Can't skip initializing reserved slots.
      MOZ_ASSERT(initContents);

      storePtr(ImmPtr(emptyObjectElements),
               Address(obj, NativeObject::offsetOfElements()));

      initGCSlots(obj, temp, ntemplate);
    }
  } else {
    MOZ_CRASH("Unknown object");
  }

#ifdef JS_GC_PROBES
  // In probe-enabled builds, notify the tracer about the new object via an
  // ABI call, preserving all volatile registers around it.
  AllocatableRegisterSet regs(RegisterSet::Volatile());
  LiveRegisterSet save(regs.asLiveSet());
  PushRegsInMask(save);

  regs.takeUnchecked(obj);
  Register temp2 = regs.takeAnyGeneral();

  using Fn = void (*)(JSObject * obj);
  setupUnalignedABICall(temp2);
  passABIArg(obj);
  callWithABI<Fn, TraceCreateObject>();

  PopRegsInMask(save);
#endif
}
// Emit a fast path comparing two string pointers for |op|. On success
// |result| holds the boolean outcome; jumps to |fail| whenever the
// comparison cannot be decided from pointers/flags/lengths alone (the
// caller is expected to fall back to a VM call there).
void MacroAssembler::compareStrings(JSOp op, Register left, Register right,
                                    Register result, Label* fail) {
  MOZ_ASSERT(left != result);
  MOZ_ASSERT(right != result);
  MOZ_ASSERT(IsEqualityOp(op) || IsRelationalOp(op));

  Label notPointerEqual;

  // If operands point to the same instance, the strings are trivially equal.
  // For relational ops, unequal pointers mean we can't decide inline.
  branchPtr(Assembler::NotEqual, left, right,
            IsEqualityOp(op) ? &notPointerEqual : fail);
  // Identical strings: true for ==/===/<=/>= and false for the other ops.
  move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
               op == JSOp::Ge),
         result);

  if (IsEqualityOp(op)) {
    Label done;
    jump(&done);

    bind(&notPointerEqual);

    Label leftIsNotAtom;
    Label setNotEqualResult;
    // Atoms cannot be equal to each other if they point to different strings.
    Imm32 atomBit(JSString::ATOM_BIT);
    branchTest32(Assembler::Zero, Address(left, JSString::offsetOfFlags()),
                 atomBit, &leftIsNotAtom);
    branchTest32(Assembler::NonZero, Address(right, JSString::offsetOfFlags()),
                 atomBit, &setNotEqualResult);

    bind(&leftIsNotAtom);
    // Strings of different length can never be equal. When the lengths are
    // equal, we must compare characters, so take the fallback path.
    loadStringLength(left, result);
    branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()),
             result, fail);

    bind(&setNotEqualResult);
    move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), result);

    bind(&done);
  }
}
// Load a pointer to |str|'s character storage into |dest|: the inline
// storage address for inline strings, otherwise the non-inline chars
// pointer. The caller is responsible for |str| being a linear string whose
// characters match |encoding|; with Spectre mitigations enabled, violations
// of that precondition are additionally blocked speculatively.
void MacroAssembler::loadStringChars(Register str, Register dest,
                                     CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    if (encoding == CharEncoding::Latin1) {
      // If the string is a rope, zero the |str| register. The code below
      // depends on str->flags so this should block speculative execution.
      movePtr(ImmWord(0), dest);
      test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                    Imm32(JSString::LINEAR_BIT), dest, str);
    } else {
      // If we're loading TwoByte chars, there's an additional risk:
      // if the string has Latin1 chars, we could read out-of-bounds. To
      // prevent this, we check both the Linear and Latin1 bits. We don't
      // have a scratch register, so we use these flags also to block
      // speculative execution, similar to the use of 0 above.
      MOZ_ASSERT(encoding == CharEncoding::TwoByte);
      static constexpr uint32_t Mask =
          JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT;
      static_assert(Mask < 1024,
                    "Mask should be a small, near-null value to ensure we "
                    "block speculative execution when it's used as string "
                    "pointer");

      // |dest| = str->flags & Mask. If that's anything but exactly
      // LINEAR_BIT (i.e. the string is a rope or stores Latin1 chars),
      // replace |str| with the near-null masked value.
      move32(Imm32(Mask), dest);
      and32(Address(str, JSString::offsetOfFlags()), dest);

      cmp32MovePtr(Assembler::NotEqual, dest, Imm32(JSString::LINEAR_BIT), dest,
                   str);
    }
  }

  // Load the inline chars.
  computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
                          dest);

  // If it's not an inline string, load the non-inline chars. Use a
  // conditional move to prevent speculative execution.
  test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                Imm32(JSString::INLINE_CHARS_BIT),
                Address(str, JSString::offsetOfNonInlineChars()), dest);
}
// Load |str|'s non-inline chars pointer into |dest|. The caller is
// responsible for |str| being a linear string with out-of-line characters
// matching |encoding|; the Spectre path blocks speculative violations of
// that precondition.
void MacroAssembler::loadNonInlineStringChars(Register str, Register dest,
                                              CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is a rope, has inline chars, or has a different
    // character encoding, set str to a near-null value to prevent
    // speculative execution below (when reading str->nonInlineChars).
    static constexpr uint32_t Mask = JSString::LINEAR_BIT |
                                     JSString::INLINE_CHARS_BIT |
                                     JSString::LATIN1_CHARS_BIT;
    static_assert(Mask < 1024,
                  "Mask should be a small, near-null value to ensure we "
                  "block speculative execution when it's used as string "
                  "pointer");

    // Expected flag bits for a linear, non-inline string of this encoding.
    uint32_t expectedBits = JSString::LINEAR_BIT;
    if (encoding == CharEncoding::Latin1) {
      expectedBits |= JSString::LATIN1_CHARS_BIT;
    }

    move32(Imm32(Mask), dest);
    and32(Address(str, JSString::offsetOfFlags()), dest);

    cmp32MovePtr(Assembler::NotEqual, dest, Imm32(expectedBits), dest, str);
  }

  loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
}
// Write |chars| into |str|'s non-inline chars slot.
void MacroAssembler::storeNonInlineStringChars(Register chars, Register str) {
  MOZ_ASSERT(chars != str);
  Address charsSlot(str, JSString::offsetOfNonInlineChars());
  storePtr(chars, charsSlot);
}
// Compute the address of |str|'s inline character storage into |dest|.
// Unlike loadInlineStringChars, this variant performs no Spectre guard.
void MacroAssembler::loadInlineStringCharsForStore(Register str,
                                                   Register dest) {
  Address inlineStorage(str, JSInlineString::offsetOfInlineStorage());
  computeEffectiveAddress(inlineStorage, dest);
}
// Load the address of |str|'s inline character storage into |dest|.
void MacroAssembler::loadInlineStringChars(Register str, Register dest,
                                           CharEncoding encoding) {
  MOZ_ASSERT(str != dest);

  if (!JitOptions.spectreStringMitigations) {
    computeEffectiveAddress(
        Address(str, JSInlineString::offsetOfInlineStorage()), dest);
    return;
  }

  // Making this Spectre-safe is a bit complicated: using
  // computeEffectiveAddress and then zeroing the output register if
  // non-inline is not sufficient: when the index is very large, it would
  // allow reading |nullptr + index|. Just fall back to loadStringChars
  // for now.
  loadStringChars(str, dest, encoding);
}
// Load the left child of the rope |str| into |dest|.
void MacroAssembler::loadRopeLeftChild(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  Address leftChild(str, JSRope::offsetOfLeft());

  if (!JitOptions.spectreStringMitigations) {
    loadPtr(leftChild, dest);
    return;
  }

  // Zero the output register if the input was not a rope.
  movePtr(ImmWord(0), dest);
  test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                Imm32(JSString::LINEAR_BIT), leftChild, dest);
}
// Load the right child of the rope |str| into |dest|.
void MacroAssembler::loadRopeRightChild(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  Address rightChild(str, JSRope::offsetOfRight());

  if (!JitOptions.spectreStringMitigations) {
    loadPtr(rightChild, dest);
    return;
  }

  // Zero the output register if the input was not a rope.
  movePtr(ImmWord(0), dest);
  test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                Imm32(JSString::LINEAR_BIT), rightChild, dest);
}
// Store |left| and |right| as the two children of the rope |str|.
void MacroAssembler::storeRopeChildren(Register left, Register right,
                                       Register str) {
  Address leftSlot(str, JSRope::offsetOfLeft());
  Address rightSlot(str, JSRope::offsetOfRight());
  storePtr(left, leftSlot);
  storePtr(right, rightSlot);
}
// Load the base string of the dependent string |str| into |dest|.
void MacroAssembler::loadDependentStringBase(Register str, Register dest) {
  MOZ_ASSERT(str != dest);

  if (JitOptions.spectreStringMitigations) {
    // If the string is not a dependent string, zero the |str| register.
    // The code below loads str->base so this should block speculative
    // execution.
    movePtr(ImmWord(0), dest);
    test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                  Imm32(JSString::DEPENDENT_BIT), dest, str);
  }

  loadPtr(Address(str, JSDependentString::offsetOfBase()), dest);
}
// Overwrite the base-string pointer of the dependent string |str|.
void MacroAssembler::storeDependentStringBase(Register base, Register str) {
  Address baseSlot(str, JSDependentString::offsetOfBase());
  storePtr(base, baseSlot);
}
// Load the character at |index| of |str| into |output|. Handles a single
// rope level: if |str| is a rope the char is fetched from the left or right
// child; if that child is itself a rope, jumps to |fail|. |scratch1| and
// |scratch2| are clobbered. Note: |index| is assumed to be within
// str->length — no final bounds check is emitted for the right child.
void MacroAssembler::loadStringChar(Register str, Register index,
                                    Register output, Register scratch1,
                                    Register scratch2, Label* fail) {
  MOZ_ASSERT(str != output);
  MOZ_ASSERT(str != index);
  MOZ_ASSERT(index != output);
  MOZ_ASSERT(output != scratch1);
  MOZ_ASSERT(output != scratch2);

  // Use scratch1 for the index (adjusted below).
  move32(index, scratch1);
  movePtr(str, output);

  // This follows JSString::getChar.
  Label notRope;
  branchIfNotRope(str, &notRope);

  loadRopeLeftChild(str, output);

  // Check if the index is contained in the leftChild.
  Label loadedChild, notInLeft;
  spectreBoundsCheck32(scratch1, Address(output, JSString::offsetOfLength()),
                       scratch2, &notInLeft);
  jump(&loadedChild);

  // The index must be in the rightChild.
  // index -= rope->leftChild()->length()
  bind(&notInLeft);
  sub32(Address(output, JSString::offsetOfLength()), scratch1);
  loadRopeRightChild(str, output);

  // If the left or right side is another rope, give up.
  bind(&loadedChild);
  branchIfRope(output, fail);

  bind(&notRope);

  Label isLatin1, done;
  // We have to check the left/right side for ropes,
  // because a TwoByte rope might have a Latin1 child.
  branchLatin1String(output, &isLatin1);
  loadStringChars(output, scratch2, CharEncoding::TwoByte);
  loadChar(scratch2, scratch1, output, CharEncoding::TwoByte);
  jump(&done);

  bind(&isLatin1);
  loadStringChars(output, scratch2, CharEncoding::Latin1);
  loadChar(scratch2, scratch1, output, CharEncoding::Latin1);

  bind(&done);
}
// Load the index value cached in |str|'s flags word into |dest|. Jumps to
// |fail| when the string has no cached index value.
void MacroAssembler::loadStringIndexValue(Register str, Register dest,
                                          Label* fail) {
  MOZ_ASSERT(str != dest);

  load32(Address(str, JSString::offsetOfFlags()), dest);

  // Does not have a cached index value.
  branchTest32(Assembler::Zero, dest, Imm32(JSString::INDEX_VALUE_BIT), fail);

  // Extract the index.
  rshift32(Imm32(JSString::INDEX_VALUE_SHIFT), dest);
}
// Load the character at chars[index] (plus |offset| bytes) into |dest|,
// scaling the index by the character width of |encoding|.
void MacroAssembler::loadChar(Register chars, Register index, Register dest,
                              CharEncoding encoding, int32_t offset /* = 0 */) {
  // Latin-1 chars are one byte wide, TwoByte chars are two bytes wide.
  Scale scale = (encoding == CharEncoding::Latin1) ? TimesOne : TimesTwo;
  loadChar(BaseIndex(chars, index, scale, offset), dest, encoding);
}
// Advance the character pointer |chars| by |index| characters of the given
// encoding.
void MacroAssembler::addToCharPtr(Register chars, Register index,
                                  CharEncoding encoding) {
  if (encoding != CharEncoding::Latin1) {
    // TwoByte characters are two bytes wide; scale the index accordingly.
    computeEffectiveAddress(BaseIndex(chars, index, TimesTwo), chars);
    return;
  }

  static_assert(sizeof(char) == 1,
                "Latin-1 string index shouldn't need scaling");
  addPtr(index, chars);
}
// Load a pointer to |bigInt|'s digit storage into |digits|: the inline
// digits when the digit count fits inline, the heap digits otherwise.
void MacroAssembler::loadBigIntDigits(Register bigInt, Register digits) {
  MOZ_ASSERT(digits != bigInt);

  // Load the inline digits.
  computeEffectiveAddress(Address(bigInt, BigInt::offsetOfInlineDigits()),
                          digits);

  // If inline digits aren't used, load the heap digits. Use a conditional move
  // to prevent speculative execution.
  cmp32LoadPtr(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
               Imm32(int32_t(BigInt::inlineDigitsLength())),
               Address(bigInt, BigInt::offsetOfHeapDigits()), digits);
}
// Convert |bigInt| to a signed 64-bit integer in |dest|, following
// |BigInt::toUint64()|. Zero BigInts produce 0; otherwise the first one or
// two digits are loaded and the result is negated when the sign bit is set.
// The caller must ensure the value fits in 64 bits.
void MacroAssembler::loadBigInt64(Register bigInt, Register64 dest) {
  // This code follows the implementation of |BigInt::toUint64()|. We're also
  // using it for inline callers of |BigInt::toInt64()|, which works, because
  // all supported Jit architectures use a two's complement representation for
  // int64 values, which means the WrapToSigned call in toInt64() is a no-op.

  Label done, nonZero;

  // A zero BigInt stores no digits; produce 0 directly.
  branchIfBigIntIsNonZero(bigInt, &nonZero);
  {
    move64(Imm64(0), dest);
    jump(&done);
  }
  bind(&nonZero);

#ifdef JS_PUNBOX64
  Register digits = dest.reg;
#else
  Register digits = dest.high;
#endif

  loadBigIntDigits(bigInt, digits);

// Use #ifdef (not #if) to match the test above; #if would be a preprocessor
// error if JS_PUNBOX64 were ever defined without a value.
#ifdef JS_PUNBOX64
  // Load the first digit into the destination register.
  load64(Address(digits, 0), dest);
#else
  // Load the first digit into the destination register's low value.
  load32(Address(digits, 0), dest.low);

  // And conditionally load the second digit into the high value register.
  Label twoDigits, digitsDone;
  branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(1), &twoDigits);
  {
    move32(Imm32(0), dest.high);
    jump(&digitsDone);
  }
  {
    bind(&twoDigits);
    load32(Address(digits, sizeof(BigInt::Digit)), dest.high);
  }
  bind(&digitsDone);
#endif

  // Digits store the absolute value; negate when the sign bit is set.
  branchTest32(Assembler::Zero, Address(bigInt, BigInt::offsetOfFlags()),
               Imm32(BigInt::signBitMask()), &done);
  neg64(dest);

  bind(&done);
}
// Load |bigInt|'s first (least-significant) digit into |dest|, or zero when
// the BigInt has no digits.
void MacroAssembler::loadFirstBigIntDigitOrZero(Register bigInt,
                                                Register dest) {
  Label hasDigits, loaded;
  branchIfBigIntIsNonZero(bigInt, &hasDigits);

  // Zero BigInts store no digits; the result is simply 0.
  movePtr(ImmWord(0), dest);
  jump(&loaded);

  bind(&hasDigits);
  loadBigIntDigits(bigInt, dest);
  // Load the first digit into the destination register.
  loadPtr(Address(dest, 0), dest);

  bind(&loaded);
}
// Convert |bigInt| to a signed pointer-sized integer in |dest|, jumping to
// |fail| when it cannot be represented. Zero BigInts yield 0.
void MacroAssembler::loadBigInt(Register bigInt, Register dest, Label* fail) {
  Label hasDigits, finished;
  branchIfBigIntIsNonZero(bigInt, &hasDigits);

  // A zero BigInt trivially converts to 0.
  movePtr(ImmWord(0), dest);
  jump(&finished);

  bind(&hasDigits);
  loadBigIntNonZero(bigInt, dest, fail);

  bind(&finished);
}
// Convert the non-zero BigInt |bigInt| to a signed pointer-sized integer in
// |dest|. Jumps to |fail| when the value uses more than one digit or the
// digit doesn't fit in intptr_t. The BigInt must be non-zero (asserted in
// debug builds).
void MacroAssembler::loadBigIntNonZero(Register bigInt, Register dest,
                                       Label* fail) {
  MOZ_ASSERT(bigInt != dest);

#ifdef DEBUG
  Label nonZero;
  branchIfBigIntIsNonZero(bigInt, &nonZero);
  assumeUnreachable("Unexpected zero BigInt");
  bind(&nonZero);
#endif

  // More than one digit can't be represented in an intptr_t.
  branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
           Imm32(1), fail);

  static_assert(BigInt::inlineDigitsLength() > 0,
                "Single digit BigInts use inline storage");

  // Load the first inline digit into the destination register.
  loadPtr(Address(bigInt, BigInt::offsetOfInlineDigits()), dest);

  // Return as a signed pointer.
  bigIntDigitToSignedPtr(bigInt, dest, fail);
}
// Convert the absolute-valued BigInt digit in |digit| to a signed
// pointer-sized integer, applying |bigInt|'s sign in place. Jumps to |fail|
// when the digit's top bit is set, i.e. it can't be represented as a
// non-negative intptr_t before the sign is applied.
void MacroAssembler::bigIntDigitToSignedPtr(Register bigInt, Register digit,
                                            Label* fail) {
  // BigInt digits are stored as absolute numbers. Take the failure path when
  // the digit can't be stored in intptr_t.
  branchTestPtr(Assembler::Signed, digit, digit, fail);

  // Negate |digit| when the BigInt is negative.
  Label nonNegative;
  branchIfBigIntIsNonNegative(bigInt, &nonNegative);
  negPtr(digit);
  bind(&nonNegative);
}
void MacroAssembler::loadBigIntAbsolute(Register bigInt, Register dest,
Label* fail) {
MOZ_ASSERT(bigInt != dest);
branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
Imm32(1), fail);
static_assert(BigInt::inlineDigitsLength() > 0,
"Single digit BigInts use inline storage");
// Load the first inline digit into the destination register.
movePtr(ImmWord(0), dest);
cmp32LoadPtr(Assembler::NotEqual, Address(bigInt, BigInt::offsetOfLength()),
Imm32(0), Address(bigInt, BigInt::offsetOfInlineDigits()), dest);