/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/MacroAssembler-inl.h"
#include "mozilla/FloatingPoint.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/XorShift128PlusRNG.h"
#include <algorithm>
#include <utility>
#include "jit/AtomicOp.h"
#include "jit/AtomicOperations.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineJIT.h"
#include "jit/JitFrames.h"
#include "jit/JitOptions.h"
#include "jit/JitRuntime.h"
#include "jit/JitScript.h"
#include "jit/MoveEmitter.h"
#include "jit/ReciprocalMulConstants.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/Simulator.h"
#include "jit/VMFunctions.h"
#include "js/Conversions.h"
#include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
#include "js/ScalarType.h" // js::Scalar::Type
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/BoundFunctionObject.h"
#include "vm/FunctionFlags.h" // js::FunctionFlags
#include "vm/Iteration.h"
#include "vm/JSContext.h"
#include "vm/JSFunction.h"
#include "vm/TypedArrayObject.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCodegenConstants.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmGcObject.h"
#include "wasm/WasmInstanceData.h"
#include "wasm/WasmMemory.h"
#include "wasm/WasmTypeDef.h"
#include "wasm/WasmValidate.h"
#include "jit/TemplateObject-inl.h"
#include "vm/BytecodeUtil-inl.h"
#include "vm/Interpreter-inl.h"
#include "vm/JSObject-inl.h"
using namespace js;
using namespace js::jit;
using JS::GenericNaN;
using JS::ToInt32;
using mozilla::CheckedInt;
TrampolinePtr MacroAssembler::preBarrierTrampoline(MIRType type) {
const JitRuntime* rt = runtime()->jitRuntime();
return rt->preBarrier(type);
}
template <typename S, typename T>
static void StoreToTypedFloatArray(MacroAssembler& masm, int arrayType,
const S& value, const T& dest) {
switch (arrayType) {
case Scalar::Float32:
masm.storeFloat32(value, dest);
break;
case Scalar::Float64:
masm.storeDouble(value, dest);
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
FloatRegister value,
const BaseIndex& dest) {
StoreToTypedFloatArray(*this, arrayType, value, dest);
}
void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
FloatRegister value,
const Address& dest) {
StoreToTypedFloatArray(*this, arrayType, value, dest);
}
template <typename S, typename T>
static void StoreToTypedBigIntArray(MacroAssembler& masm,
Scalar::Type arrayType, const S& value,
const T& dest) {
MOZ_ASSERT(Scalar::isBigIntType(arrayType));
masm.store64(value, dest);
}
void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
Register64 value,
const BaseIndex& dest) {
StoreToTypedBigIntArray(*this, arrayType, value, dest);
}
void MacroAssembler::storeToTypedBigIntArray(Scalar::Type arrayType,
Register64 value,
const Address& dest) {
StoreToTypedBigIntArray(*this, arrayType, value, dest);
}
void MacroAssembler::boxUint32(Register source, ValueOperand dest,
Uint32Mode mode, Label* fail) {
switch (mode) {
// Fail if the value does not fit in an int32.
case Uint32Mode::FailOnDouble: {
branchTest32(Assembler::Signed, source, source, fail);
tagValue(JSVAL_TYPE_INT32, source, dest);
break;
}
case Uint32Mode::ForceDouble: {
// Always convert the value to double.
ScratchDoubleScope fpscratch(*this);
convertUInt32ToDouble(source, fpscratch);
boxDouble(fpscratch, dest, fpscratch);
break;
}
}
}
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
AnyRegister dest, Register temp,
Label* fail) {
switch (arrayType) {
case Scalar::Int8:
load8SignExtend(src, dest.gpr());
break;
case Scalar::Uint8:
case Scalar::Uint8Clamped:
load8ZeroExtend(src, dest.gpr());
break;
case Scalar::Int16:
load16SignExtend(src, dest.gpr());
break;
case Scalar::Uint16:
load16ZeroExtend(src, dest.gpr());
break;
case Scalar::Int32:
load32(src, dest.gpr());
break;
case Scalar::Uint32:
if (dest.isFloat()) {
load32(src, temp);
convertUInt32ToDouble(temp, dest.fpu());
} else {
load32(src, dest.gpr());
// Bail out if the value doesn't fit into a signed int32 value. This
// is what allows MLoadUnboxedScalar to have a type() of
// MIRType::Int32 for UInt32 array loads.
branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
}
break;
case Scalar::Float32:
loadFloat32(src, dest.fpu());
canonicalizeFloat(dest.fpu());
break;
case Scalar::Float64:
loadDouble(src, dest.fpu());
canonicalizeDouble(dest.fpu());
break;
case Scalar::BigInt64:
case Scalar::BigUint64:
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
const Address& src,
AnyRegister dest,
Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
const BaseIndex& src,
AnyRegister dest,
Register temp, Label* fail);
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
const ValueOperand& dest,
Uint32Mode uint32Mode, Register temp,
Label* fail) {
switch (arrayType) {
case Scalar::Int8:
case Scalar::Uint8:
case Scalar::Uint8Clamped:
case Scalar::Int16:
case Scalar::Uint16:
case Scalar::Int32:
loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()),
InvalidReg, nullptr);
tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
break;
case Scalar::Uint32:
// Don't clobber dest when we could fail, instead use temp.
load32(src, temp);
boxUint32(temp, dest, uint32Mode, fail);
break;
case Scalar::Float32: {
ScratchDoubleScope dscratch(*this);
FloatRegister fscratch = dscratch.asSingle();
loadFromTypedArray(arrayType, src, AnyRegister(fscratch),
dest.scratchReg(), nullptr);
convertFloat32ToDouble(fscratch, dscratch);
boxDouble(dscratch, dest, dscratch);
break;
}
case Scalar::Float64: {
ScratchDoubleScope fpscratch(*this);
loadFromTypedArray(arrayType, src, AnyRegister(fpscratch),
dest.scratchReg(), nullptr);
boxDouble(fpscratch, dest, fpscratch);
break;
}
case Scalar::BigInt64:
case Scalar::BigUint64:
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
const Address& src,
const ValueOperand& dest,
Uint32Mode uint32Mode,
Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
const BaseIndex& src,
const ValueOperand& dest,
Uint32Mode uint32Mode,
Register temp, Label* fail);
template <typename T>
void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
const T& src, Register bigInt,
Register64 temp) {
MOZ_ASSERT(Scalar::isBigIntType(arrayType));
load64(src, temp);
initializeBigInt64(arrayType, bigInt, temp);
}
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
const Address& src,
Register bigInt,
Register64 temp);
template void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
const BaseIndex& src,
Register bigInt,
Register64 temp);
// Inlined version of gc::CheckAllocatorState that checks the bare essentials
// and bails for anything that cannot be handled with our jit allocators.
void MacroAssembler::checkAllocatorState(Register temp, gc::AllocKind allocKind,
Label* fail) {
// Don't execute the inline path if GC probes are built in.
#ifdef JS_GC_PROBES
jump(fail);
#endif
#ifdef JS_GC_ZEAL
// Don't execute the inline path if gc zeal or tracing are active.
const uint32_t* ptrZealModeBits = runtime()->addressOfGCZealModeBits();
branch32(Assembler::NotEqual, AbsoluteAddress(ptrZealModeBits), Imm32(0),
fail);
#endif
// If the zone has a realm with an object allocation metadata hook, emit a
// guard for this. Note that IC stubs and some other trampolines can be shared
// across realms, so we don't bake in a realm pointer.
if (gc::IsObjectAllocKind(allocKind) &&
realm()->zone()->hasRealmWithAllocMetadataBuilder()) {
loadJSContext(temp);
loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
branchPtr(Assembler::NotEqual,
Address(temp, Realm::offsetOfAllocationMetadataBuilder()),
ImmWord(0), fail);
}
}
bool MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind,
gc::Heap initialHeap) {
// Note that Ion elides barriers on writes to objects known to be in the
// nursery, so any allocation that can be made into the nursery must be made
// into the nursery, even if the nursery is disabled. At runtime these will
// take the out-of-line path, which is required to insert a barrier for the
// initializing writes.
return IsNurseryAllocable(allocKind) && initialHeap != gc::Heap::Tenured;
}
// Inline version of Nursery::allocateObject. If the object has dynamic slots,
// this fills in the slots_ pointer.
void MacroAssembler::nurseryAllocateObject(Register result, Register temp,
gc::AllocKind allocKind,
size_t nDynamicSlots, Label* fail,
const AllocSiteInput& allocSite) {
MOZ_ASSERT(IsNurseryAllocable(allocKind));
// Currently the JIT does not nursery allocate foreground finalized
// objects. This is allowed for objects that support this and have the
// JSCLASS_SKIP_NURSERY_FINALIZE class flag set. It's hard to assert that here
// though so disallow all foreground finalized objects for now.
MOZ_ASSERT(!IsForegroundFinalized(allocKind));
// We still need to allocate in the nursery, per the comment in
// shouldNurseryAllocate; however, we need to insert into the
// mallocedBuffers set, so bail to do the nursery allocation in the
// interpreter.
if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
jump(fail);
return;
}
// Check whether this allocation site needs pretenuring. This dynamic check
// only happens for baseline code.
if (allocSite.is<Register>()) {
Register site = allocSite.as<Register>();
branchTestPtr(Assembler::NonZero,
Address(site, gc::AllocSite::offsetOfScriptAndState()),
Imm32(gc::AllocSite::LONG_LIVED_BIT), fail);
}
// No explicit check for nursery.isEnabled() is needed, as the comparison
// with the nursery's end will always fail in such cases.
CompileZone* zone = realm()->zone();
size_t thingSize = gc::Arena::thingSize(allocKind);
size_t totalSize = thingSize;
if (nDynamicSlots) {
totalSize += ObjectSlots::allocSize(nDynamicSlots);
}
MOZ_ASSERT(totalSize < INT32_MAX);
MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::Object,
totalSize, allocSite);
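// When dynamic slots were requested they are laid out directly after the
// object in the same nursery allocation: an ObjectSlots header followed by
// the slot values. Initialize that header and point the object's slots_
// field at the first slot.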
if (nDynamicSlots) {
store32(Imm32(nDynamicSlots),
Address(result, thingSize + ObjectSlots::offsetOfCapacity()));
store32(
Imm32(0),
Address(result, thingSize + ObjectSlots::offsetOfDictionarySlotSpan()));
store64(Imm64(ObjectSlots::NoUniqueIdInDynamicSlots),
Address(result, thingSize + ObjectSlots::offsetOfMaybeUniqueId()));
computeEffectiveAddress(
Address(result, thingSize + ObjectSlots::offsetOfSlots()), temp);
storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
}
}
// Inlined version of FreeSpan::allocate. This does not fill in slots_.
void MacroAssembler::freeListAllocate(Register result, Register temp,
gc::AllocKind allocKind, Label* fail) {
CompileZone* zone = realm()->zone();
int thingSize = int(gc::Arena::thingSize(allocKind));
Label fallback;
Label success;
// Load the first and last offsets of |zone|'s free list for |allocKind|.
// If there is no room remaining in the span, fall back to get the next one.
gc::FreeSpan** ptrFreeList = zone->addressOfFreeList(allocKind);
loadPtr(AbsoluteAddress(ptrFreeList), temp);
load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
branch32(Assembler::AboveOrEqual, result, temp, &fallback);
// Bump the offset for the next allocation.
add32(Imm32(thingSize), result);
loadPtr(AbsoluteAddress(ptrFreeList), temp);
store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
sub32(Imm32(thingSize), result);
addPtr(temp, result); // Turn the offset into a pointer.
jump(&success);
bind(&fallback);
// If there are no free spans left, we bail to finish the allocation. The
// interpreter will call the GC allocator to set up a new arena to allocate
// from, after which we can resume allocating in the jit.
branchTest32(Assembler::Zero, result, result, fail);
loadPtr(AbsoluteAddress(ptrFreeList), temp);
addPtr(temp, result); // Turn the offset into a pointer.
Push(result);
// Update the free list to point to the next span (which may be empty).
load32(Address(result, 0), result);
store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
Pop(result);
bind(&success);
if (runtime()->geckoProfiler().enabled()) {
uint32_t* countAddress = zone->addressOfTenuredAllocCount();
movePtr(ImmPtr(countAddress), temp);
add32(Imm32(1), Address(temp, 0));
}
}
void MacroAssembler::callFreeStub(Register slots) {
// This register must match the one in JitRuntime::generateFreeStub.
const Register regSlots = CallTempReg0;
push(regSlots);
movePtr(slots, regSlots);
call(runtime()->jitRuntime()->freeStub());
pop(regSlots);
}
// Inlined equivalent of gc::AllocateObject, without failure case handling.
void MacroAssembler::allocateObject(Register result, Register temp,
gc::AllocKind allocKind,
uint32_t nDynamicSlots,
gc::Heap initialHeap, Label* fail,
const AllocSiteInput& allocSite) {
MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
checkAllocatorState(temp, allocKind, fail);
if (shouldNurseryAllocate(allocKind, initialHeap)) {
MOZ_ASSERT(initialHeap == gc::Heap::Default);
return nurseryAllocateObject(result, temp, allocKind, nDynamicSlots, fail,
allocSite);
}
// Fall back to calling into the VM to allocate objects in the tenured heap
// that have dynamic slots.
if (nDynamicSlots) {
jump(fail);
return;
}
return freeListAllocate(result, temp, allocKind, fail);
}
void MacroAssembler::createGCObject(Register obj, Register temp,
const TemplateObject& templateObj,
gc::Heap initialHeap, Label* fail,
bool initContents /* = true */) {
gc::AllocKind allocKind = templateObj.getAllocKind();
MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
uint32_t nDynamicSlots = 0;
if (templateObj.isNativeObject()) {
const TemplateNativeObject& ntemplate =
templateObj.asTemplateNativeObject();
nDynamicSlots = ntemplate.numDynamicSlots();
}
allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
initGCThing(obj, temp, templateObj, initContents);
}
void MacroAssembler::createPlainGCObject(
Register result, Register shape, Register temp, Register temp2,
uint32_t numFixedSlots, uint32_t numDynamicSlots, gc::AllocKind allocKind,
gc::Heap initialHeap, Label* fail, const AllocSiteInput& allocSite,
bool initContents /* = true */) {
MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");
// Allocate object.
allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
allocSite);
// Initialize shape field.
storePtr(shape, Address(result, JSObject::offsetOfShape()));
// If the object has dynamic slots, allocateObject will initialize
// the slots field. If not, we must initialize it now.
if (numDynamicSlots == 0) {
storePtr(ImmPtr(emptyObjectSlots),
Address(result, NativeObject::offsetOfSlots()));
}
// Initialize elements field.
storePtr(ImmPtr(emptyObjectElements),
Address(result, NativeObject::offsetOfElements()));
// Initialize fixed slots.
if (initContents) {
fillSlotsWithUndefined(Address(result, NativeObject::getFixedSlotOffset(0)),
temp, 0, numFixedSlots);
}
// Initialize dynamic slots.
if (numDynamicSlots > 0) {
loadPtr(Address(result, NativeObject::offsetOfSlots()), temp2);
fillSlotsWithUndefined(Address(temp2, 0), temp, 0, numDynamicSlots);
}
}
void MacroAssembler::createArrayWithFixedElements(
Register result, Register shape, Register temp, Register dynamicSlotsTemp,
uint32_t arrayLength, uint32_t arrayCapacity, uint32_t numUsedDynamicSlots,
uint32_t numDynamicSlots, gc::AllocKind allocKind, gc::Heap initialHeap,
Label* fail, const AllocSiteInput& allocSite) {
MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
MOZ_ASSERT(shape != temp,
"shape can overlap with dynamicSlotsTemp, but not temp");
MOZ_ASSERT(result != temp);
// This only supports allocating arrays with fixed elements and does not
// support any dynamic elements.
MOZ_ASSERT(arrayCapacity >= arrayLength);
MOZ_ASSERT(gc::GetGCKindSlots(allocKind) >=
arrayCapacity + ObjectElements::VALUES_PER_HEADER);
MOZ_ASSERT(numUsedDynamicSlots <= numDynamicSlots);
// Allocate object.
allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
allocSite);
// Initialize shape field.
storePtr(shape, Address(result, JSObject::offsetOfShape()));
// If the object has dynamic slots, allocateObject will initialize
// the slots field. If not, we must initialize it now.
if (numDynamicSlots == 0) {
storePtr(ImmPtr(emptyObjectSlots),
Address(result, NativeObject::offsetOfSlots()));
}
// Initialize elements pointer for fixed (inline) elements.
computeEffectiveAddress(
Address(result, NativeObject::offsetOfFixedElements()), temp);
storePtr(temp, Address(result, NativeObject::offsetOfElements()));
// Initialize elements header.
store32(Imm32(ObjectElements::FIXED),
Address(temp, ObjectElements::offsetOfFlags()));
store32(Imm32(0), Address(temp, ObjectElements::offsetOfInitializedLength()));
store32(Imm32(arrayCapacity),
Address(temp, ObjectElements::offsetOfCapacity()));
store32(Imm32(arrayLength), Address(temp, ObjectElements::offsetOfLength()));
// Initialize dynamic slots.
if (numUsedDynamicSlots > 0) {
MOZ_ASSERT(dynamicSlotsTemp != temp);
MOZ_ASSERT(dynamicSlotsTemp != InvalidReg);
loadPtr(Address(result, NativeObject::offsetOfSlots()), dynamicSlotsTemp);
fillSlotsWithUndefined(Address(dynamicSlotsTemp, 0), temp, 0,
numUsedDynamicSlots);
}
}
// Inline version of Nursery::allocateString.
void MacroAssembler::nurseryAllocateString(Register result, Register temp,
gc::AllocKind allocKind,
Label* fail) {
MOZ_ASSERT(IsNurseryAllocable(allocKind));
// No explicit check for nursery.isEnabled() is needed, as the comparison
// with the nursery's end will always fail in such cases.
CompileZone* zone = realm()->zone();
size_t thingSize = gc::Arena::thingSize(allocKind);
bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::String,
thingSize);
}
// Inline version of Nursery::allocateBigInt.
void MacroAssembler::nurseryAllocateBigInt(Register result, Register temp,
Label* fail) {
MOZ_ASSERT(IsNurseryAllocable(gc::AllocKind::BIGINT));
// No explicit check for nursery.isEnabled() is needed, as the comparison
// with the nursery's end will always fail in such cases.
CompileZone* zone = realm()->zone();
size_t thingSize = gc::Arena::thingSize(gc::AllocKind::BIGINT);
bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::BigInt,
thingSize);
}
static bool IsNurseryAllocEnabled(CompileZone* zone, JS::TraceKind kind) {
switch (kind) {
case JS::TraceKind::Object:
return zone->allocNurseryObjects();
case JS::TraceKind::String:
return zone->allocNurseryStrings();
case JS::TraceKind::BigInt:
return zone->allocNurseryBigInts();
default:
MOZ_CRASH("Bad nursery allocation kind");
}
}
void MacroAssembler::bumpPointerAllocate(Register result, Register temp,
Label* fail, CompileZone* zone,
JS::TraceKind traceKind, uint32_t size,
const AllocSiteInput& allocSite) {
MOZ_ASSERT(size >= gc::MinCellSize);
uint32_t totalSize = size + Nursery::nurseryCellHeaderSize();
MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
// We know statically whether nursery allocation is enabled for a particular
// kind because we discard JIT code when this changes.
if (!IsNurseryAllocEnabled(zone, traceKind)) {
jump(fail);
return;
}
// Use a relative 32-bit offset from the nursery's position_ to its
// currentEnd_ to avoid 64-bit immediate loads.
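// The allocation itself is a bump of the nursery position pointer; roughly:
//
//   result = *position;
//   result += totalSize;
//   if (*currentEnd < result) goto fail;  // nursery chunk is full
//   *position = result;
//   result -= size;  // result points at the new cell, just after its header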
void* posAddr = zone->addressOfNurseryPosition();
int32_t endOffset = Nursery::offsetOfCurrentEndFromPosition();
movePtr(ImmPtr(posAddr), temp);
loadPtr(Address(temp, 0), result);
addPtr(Imm32(totalSize), result);
branchPtr(Assembler::Below, Address(temp, endOffset), result, fail);
storePtr(result, Address(temp, 0));
subPtr(Imm32(size), result);
if (allocSite.is<gc::CatchAllAllocSite>()) {
// No allocation site supplied. This is the case when called from Warp, or
// from places that don't support pretenuring.
gc::CatchAllAllocSite siteKind = allocSite.as<gc::CatchAllAllocSite>();
gc::AllocSite* site = zone->catchAllAllocSite(traceKind, siteKind);
uintptr_t headerWord = gc::NurseryCellHeader::MakeValue(site, traceKind);
storePtr(ImmWord(headerWord),
Address(result, -js::Nursery::nurseryCellHeaderSize()));
// Update the catch all allocation site for strings or if the profiler is
// enabled. This is used to calculate the nursery allocation count. The
// string data is used to determine whether to disable nursery string
// allocation.
if (traceKind == JS::TraceKind::String ||
runtime()->geckoProfiler().enabled()) {
uint32_t* countAddress = site->nurseryAllocCountAddress();
CheckedInt<int32_t> counterOffset =
(CheckedInt<uintptr_t>(uintptr_t(countAddress)) -
CheckedInt<uintptr_t>(uintptr_t(posAddr)))
.toChecked<int32_t>();
if (counterOffset.isValid()) {
add32(Imm32(1), Address(temp, counterOffset.value()));
} else {
movePtr(ImmPtr(countAddress), temp);
add32(Imm32(1), Address(temp, 0));
}
}
} else {
// Update allocation site and store pointer in the nursery cell header. This
// is only used from baseline.
Register site = allocSite.as<Register>();
updateAllocSite(temp, result, zone, site);
// See NurseryCellHeader::MakeValue.
orPtr(Imm32(int32_t(traceKind)), site);
storePtr(site, Address(result, -js::Nursery::nurseryCellHeaderSize()));
}
}
// Update the allocation site in the same way as Nursery::allocateCell.
void MacroAssembler::updateAllocSite(Register temp, Register result,
CompileZone* zone, Register site) {
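// Sketch: bump the site's nursery allocation count. The first allocation for
// a site (the count becoming 1) also pushes the site onto the zone's list of
// sites with nursery allocations, mirroring Nursery::allocateCell.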
Label done;
add32(Imm32(1), Address(site, gc::AllocSite::offsetOfNurseryAllocCount()));
branch32(Assembler::NotEqual,
Address(site, gc::AllocSite::offsetOfNurseryAllocCount()), Imm32(1),
&done);
loadPtr(AbsoluteAddress(zone->addressOfNurseryAllocatedSites()), temp);
storePtr(temp, Address(site, gc::AllocSite::offsetOfNextNurseryAllocated()));
storePtr(site, AbsoluteAddress(zone->addressOfNurseryAllocatedSites()));
bind(&done);
}
// Inlined equivalent of gc::AllocateString, jumping to fail if nursery
// allocation requested but unsuccessful.
void MacroAssembler::allocateString(Register result, Register temp,
gc::AllocKind allocKind,
gc::Heap initialHeap, Label* fail) {
MOZ_ASSERT(allocKind == gc::AllocKind::STRING ||
allocKind == gc::AllocKind::FAT_INLINE_STRING);
checkAllocatorState(temp, allocKind, fail);
if (shouldNurseryAllocate(allocKind, initialHeap)) {
MOZ_ASSERT(initialHeap == gc::Heap::Default);
return nurseryAllocateString(result, temp, allocKind, fail);
}
freeListAllocate(result, temp, allocKind, fail);
}
void MacroAssembler::newGCString(Register result, Register temp,
gc::Heap initialHeap, Label* fail) {
allocateString(result, temp, js::gc::AllocKind::STRING, initialHeap, fail);
}
void MacroAssembler::newGCFatInlineString(Register result, Register temp,
gc::Heap initialHeap, Label* fail) {
allocateString(result, temp, js::gc::AllocKind::FAT_INLINE_STRING,
initialHeap, fail);
}
void MacroAssembler::newGCBigInt(Register result, Register temp,
gc::Heap initialHeap, Label* fail) {
constexpr gc::AllocKind allocKind = gc::AllocKind::BIGINT;
checkAllocatorState(temp, allocKind, fail);
if (shouldNurseryAllocate(allocKind, initialHeap)) {
MOZ_ASSERT(initialHeap == gc::Heap::Default);
return nurseryAllocateBigInt(result, temp, fail);
}
freeListAllocate(result, temp, allocKind, fail);
}
void MacroAssembler::copySlotsFromTemplate(
Register obj, const TemplateNativeObject& templateObj, uint32_t start,
uint32_t end) {
uint32_t nfixed = std::min(templateObj.numFixedSlots(), end);
for (unsigned i = start; i < nfixed; i++) {
// Template objects are not exposed to script and therefore immutable.
// However, regexp template objects are sometimes used directly (when
// the cloning is not observable), and therefore we can end up with a
// non-zero lastIndex. Detect this case here and just substitute 0, to
// avoid racing with the main thread updating this slot.
Value v;
if (templateObj.isRegExpObject() && i == RegExpObject::lastIndexSlot()) {
v = Int32Value(0);
} else {
v = templateObj.getSlot(i);
}
storeValue(v, Address(obj, NativeObject::getFixedSlotOffset(i)));
}
}
void MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
uint32_t start, uint32_t end,
const Value& v) {
MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));
if (start >= end) {
return;
}
#ifdef JS_NUNBOX32
// We only have a single spare register, so do the initialization as two
// strided writes of the tag and body.
Address addr = base;
move32(Imm32(v.toNunboxPayload()), temp);
for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
store32(temp, ToPayload(addr));
}
addr = base;
move32(Imm32(v.toNunboxTag()), temp);
for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
store32(temp, ToType(addr));
}
#else
moveValue(v, ValueOperand(temp));
for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtr<Value>)) {
storePtr(temp, base);
}
#endif
}
void MacroAssembler::fillSlotsWithUndefined(Address base, Register temp,
uint32_t start, uint32_t end) {
fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
}
void MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp,
uint32_t start, uint32_t end) {
fillSlotsWithConstantValue(base, temp, start, end,
MagicValue(JS_UNINITIALIZED_LEXICAL));
}
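// Finds where the trailing runs of uninitialized-lexical and undefined slots
// begin in the template object's slots. For example (illustrative values),
// slots [Int32(1), uninitializedLexical, uninitializedLexical, undefined]
// give startOfUninitialized = 1 and startOfUndefined = 3.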
static std::pair<uint32_t, uint32_t> FindStartOfUninitializedAndUndefinedSlots(
const TemplateNativeObject& templateObj, uint32_t nslots) {
MOZ_ASSERT(nslots == templateObj.slotSpan());
MOZ_ASSERT(nslots > 0);
uint32_t first = nslots;
for (; first != 0; --first) {
if (templateObj.getSlot(first - 1) != UndefinedValue()) {
break;
}
}
uint32_t startOfUndefined = first;
if (first != 0 && IsUninitializedLexical(templateObj.getSlot(first - 1))) {
for (; first != 0; --first) {
if (!IsUninitializedLexical(templateObj.getSlot(first - 1))) {
break;
}
}
}
uint32_t startOfUninitialized = first;
return {startOfUninitialized, startOfUndefined};
}
void MacroAssembler::initTypedArraySlots(Register obj, Register temp,
Register lengthReg,
LiveRegisterSet liveRegs, Label* fail,
TypedArrayObject* templateObj,
TypedArrayLength lengthKind) {
MOZ_ASSERT(!templateObj->hasBuffer());
constexpr size_t dataSlotOffset = ArrayBufferViewObject::dataOffset();
constexpr size_t dataOffset = dataSlotOffset + sizeof(HeapSlot);
static_assert(
TypedArrayObject::FIXED_DATA_START == TypedArrayObject::DATA_SLOT + 1,
"fixed inline element data assumed to begin after the data slot");
static_assert(
TypedArrayObject::INLINE_BUFFER_LIMIT ==
JSObject::MAX_BYTE_SIZE - dataOffset,
"typed array inline buffer is limited by the maximum object byte size");
// Initialise data elements to zero.
size_t length = templateObj->length();
MOZ_ASSERT(length <= INT32_MAX,
"Template objects are only created for int32 lengths");
size_t nbytes = length * templateObj->bytesPerElement();
if (lengthKind == TypedArrayLength::Fixed &&
nbytes <= TypedArrayObject::INLINE_BUFFER_LIMIT) {
MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());
// Store data elements inside the remaining JSObject slots.
computeEffectiveAddress(Address(obj, dataOffset), temp);
storePrivateValue(temp, Address(obj, dataSlotOffset));
// Write enough zero pointers into fixed data to zero every
// element. (This zeroes past the end of a byte count that's
// not a multiple of pointer size. That's okay, because fixed
// data is a count of 8-byte HeapSlots (i.e. <= pointer size),
// and we won't inline unless the desired memory fits in that
// space.)
static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");
size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char*);
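// For example, nbytes = 5 rounds up to 8 bytes, which is one pointer-sized
// store on 64-bit targets and two on 32-bit targets.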
for (size_t i = 0; i < numZeroPointers; i++) {
storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char*)));
}
MOZ_ASSERT(nbytes > 0, "Zero-length TypedArrays need ZeroLengthArrayData");
} else {
if (lengthKind == TypedArrayLength::Fixed) {
move32(Imm32(length), lengthReg);
}
// Ensure volatile |obj| is saved across the call.
if (obj.volatile_()) {
liveRegs.addUnchecked(obj);
}
// Allocate a buffer on the heap to store the data elements.
PushRegsInMask(liveRegs);
using Fn = void (*)(JSContext* cx, TypedArrayObject* obj, int32_t count);
setupUnalignedABICall(temp);
loadJSContext(temp);
passABIArg(temp);
passABIArg(obj);
passABIArg(lengthReg);
callWithABI<Fn, AllocateAndInitTypedArrayBuffer>();
PopRegsInMask(liveRegs);
// Fail when data slot is UndefinedValue.
branchTestUndefined(Assembler::Equal, Address(obj, dataSlotOffset), fail);
}
}
void MacroAssembler::initGCSlots(Register obj, Register temp,
const TemplateNativeObject& templateObj) {
MOZ_ASSERT(!templateObj.isArrayObject());
// Slots of non-array objects are required to be initialized.
// Use the values currently in the template object.
uint32_t nslots = templateObj.slotSpan();
if (nslots == 0) {
return;
}
uint32_t nfixed = templateObj.numUsedFixedSlots();
uint32_t ndynamic = templateObj.numDynamicSlots();
// Attempt to group slot writes such that we minimize the amount of
// duplicated data we need to embed in code and load into registers. In
// general, most template object slots will be undefined except for any
// reserved slots. Since reserved slots come first, we split the object
// logically into independent non-UndefinedValue writes to the head and
// duplicated writes of UndefinedValue to the tail. For the majority of
// objects, the "tail" will be the entire slot range.
//
// The template object may be a CallObject, in which case we need to
// account for uninitialized lexical slots as well as undefined
// slots. Uninitialized lexical slots appear in CallObjects if the function
// has parameter expressions, in which case closed over parameters have
// TDZ. Uninitialized slots come before undefined slots in CallObjects.
auto [startOfUninitialized, startOfUndefined] =
FindStartOfUninitializedAndUndefinedSlots(templateObj, nslots);
MOZ_ASSERT(startOfUninitialized <= nfixed); // Reserved slots must be fixed.
MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
MOZ_ASSERT_IF(!templateObj.isCallObject() &&
!templateObj.isBlockLexicalEnvironmentObject(),
startOfUninitialized == startOfUndefined);
// Copy over any preserved reserved slots.
copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);
// Fill the rest of the fixed slots with undefined and uninitialized.
size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
fillSlotsWithUninitialized(Address(obj, offset), temp, startOfUninitialized,
std::min(startOfUndefined, nfixed));
if (startOfUndefined < nfixed) {
offset = NativeObject::getFixedSlotOffset(startOfUndefined);
fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined,
nfixed);
}
if (ndynamic) {
// We are short one register to do this elegantly. Borrow the obj
// register briefly for our slots base address.
push(obj);
loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);
// Fill uninitialized slots if necessary. Otherwise initialize all
// slots to undefined.
if (startOfUndefined > nfixed) {
MOZ_ASSERT(startOfUninitialized != startOfUndefined);
fillSlotsWithUninitialized(Address(obj, 0), temp, 0,
startOfUndefined - nfixed);
size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
fillSlotsWithUndefined(Address(obj, offset), temp,
startOfUndefined - nfixed, ndynamic);
} else {
fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
}
pop(obj);
}
}
void MacroAssembler::initGCThing(Register obj, Register temp,
const TemplateObject& templateObj,
bool initContents) {
// Fast initialization of an empty object returned by allocateObject().
storePtr(ImmGCPtr(templateObj.shape()),
Address(obj, JSObject::offsetOfShape()));
if (templateObj.isNativeObject()) {
const TemplateNativeObject& ntemplate =
templateObj.asTemplateNativeObject();
MOZ_ASSERT(!ntemplate.hasDynamicElements());
// If the object has dynamic slots, the slots member has already been
// filled in.
if (ntemplate.numDynamicSlots() == 0) {
storePtr(ImmPtr(emptyObjectSlots),
Address(obj, NativeObject::offsetOfSlots()));
}
if (ntemplate.isArrayObject()) {
// Can't skip initializing reserved slots.
MOZ_ASSERT(initContents);
int elementsOffset = NativeObject::offsetOfFixedElements();
computeEffectiveAddress(Address(obj, elementsOffset), temp);
storePtr(temp, Address(obj, NativeObject::offsetOfElements()));
// Fill in the elements header.
store32(
Imm32(ntemplate.getDenseCapacity()),
Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
store32(Imm32(ntemplate.getDenseInitializedLength()),
Address(obj, elementsOffset +
ObjectElements::offsetOfInitializedLength()));
store32(Imm32(ntemplate.getArrayLength()),
Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
store32(Imm32(ObjectElements::FIXED),
Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
} else if (ntemplate.isArgumentsObject()) {
// The caller will initialize the reserved slots.
MOZ_ASSERT(!initContents);
storePtr(ImmPtr(emptyObjectElements),
Address(obj, NativeObject::offsetOfElements()));
} else {
// If the target type could be a TypedArray that maps shared memory
// then this would need to store emptyObjectElementsShared in that case.
MOZ_ASSERT(!ntemplate.isSharedMemory());
// Can't skip initializing reserved slots.
MOZ_ASSERT(initContents);
storePtr(ImmPtr(emptyObjectElements),
Address(obj, NativeObject::offsetOfElements()));
initGCSlots(obj, temp, ntemplate);
}
} else {
MOZ_CRASH("Unknown object");
}
#ifdef JS_GC_PROBES
AllocatableRegisterSet regs(RegisterSet::Volatile());
LiveRegisterSet save(regs.asLiveSet());
PushRegsInMask(save);
regs.takeUnchecked(obj);
Register temp2 = regs.takeAnyGeneral();
using Fn = void (*)(JSObject* obj);
setupUnalignedABICall(temp2);
passABIArg(obj);
callWithABI<Fn, TraceCreateObject>();
PopRegsInMask(save);
#endif
}
void MacroAssembler::compareStrings(JSOp op, Register left, Register right,
Register result, Label* fail) {
MOZ_ASSERT(left != result);
MOZ_ASSERT(right != result);
MOZ_ASSERT(IsEqualityOp(op) || IsRelationalOp(op));
Label notPointerEqual;
// If operands point to the same instance, the strings are trivially equal.
branchPtr(Assembler::NotEqual, left, right,
IsEqualityOp(op) ? &notPointerEqual : fail);
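// For pointer-equal strings the result is known immediately: the equality
// ops Eq/StrictEq are true, and of the relational ops only Le and Ge hold,
// since a string always compares <= and >= itself.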
move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
op == JSOp::Ge),
result);
if (IsEqualityOp(op)) {
Label done;
jump(&done);
bind(&notPointerEqual);
Label leftIsNotAtom;
Label setNotEqualResult;
// Atoms cannot be equal to each other if they point to different strings.
Imm32 atomBit(JSString::ATOM_BIT);
branchTest32(Assembler::Zero, Address(left, JSString::offsetOfFlags()),
atomBit, &leftIsNotAtom);
branchTest32(Assembler::NonZero, Address(right, JSString::offsetOfFlags()),
atomBit, &setNotEqualResult);
bind(&leftIsNotAtom);
// Strings of different length can never be equal.
loadStringLength(left, result);
branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()),
result, fail);
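// Equal lengths mean the comparison cannot be decided from pointers and
// lengths alone, so jump to |fail| in that case; different lengths fall
// through to the not-equal result below.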
bind(&setNotEqualResult);
move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), result);
bind(&done);
}
}
void MacroAssembler::loadStringChars(Register str, Register dest,
CharEncoding encoding) {
MOZ_ASSERT(str != dest);
if (JitOptions.spectreStringMitigations) {
if (encoding == CharEncoding::Latin1) {
// If the string is a rope, zero the |str| register. The code below
// depends on str->flags so this should block speculative execution.
movePtr(ImmWord(0), dest);
test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::LINEAR_BIT), dest, str);
} else {
// If we're loading TwoByte chars, there's an additional risk:
// if the string has Latin1 chars, we could read out-of-bounds. To
// prevent this, we check both the Linear and Latin1 bits. We don't
// have a scratch register, so we use these flags also to block
// speculative execution, similar to the use of 0 above.
MOZ_ASSERT(encoding == CharEncoding::TwoByte);
static constexpr uint32_t Mask =
JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT;
static_assert(Mask < 1024,
"Mask should be a small, near-null value to ensure we "
"block speculative execution when it's used as string "
"pointer");
move32(Imm32(Mask), dest);
and32(Address(str, JSString::offsetOfFlags()), dest);
cmp32MovePtr(Assembler::NotEqual, dest, Imm32(JSString::LINEAR_BIT), dest,
str);
}
}
// Load the inline chars.
computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
dest);
// If it's not an inline string, load the non-inline chars. Use a
// conditional move to prevent speculative execution.
test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::INLINE_CHARS_BIT),
Address(str, JSString::offsetOfNonInlineChars()), dest);
}
void MacroAssembler::loadNonInlineStringChars(Register str, Register dest,
CharEncoding encoding) {
MOZ_ASSERT(str != dest);
if (JitOptions.spectreStringMitigations) {
// If the string is a rope, has inline chars, or has a different
// character encoding, set str to a near-null value to prevent
// speculative execution below (when reading str->nonInlineChars).
static constexpr uint32_t Mask = JSString::LINEAR_BIT |
JSString::INLINE_CHARS_BIT |
JSString::LATIN1_CHARS_BIT;
static_assert(Mask < 1024,
"Mask should be a small, near-null value to ensure we "
"block speculative execution when it's used as string "
"pointer");
uint32_t expectedBits = JSString::LINEAR_BIT;
if (encoding == CharEncoding::Latin1) {
expectedBits |= JSString::LATIN1_CHARS_BIT;
}
move32(Imm32(Mask), dest);
and32(Address(str, JSString::offsetOfFlags()), dest);
cmp32MovePtr(Assembler::NotEqual, dest, Imm32(expectedBits), dest, str);
}
loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
}
void MacroAssembler::storeNonInlineStringChars(Register chars, Register str) {
MOZ_ASSERT(chars != str);
storePtr(chars, Address(str, JSString::offsetOfNonInlineChars()));
}
void MacroAssembler::loadInlineStringCharsForStore(Register str,
Register dest) {
computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
dest);
}
void MacroAssembler::loadInlineStringChars(Register str, Register dest,
CharEncoding encoding) {
MOZ_ASSERT(str != dest);
if (JitOptions.spectreStringMitigations) {
// Making this Spectre-safe is a bit complicated: using
// computeEffectiveAddress and then zeroing the output register if
// non-inline is not sufficient: when the index is very large, it would
// allow reading |nullptr + index|. Just fall back to loadStringChars
// for now.
loadStringChars(str, dest, encoding);
} else {
computeEffectiveAddress(
Address(str, JSInlineString::offsetOfInlineStorage()), dest);
}
}
void MacroAssembler::loadRopeLeftChild(Register str, Register dest) {
MOZ_ASSERT(str != dest);
if (JitOptions.spectreStringMitigations) {
// Zero the output register if the input was not a rope.
movePtr(ImmWord(0), dest);
test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::LINEAR_BIT),
Address(str, JSRope::offsetOfLeft()), dest);
} else {
loadPtr(Address(str, JSRope::offsetOfLeft()), dest);
}
}
void MacroAssembler::loadRopeRightChild(Register str, Register dest) {
MOZ_ASSERT(str != dest);
if (JitOptions.spectreStringMitigations) {
// Zero the output register if the input was not a rope.
movePtr(ImmWord(0), dest);
test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::LINEAR_BIT),
Address(str, JSRope::offsetOfRight()), dest);
} else {
loadPtr(Address(str, JSRope::offsetOfRight()), dest);
}
}
void MacroAssembler::storeRopeChildren(Register left, Register right,
Register str) {
storePtr(left, Address(str, JSRope::offsetOfLeft()));
storePtr(right, Address(str, JSRope::offsetOfRight()));
}
void MacroAssembler::loadDependentStringBase(Register str, Register dest) {
MOZ_ASSERT(str != dest);
if (JitOptions.spectreStringMitigations) {
// If the string is not a dependent string, zero the |str| register.
// The code below loads str->base so this should block speculative
// execution.
movePtr(ImmWord(0), dest);
test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::DEPENDENT_BIT), dest, str);
}
loadPtr(Address(str, JSDependentString::offsetOfBase()), dest);
}
void MacroAssembler::storeDependentStringBase(Register base, Register str) {
storePtr(base, Address(str, JSDependentString::offsetOfBase()));
}
void MacroAssembler::loadRopeChild(Register str, Register index,
Register output, Label* isLinear) {
// This follows JSString::getChar.
branchIfNotRope(str, isLinear);
loadRopeLeftChild(str, output);
// Check if the index is contained in the leftChild.
Label loadedChild;
branch32(Assembler::Above, Address(output, JSString::offsetOfLength()), index,
&loadedChild);
// The index must be in the rightChild.
loadRopeRightChild(str, output);
bind(&loadedChild);
}
void MacroAssembler::branchIfCanLoadStringChar(Register str, Register index,
Register scratch, Label* label) {
loadRopeChild(str, index, scratch, label);
// Branch if the loaded (left or right) child is linear.
branchIfNotRope(scratch, label);
}
void MacroAssembler::branchIfNotCanLoadStringChar(Register str, Register index,
Register scratch,
Label* label) {
Label done;
loadRopeChild(str, index, scratch, &done);
// Branch if the left or right side is another rope.
branchIfRope(scratch, label);
bind(&done);
}
void MacroAssembler::loadStringChar(Register str, Register index,
Register output, Register scratch1,
Register scratch2, Label* fail) {
MOZ_ASSERT(str != output);
MOZ_ASSERT(str != index);
MOZ_ASSERT(index != output);
MOZ_ASSERT(output != scratch1);
MOZ_ASSERT(output != scratch2);
// Use scratch1 for the index (adjusted below).
move32(index, scratch1);
movePtr(str, output);
// This follows JSString::getChar.
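// Sketch: if |str| is a rope, descend into the child that contains |index|,
// subtracting the left child's length when the index falls into the right
// child. That child must itself be linear or we fail. Finally load the
// character from the linear string's chars, handling both encodings.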
Label notRope;
branchIfNotRope(str, &notRope);
loadRopeLeftChild(str, output);
// Check if the index is contained in the leftChild.
Label loadedChild, notInLeft;
spectreBoundsCheck32(scratch1, Address(output, JSString::offsetOfLength()),
scratch2, &notInLeft);
jump(&loadedChild);
// The index must be in the rightChild.
// index -= rope->leftChild()->length()
bind(&notInLeft);
sub32(Address(output, JSString::offsetOfLength()), scratch1);
loadRopeRightChild(str, output);
// If the left or right side is another rope, give up.
bind(&loadedChild);
branchIfRope(output, fail);
bind(&notRope);
Label isLatin1, done;
// We have to check the left/right side for ropes,
// because a TwoByte rope might have a Latin1 child.
branchLatin1String(output, &isLatin1);
loadStringChars(output, scratch2, CharEncoding::TwoByte);
loadChar(scratch2, scratch1, output, CharEncoding::TwoByte);
jump(&done);
bind(&isLatin1);
loadStringChars(output, scratch2, CharEncoding::Latin1);
loadChar(scratch2, scratch1, output, CharEncoding::Latin1);
bind(&done);
}
void MacroAssembler::loadStringIndexValue(Register str, Register dest,
Label* fail) {
MOZ_ASSERT(str != dest);
load32(Address(str, JSString::offsetOfFlags()), dest);
// Does not have a cached index value.
branchTest32(Assembler::Zero, dest, Imm32(JSString::INDEX_VALUE_BIT), fail);
// Extract the index.
rshift32(Imm32(JSString::INDEX_VALUE_SHIFT), dest);
}
void MacroAssembler::loadChar(Register chars, Register index, Register dest,
CharEncoding encoding, int32_t offset /* = 0 */) {
if (encoding == CharEncoding::Latin1) {
loadChar(BaseIndex(chars, index, TimesOne, offset), dest, encoding);
} else {
loadChar(BaseIndex(chars, index, TimesTwo, offset), dest, encoding);
}
}
void MacroAssembler::addToCharPtr(Register chars, Register index,
CharEncoding encoding) {
if (encoding == CharEncoding::Latin1) {
static_assert(sizeof(char) == 1,
"Latin-1 string index shouldn't need scaling");
addPtr(index, chars);
} else {
computeEffectiveAddress(BaseIndex(chars, index, TimesTwo), chars);
}
}
void MacroAssembler::loadStringFromUnit(Register unit, Register dest,
const StaticStrings& staticStrings) {
movePtr(ImmPtr(&staticStrings.unitStaticTable), dest);
loadPtr(BaseIndex(dest, unit, ScalePointer), dest);
}
void MacroAssembler::loadLengthTwoString(Register c1, Register c2,
Register dest,
const StaticStrings& staticStrings) {
// Compute (toSmallCharTable[c1] << SMALL_CHAR_BITS) + toSmallCharTable[c2]
// to obtain the index into `StaticStrings::length2StaticTable`.
static_assert(sizeof(StaticStrings::SmallChar) == 1);
movePtr(ImmPtr(&StaticStrings::toSmallCharTable.storage), dest);
load8ZeroExtend(BaseIndex(dest, c1, Scale::TimesOne), c1);
load8ZeroExtend(BaseIndex(dest, c2, Scale::TimesOne), c2);
lshift32(Imm32(StaticStrings::SMALL_CHAR_BITS), c1);
add32(c2, c1);
// Look up the string from the computed index.
movePtr(ImmPtr(&staticStrings.length2StaticTable), dest);
loadPtr(BaseIndex(dest, c1, ScalePointer), dest);
}
void MacroAssembler::loadInt32ToStringWithBase(
Register input, Register base, Register dest, Register scratch1,
Register scratch2, const StaticStrings& staticStrings,
const LiveRegisterSet& volatileRegs, Label* fail) {
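// Lookup sketch: values below |base| map to a length-1 static string, values
// below |base * base| map to a length-2 static string, and anything larger
// jumps to |fail|.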
#ifdef DEBUG
Label baseBad, baseOk;
branch32(Assembler::LessThan, base, Imm32(2), &baseBad);
branch32(Assembler::LessThanOrEqual, base, Imm32(36), &baseOk);
bind(&baseBad);
assumeUnreachable("base must be in range [2, 36]");
bind(&baseOk);
#endif
// Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
auto toChar = [this, base](Register r) {
#ifdef DEBUG
Label ok;
branch32(Assembler::Below, r, base, &ok);
assumeUnreachable("bad digit");
bind(&ok);
#else
// Silence unused lambda capture warning.
(void)base;
#endif
Label done;
add32(Imm32('0'), r);
branch32(Assembler::BelowOrEqual, r, Imm32('9'), &done);
add32(Imm32('a' - '0' - 10), r);
bind(&done);
};
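// For example, r = 3 maps to '3'; r = 10 maps past '9', so the second add
// yields 'a'; r = 35 yields 'z'.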
// Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
Label lengthTwo, done;
branch32(Assembler::AboveOrEqual, input, base, &lengthTwo);
{
move32(input, scratch1);
toChar(scratch1);
loadStringFromUnit(scratch1, dest, staticStrings);
jump(&done);
}
bind(&lengthTwo);
// Compute |base * base|.
move32(base, scratch1);
mul32(scratch1, scratch1);
// Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
branch32(Assembler::AboveOrEqual, input, scratch1, fail);
{
// Compute |scratch1 = input / base| and |scratch2 = input % base|.
move32(input, scratch1);
flexibleDivMod32(base, scratch1, scratch2, true, volatileRegs);
// Compute the digits of the divisor and remainder.
toChar(scratch1);
toChar(scratch2);
// Look up the 2-character digit string in the small-char table.
loadLengthTwoString(scratch1, scratch2, dest, staticStrings);
}
bind(&done);
}
void MacroAssembler::loadInt32ToStringWithBase(
Register input, int32_t base, Register dest, Register scratch1,
Register scratch2, const StaticStrings& staticStrings, Label* fail) {
MOZ_ASSERT(2 <= base && base <= 36, "base must be in range [2, 36]");
// Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.