/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/MacroAssembler-inl.h"
#include "mozilla/FloatingPoint.h"
#include "mozilla/Latin1.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/XorShift128PlusRNG.h"
#include <algorithm>
#include <limits>
#include <utility>
#include "jit/AtomicOp.h"
#include "jit/AtomicOperations.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineJIT.h"
#include "jit/JitFrames.h"
#include "jit/JitOptions.h"
#include "jit/JitRuntime.h"
#include "jit/JitScript.h"
#include "jit/MoveEmitter.h"
#include "jit/ReciprocalMulConstants.h"
#include "jit/SharedICHelpers.h"
#include "jit/SharedICRegisters.h"
#include "jit/Simulator.h"
#include "jit/VMFunctions.h"
#include "js/Conversions.h"
#include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
#include "js/GCAPI.h" // JS::AutoCheckCannotGC
#include "js/ScalarType.h" // js::Scalar::Type
#include "util/Unicode.h"
#include "vm/ArgumentsObject.h"
#include "vm/ArrayBufferViewObject.h"
#include "vm/BoundFunctionObject.h"
#include "vm/DateObject.h"
#include "vm/DateTime.h"
#include "vm/Float16.h"
#include "vm/FunctionFlags.h" // js::FunctionFlags
#include "vm/Iteration.h"
#include "vm/JSContext.h"
#include "vm/JSFunction.h"
#include "vm/StringType.h"
#include "vm/TypedArrayObject.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCodegenConstants.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmInstanceData.h"
#include "wasm/WasmMemory.h"
#include "wasm/WasmTypeDef.h"
#include "wasm/WasmValidate.h"
#include "jit/TemplateObject-inl.h"
#include "vm/BytecodeUtil-inl.h"
#include "vm/Interpreter-inl.h"
#include "vm/JSObject-inl.h"
#include "wasm/WasmGcObject-inl.h"
using namespace js;
using namespace js::jit;
using JS::GenericNaN;
using mozilla::CheckedInt;
TrampolinePtr MacroAssembler::preBarrierTrampoline(MIRType type) {
const JitRuntime* rt = runtime()->jitRuntime();
return rt->preBarrier(type);
}
template <typename T>
void MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType,
FloatRegister value, const T& dest,
Register temp,
LiveRegisterSet volatileLiveRegs) {
switch (arrayType) {
case Scalar::Float16:
storeFloat16(value, dest, temp, volatileLiveRegs);
break;
case Scalar::Float32: {
if (value.isDouble()) {
ScratchFloat32Scope fpscratch(*this);
convertDoubleToFloat32(value, fpscratch);
storeFloat32(fpscratch, dest);
} else {
MOZ_ASSERT(value.isSingle());
storeFloat32(value, dest);
}
break;
}
case Scalar::Float64:
MOZ_ASSERT(value.isDouble());
storeDouble(value, dest);
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void MacroAssembler::storeToTypedFloatArray(
Scalar::Type arrayType, FloatRegister value, const BaseIndex& dest,
Register temp, LiveRegisterSet volatileLiveRegs);
template void MacroAssembler::storeToTypedFloatArray(
Scalar::Type arrayType, FloatRegister value, const Address& dest,
Register temp, LiveRegisterSet volatileLiveRegs);
void MacroAssembler::boxUint32(Register source, ValueOperand dest,
Uint32Mode mode, Label* fail) {
switch (mode) {
// Fail if the value does not fit in an int32.
case Uint32Mode::FailOnDouble: {
branchTest32(Assembler::Signed, source, source, fail);
tagValue(JSVAL_TYPE_INT32, source, dest);
break;
}
case Uint32Mode::ForceDouble: {
// Always convert the value to double.
ScratchDoubleScope fpscratch(*this);
convertUInt32ToDouble(source, fpscratch);
boxDouble(fpscratch, dest, fpscratch);
break;
}
}
}
template <typename T>
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src,
AnyRegister dest, Register temp1,
Register temp2, Label* fail,
LiveRegisterSet volatileLiveRegs) {
switch (arrayType) {
case Scalar::Int8:
load8SignExtend(src, dest.gpr());
break;
case Scalar::Uint8:
case Scalar::Uint8Clamped:
load8ZeroExtend(src, dest.gpr());
break;
case Scalar::Int16:
load16SignExtend(src, dest.gpr());
break;
case Scalar::Uint16:
load16ZeroExtend(src, dest.gpr());
break;
case Scalar::Int32:
load32(src, dest.gpr());
break;
case Scalar::Uint32:
if (dest.isFloat()) {
load32(src, temp1);
convertUInt32ToDouble(temp1, dest.fpu());
} else {
load32(src, dest.gpr());
// Bail out if the value doesn't fit into a signed int32 value. This
// is what allows MLoadUnboxedScalar to have a type() of
// MIRType::Int32 for UInt32 array loads.
branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
}
break;
case Scalar::Float16:
loadFloat16(src, dest.fpu(), temp1, temp2, volatileLiveRegs);
canonicalizeFloat(dest.fpu());
break;
case Scalar::Float32:
loadFloat32(src, dest.fpu());
canonicalizeFloat(dest.fpu());
break;
case Scalar::Float64:
loadDouble(src, dest.fpu());
canonicalizeDouble(dest.fpu());
break;
case Scalar::BigInt64:
case Scalar::BigUint64:
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void MacroAssembler::loadFromTypedArray(
Scalar::Type arrayType, const Address& src, AnyRegister dest,
Register temp1, Register temp2, Label* fail,
LiveRegisterSet volatileLiveRegs);
template void MacroAssembler::loadFromTypedArray(
Scalar::Type arrayType, const BaseIndex& src, AnyRegister dest,
Register temp1, Register temp2, Label* fail,
LiveRegisterSet volatileLiveRegs);
void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType,
const BaseIndex& src,
const ValueOperand& dest,
Uint32Mode uint32Mode, Register temp,
Label* fail,
LiveRegisterSet volatileLiveRegs) {
switch (arrayType) {
case Scalar::Int8:
case Scalar::Uint8:
case Scalar::Uint8Clamped:
case Scalar::Int16:
case Scalar::Uint16:
case Scalar::Int32:
loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()),
InvalidReg, InvalidReg, nullptr, LiveRegisterSet{});
tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
break;
case Scalar::Uint32:
load32(src, dest.scratchReg());
boxUint32(dest.scratchReg(), dest, uint32Mode, fail);
break;
case Scalar::Float16: {
ScratchDoubleScope dscratch(*this);
FloatRegister fscratch = dscratch.asSingle();
loadFromTypedArray(arrayType, src, AnyRegister(fscratch),
dest.scratchReg(), temp, nullptr, volatileLiveRegs);
convertFloat32ToDouble(fscratch, dscratch);
boxDouble(dscratch, dest, dscratch);
break;
}
case Scalar::Float32: {
ScratchDoubleScope dscratch(*this);
FloatRegister fscratch = dscratch.asSingle();
loadFromTypedArray(arrayType, src, AnyRegister(fscratch), InvalidReg,
InvalidReg, nullptr, LiveRegisterSet{});
convertFloat32ToDouble(fscratch, dscratch);
boxDouble(dscratch, dest, dscratch);
break;
}
case Scalar::Float64: {
ScratchDoubleScope fpscratch(*this);
loadFromTypedArray(arrayType, src, AnyRegister(fpscratch), InvalidReg,
InvalidReg, nullptr, LiveRegisterSet{});
boxDouble(fpscratch, dest, fpscratch);
break;
}
case Scalar::BigInt64:
case Scalar::BigUint64:
default:
MOZ_CRASH("Invalid typed array type");
}
}
void MacroAssembler::loadFromTypedBigIntArray(Scalar::Type arrayType,
const BaseIndex& src,
const ValueOperand& dest,
Register bigInt,
Register64 temp) {
MOZ_ASSERT(Scalar::isBigIntType(arrayType));
load64(src, temp);
initializeBigInt64(arrayType, bigInt, temp);
tagValue(JSVAL_TYPE_BIGINT, bigInt, dest);
}
// Inlined version of gc::CheckAllocatorState that checks the bare essentials
// and bails for anything that cannot be handled with our jit allocators.
void MacroAssembler::checkAllocatorState(Register temp, gc::AllocKind allocKind,
Label* fail) {
// Don't execute the inline path if GC probes are built in.
#ifdef JS_GC_PROBES
jump(fail);
#endif
#ifdef JS_GC_ZEAL
// Don't execute the inline path if gc zeal or tracing are active.
const uint32_t* ptrZealModeBits = runtime()->addressOfGCZealModeBits();
branch32(Assembler::NotEqual, AbsoluteAddress(ptrZealModeBits), Imm32(0),
fail);
#endif
// If the zone has a realm with an object allocation metadata hook, emit a
// guard for this. Note that IC stubs and some other trampolines can be shared
// across realms, so we don't bake in a realm pointer.
if (gc::IsObjectAllocKind(allocKind) &&
realm()->zone()->hasRealmWithAllocMetadataBuilder()) {
loadJSContext(temp);
loadPtr(Address(temp, JSContext::offsetOfRealm()), temp);
branchPtr(Assembler::NotEqual,
Address(temp, Realm::offsetOfAllocationMetadataBuilder()),
ImmWord(0), fail);
}
}
bool MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind,
gc::Heap initialHeap) {
// Note that Ion elides barriers on writes to objects known to be in the
// nursery, so any allocation that can be made into the nursery must be made
// into the nursery, even if the nursery is disabled. At runtime these will
// take the out-of-line path, which is required to insert a barrier for the
// initializing writes.
return IsNurseryAllocable(allocKind) && initialHeap != gc::Heap::Tenured;
}
// Inline version of Nursery::allocateObject. If the object has dynamic slots,
// this fills in the slots_ pointer.
void MacroAssembler::nurseryAllocateObject(Register result, Register temp,
gc::AllocKind allocKind,
size_t nDynamicSlots, Label* fail,
const AllocSiteInput& allocSite) {
MOZ_ASSERT(IsNurseryAllocable(allocKind));
// Currently the JIT does not nursery allocate foreground finalized
// objects. This is allowed for objects that support this and have the
// JSCLASS_SKIP_NURSERY_FINALIZE class flag set. It's hard to assert that here
// though so disallow all foreground finalized objects for now.
MOZ_ASSERT(!IsForegroundFinalized(allocKind));
// We still need to allocate in the nursery, per the comment in
// shouldNurseryAllocate; however, we need to insert into the
// mallocedBuffers set, so bail to do the nursery allocation in the
// interpreter.
if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
jump(fail);
return;
}
// Check whether this allocation site needs pretenuring. This dynamic check
// only happens for baseline code.
if (allocSite.is<Register>()) {
Register site = allocSite.as<Register>();
branchTestPtr(Assembler::NonZero,
Address(site, gc::AllocSite::offsetOfScriptAndState()),
Imm32(gc::AllocSite::LONG_LIVED_BIT), fail);
}
// No explicit check for nursery.isEnabled() is needed, as the comparison
// with the nursery's end will always fail in such cases.
CompileZone* zone = realm()->zone();
size_t thingSize = gc::Arena::thingSize(allocKind);
size_t totalSize = thingSize;
if (nDynamicSlots) {
totalSize += ObjectSlots::allocSize(nDynamicSlots);
}
MOZ_ASSERT(totalSize < INT32_MAX);
MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::Object,
totalSize, allocSite);
if (nDynamicSlots) {
store32(Imm32(nDynamicSlots),
Address(result, thingSize + ObjectSlots::offsetOfCapacity()));
store32(
Imm32(0),
Address(result, thingSize + ObjectSlots::offsetOfDictionarySlotSpan()));
store64(Imm64(ObjectSlots::NoUniqueIdInDynamicSlots),
Address(result, thingSize + ObjectSlots::offsetOfMaybeUniqueId()));
computeEffectiveAddress(
Address(result, thingSize + ObjectSlots::offsetOfSlots()), temp);
storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
}
}
// Inlined version of FreeSpan::allocate. This does not fill in slots_.
void MacroAssembler::freeListAllocate(Register result, Register temp,
gc::AllocKind allocKind, Label* fail) {
CompileZone* zone = realm()->zone();
int thingSize = int(gc::Arena::thingSize(allocKind));
Label fallback;
Label success;
// Load the first and last offsets of |zone|'s free list for |allocKind|.
// If there is no room remaining in the span, fall back to get the next one.
gc::FreeSpan** ptrFreeList = zone->addressOfFreeList(allocKind);
loadPtr(AbsoluteAddress(ptrFreeList), temp);
load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
branch32(Assembler::AboveOrEqual, result, temp, &fallback);
// Bump the offset for the next allocation.
add32(Imm32(thingSize), result);
loadPtr(AbsoluteAddress(ptrFreeList), temp);
store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
sub32(Imm32(thingSize), result);
addPtr(temp, result); // Turn the offset into a pointer.
jump(&success);
bind(&fallback);
// If there are no free spans left, we bail to finish the allocation. The
// interpreter will call the GC allocator to set up a new arena to allocate
// from, after which we can resume allocating in the jit.
branchTest32(Assembler::Zero, result, result, fail);
loadPtr(AbsoluteAddress(ptrFreeList), temp);
addPtr(temp, result); // Turn the offset into a pointer.
Push(result);
// Update the free list to point to the next span (which may be empty).
load32(Address(result, 0), result);
store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
Pop(result);
bind(&success);
if (runtime()->geckoProfiler().enabled()) {
uint32_t* countAddress = zone->addressOfTenuredAllocCount();
movePtr(ImmPtr(countAddress), temp);
add32(Imm32(1), Address(temp, 0));
}
}
// Inlined equivalent of gc::AllocateObject, without failure case handling.
void MacroAssembler::allocateObject(Register result, Register temp,
gc::AllocKind allocKind,
uint32_t nDynamicSlots,
gc::Heap initialHeap, Label* fail,
const AllocSiteInput& allocSite) {
MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
checkAllocatorState(temp, allocKind, fail);
if (shouldNurseryAllocate(allocKind, initialHeap)) {
MOZ_ASSERT(initialHeap == gc::Heap::Default);
return nurseryAllocateObject(result, temp, allocKind, nDynamicSlots, fail,
allocSite);
}
// Fall back to calling into the VM to allocate objects in the tenured heap
// that have dynamic slots.
if (nDynamicSlots) {
jump(fail);
return;
}
return freeListAllocate(result, temp, allocKind, fail);
}
void MacroAssembler::createGCObject(Register obj, Register temp,
const TemplateObject& templateObj,
gc::Heap initialHeap, Label* fail,
bool initContents /* = true */,
const AllocSiteInput& allocSite) {
gc::AllocKind allocKind = templateObj.getAllocKind();
MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
uint32_t nDynamicSlots = 0;
if (templateObj.isNativeObject()) {
const TemplateNativeObject& ntemplate =
templateObj.asTemplateNativeObject();
nDynamicSlots = ntemplate.numDynamicSlots();
}
allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail,
allocSite);
initGCThing(obj, temp, templateObj, initContents);
}
void MacroAssembler::createPlainGCObject(
Register result, Register shape, Register temp, Register temp2,
uint32_t numFixedSlots, uint32_t numDynamicSlots, gc::AllocKind allocKind,
gc::Heap initialHeap, Label* fail, const AllocSiteInput& allocSite,
bool initContents /* = true */) {
MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");
// Allocate object.
allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
allocSite);
// Initialize shape field.
storePtr(shape, Address(result, JSObject::offsetOfShape()));
// If the object has dynamic slots, allocateObject will initialize
// the slots field. If not, we must initialize it now.
if (numDynamicSlots == 0) {
storePtr(ImmPtr(emptyObjectSlots),
Address(result, NativeObject::offsetOfSlots()));
}
// Initialize elements field.
storePtr(ImmPtr(emptyObjectElements),
Address(result, NativeObject::offsetOfElements()));
// Initialize fixed slots.
if (initContents) {
fillSlotsWithUndefined(Address(result, NativeObject::getFixedSlotOffset(0)),
temp, 0, numFixedSlots);
}
// Initialize dynamic slots.
if (numDynamicSlots > 0) {
loadPtr(Address(result, NativeObject::offsetOfSlots()), temp2);
fillSlotsWithUndefined(Address(temp2, 0), temp, 0, numDynamicSlots);
}
}
void MacroAssembler::createArrayWithFixedElements(
Register result, Register shape, Register temp, Register dynamicSlotsTemp,
uint32_t arrayLength, uint32_t arrayCapacity, uint32_t numUsedDynamicSlots,
uint32_t numDynamicSlots, gc::AllocKind allocKind, gc::Heap initialHeap,
Label* fail, const AllocSiteInput& allocSite) {
MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
MOZ_ASSERT(shape != temp, "shape can overlap with temp2, but not temp");
MOZ_ASSERT(result != temp);
// This only supports allocating arrays with fixed elements and does not
// support any dynamic elements.
MOZ_ASSERT(arrayCapacity >= arrayLength);
MOZ_ASSERT(gc::GetGCKindSlots(allocKind) >=
arrayCapacity + ObjectElements::VALUES_PER_HEADER);
MOZ_ASSERT(numUsedDynamicSlots <= numDynamicSlots);
// Allocate object.
allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
allocSite);
// Initialize shape field.
storePtr(shape, Address(result, JSObject::offsetOfShape()));
// If the object has dynamic slots, allocateObject will initialize
// the slots field. If not, we must initialize it now.
if (numDynamicSlots == 0) {
storePtr(ImmPtr(emptyObjectSlots),
Address(result, NativeObject::offsetOfSlots()));
}
// Initialize elements pointer for fixed (inline) elements.
computeEffectiveAddress(
Address(result, NativeObject::offsetOfFixedElements()), temp);
storePtr(temp, Address(result, NativeObject::offsetOfElements()));
// Initialize elements header.
store32(Imm32(ObjectElements::FIXED),
Address(temp, ObjectElements::offsetOfFlags()));
store32(Imm32(0), Address(temp, ObjectElements::offsetOfInitializedLength()));
store32(Imm32(arrayCapacity),
Address(temp, ObjectElements::offsetOfCapacity()));
store32(Imm32(arrayLength), Address(temp, ObjectElements::offsetOfLength()));
// Initialize dynamic slots.
if (numUsedDynamicSlots > 0) {
MOZ_ASSERT(dynamicSlotsTemp != temp);
MOZ_ASSERT(dynamicSlotsTemp != InvalidReg);
loadPtr(Address(result, NativeObject::offsetOfSlots()), dynamicSlotsTemp);
fillSlotsWithUndefined(Address(dynamicSlotsTemp, 0), temp, 0,
numUsedDynamicSlots);
}
}
void MacroAssembler::createFunctionClone(Register result, Register canonical,
Register envChain, Register temp,
gc::AllocKind allocKind, Label* fail,
const AllocSiteInput& allocSite) {
MOZ_ASSERT(allocKind == gc::AllocKind::FUNCTION ||
allocKind == gc::AllocKind::FUNCTION_EXTENDED);
MOZ_ASSERT(result != temp);
// Allocate object.
size_t numDynamicSlots = 0;
gc::Heap initialHeap = gc::Heap::Default;
allocateObject(result, temp, allocKind, numDynamicSlots, initialHeap, fail,
allocSite);
// Initialize shape field.
loadPtr(Address(canonical, JSObject::offsetOfShape()), temp);
storePtr(temp, Address(result, JSObject::offsetOfShape()));
// Initialize dynamic slots and elements pointers.
storePtr(ImmPtr(emptyObjectSlots),
Address(result, NativeObject::offsetOfSlots()));
storePtr(ImmPtr(emptyObjectElements),
Address(result, NativeObject::offsetOfElements()));
// Initialize FlagsAndArgCountSlot.
storeValue(Address(canonical, JSFunction::offsetOfFlagsAndArgCount()),
Address(result, JSFunction::offsetOfFlagsAndArgCount()), temp);
// Initialize NativeFuncOrInterpretedEnvSlot.
storeValue(JSVAL_TYPE_OBJECT, envChain,
Address(result, JSFunction::offsetOfEnvironment()));
#ifdef DEBUG
// The new function must be allocated in the nursery if the nursery is
// enabled. Assert no post-barrier is needed.
Label ok;
branchPtrInNurseryChunk(Assembler::Equal, result, temp, &ok);
branchPtrInNurseryChunk(Assembler::NotEqual, envChain, temp, &ok);
assumeUnreachable("Missing post write barrier in createFunctionClone");
bind(&ok);
#endif
// Initialize NativeJitInfoOrInterpretedScriptSlot. This is a BaseScript*
// pointer stored as PrivateValue.
loadPrivate(Address(canonical, JSFunction::offsetOfJitInfoOrScript()), temp);
storePrivateValue(temp,
Address(result, JSFunction::offsetOfJitInfoOrScript()));
// Initialize AtomSlot.
storeValue(Address(canonical, JSFunction::offsetOfAtom()),
Address(result, JSFunction::offsetOfAtom()), temp);
// Initialize extended slots.
if (allocKind == gc::AllocKind::FUNCTION_EXTENDED) {
for (size_t i = 0; i < FunctionExtended::NUM_EXTENDED_SLOTS; i++) {
Address addr(result, FunctionExtended::offsetOfExtendedSlot(i));
storeValue(UndefinedValue(), addr);
}
}
}
// Inline version of Nursery::allocateString.
void MacroAssembler::nurseryAllocateString(Register result, Register temp,
gc::AllocKind allocKind,
Label* fail) {
MOZ_ASSERT(IsNurseryAllocable(allocKind));
// No explicit check for nursery.isEnabled() is needed, as the comparison
// with the nursery's end will always fail in such cases.
CompileZone* zone = realm()->zone();
size_t thingSize = gc::Arena::thingSize(allocKind);
bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::String,
thingSize);
}
// Inline version of Nursery::allocateBigInt.
void MacroAssembler::nurseryAllocateBigInt(Register result, Register temp,
Label* fail) {
MOZ_ASSERT(IsNurseryAllocable(gc::AllocKind::BIGINT));
// No explicit check for nursery.isEnabled() is needed, as the comparison
// with the nursery's end will always fail in such cases.
CompileZone* zone = realm()->zone();
size_t thingSize = gc::Arena::thingSize(gc::AllocKind::BIGINT);
bumpPointerAllocate(result, temp, fail, zone, JS::TraceKind::BigInt,
thingSize);
}
static bool IsNurseryAllocEnabled(CompileZone* zone, JS::TraceKind kind) {
switch (kind) {
case JS::TraceKind::Object:
return zone->allocNurseryObjects();
case JS::TraceKind::String:
return zone->allocNurseryStrings();
case JS::TraceKind::BigInt:
return zone->allocNurseryBigInts();
default:
MOZ_CRASH("Bad nursery allocation kind");
}
}
// This function handles nursery allocations for JS. For wasm, see
// MacroAssembler::wasmBumpPointerAllocate.
void MacroAssembler::bumpPointerAllocate(Register result, Register temp,
Label* fail, CompileZone* zone,
JS::TraceKind traceKind, uint32_t size,
const AllocSiteInput& allocSite) {
MOZ_ASSERT(size >= gc::MinCellSize);
uint32_t totalSize = size + Nursery::nurseryCellHeaderSize();
MOZ_ASSERT(totalSize < INT32_MAX, "Nursery allocation too large");
MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
  // We know statically whether nursery allocation is enabled for a particular
  // kind because we discard JIT code when this changes.
if (!IsNurseryAllocEnabled(zone, traceKind)) {
jump(fail);
return;
}
  // Use a relative 32-bit offset from the nursery's position_ to its
  // currentEnd_ to avoid 64-bit immediate loads.
void* posAddr = zone->addressOfNurseryPosition();
int32_t endOffset = Nursery::offsetOfCurrentEndFromPosition();
movePtr(ImmPtr(posAddr), temp);
loadPtr(Address(temp, 0), result);
addPtr(Imm32(totalSize), result);
branchPtr(Assembler::Below, Address(temp, endOffset), result, fail);
storePtr(result, Address(temp, 0));
subPtr(Imm32(size), result);
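  // |result| now points at the start of the cell, just past the nursery cell
  // header word that is written below at |result - nurseryCellHeaderSize()|.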
if (allocSite.is<gc::CatchAllAllocSite>()) {
// No allocation site supplied. This is the case when called from Warp, or
// from places that don't support pretenuring.
gc::CatchAllAllocSite siteKind = allocSite.as<gc::CatchAllAllocSite>();
gc::AllocSite* site = zone->catchAllAllocSite(traceKind, siteKind);
uintptr_t headerWord = gc::NurseryCellHeader::MakeValue(site, traceKind);
storePtr(ImmWord(headerWord),
Address(result, -js::Nursery::nurseryCellHeaderSize()));
if (traceKind != JS::TraceKind::Object ||
runtime()->geckoProfiler().enabled()) {
      // Update the catch-all allocation site, which is used to calculate
      // nursery allocation counts so we can determine whether to disable
      // nursery allocation of strings and bigints.
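      // If the count address happens to be within a 32-bit offset of the
      // nursery position address already held in |temp|, bump the counter
      // through |temp| to avoid materializing another 64-bit immediate.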
uint32_t* countAddress = site->nurseryAllocCountAddress();
CheckedInt<int32_t> counterOffset =
(CheckedInt<uintptr_t>(uintptr_t(countAddress)) -
CheckedInt<uintptr_t>(uintptr_t(posAddr)))
.toChecked<int32_t>();
if (counterOffset.isValid()) {
add32(Imm32(1), Address(temp, counterOffset.value()));
} else {
movePtr(ImmPtr(countAddress), temp);
add32(Imm32(1), Address(temp, 0));
}
}
} else {
// Update allocation site and store pointer in the nursery cell header. This
// is only used from baseline.
Register site = allocSite.as<Register>();
updateAllocSite(temp, result, zone, site);
// See NurseryCellHeader::MakeValue.
orPtr(Imm32(int32_t(traceKind)), site);
storePtr(site, Address(result, -js::Nursery::nurseryCellHeaderSize()));
}
}
// Update the allocation site in the same way as Nursery::allocateCell.
void MacroAssembler::updateAllocSite(Register temp, Register result,
CompileZone* zone, Register site) {
Label done;
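  // Bump the site's nursery allocation count. The first time the count
  // reaches NormalSiteAttentionThreshold, link the site into the zone's list
  // of nursery-allocated sites so it can be examined for pretenuring later.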
add32(Imm32(1), Address(site, gc::AllocSite::offsetOfNurseryAllocCount()));
branch32(Assembler::NotEqual,
Address(site, gc::AllocSite::offsetOfNurseryAllocCount()),
Imm32(js::gc::NormalSiteAttentionThreshold), &done);
loadPtr(AbsoluteAddress(zone->addressOfNurseryAllocatedSites()), temp);
storePtr(temp, Address(site, gc::AllocSite::offsetOfNextNurseryAllocated()));
storePtr(site, AbsoluteAddress(zone->addressOfNurseryAllocatedSites()));
bind(&done);
}
// Inlined equivalent of gc::AllocateString, jumping to fail if nursery
// allocation requested but unsuccessful.
void MacroAssembler::allocateString(Register result, Register temp,
gc::AllocKind allocKind,
gc::Heap initialHeap, Label* fail) {
MOZ_ASSERT(allocKind == gc::AllocKind::STRING ||
allocKind == gc::AllocKind::FAT_INLINE_STRING);
checkAllocatorState(temp, allocKind, fail);
if (shouldNurseryAllocate(allocKind, initialHeap)) {
MOZ_ASSERT(initialHeap == gc::Heap::Default);
return nurseryAllocateString(result, temp, allocKind, fail);
}
freeListAllocate(result, temp, allocKind, fail);
}
void MacroAssembler::newGCString(Register result, Register temp,
gc::Heap initialHeap, Label* fail) {
allocateString(result, temp, js::gc::AllocKind::STRING, initialHeap, fail);
}
void MacroAssembler::newGCFatInlineString(Register result, Register temp,
gc::Heap initialHeap, Label* fail) {
allocateString(result, temp, js::gc::AllocKind::FAT_INLINE_STRING,
initialHeap, fail);
}
void MacroAssembler::newGCBigInt(Register result, Register temp,
gc::Heap initialHeap, Label* fail) {
constexpr gc::AllocKind allocKind = gc::AllocKind::BIGINT;
checkAllocatorState(temp, allocKind, fail);
if (shouldNurseryAllocate(allocKind, initialHeap)) {
MOZ_ASSERT(initialHeap == gc::Heap::Default);
return nurseryAllocateBigInt(result, temp, fail);
}
freeListAllocate(result, temp, allocKind, fail);
}
void MacroAssembler::copySlotsFromTemplate(
Register obj, const TemplateNativeObject& templateObj, uint32_t start,
uint32_t end) {
uint32_t nfixed = std::min(templateObj.numFixedSlots(), end);
for (unsigned i = start; i < nfixed; i++) {
// Template objects are not exposed to script and therefore immutable.
// However, regexp template objects are sometimes used directly (when
// the cloning is not observable), and therefore we can end up with a
// non-zero lastIndex. Detect this case here and just substitute 0, to
// avoid racing with the main thread updating this slot.
Value v;
if (templateObj.isRegExpObject() && i == RegExpObject::lastIndexSlot()) {
v = Int32Value(0);
} else {
v = templateObj.getSlot(i);
}
storeValue(v, Address(obj, NativeObject::getFixedSlotOffset(i)));
}
}
void MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
uint32_t start, uint32_t end,
const Value& v) {
MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));
if (start >= end) {
return;
}
#ifdef JS_NUNBOX32
// We only have a single spare register, so do the initialization as two
// strided writes of the tag and body.
Address addr = base;
move32(Imm32(v.toNunboxPayload()), temp);
for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
store32(temp, ToPayload(addr));
}
addr = base;
move32(Imm32(v.toNunboxTag()), temp);
for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtr<Value>)) {
store32(temp, ToType(addr));
}
#else
moveValue(v, ValueOperand(temp));
for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtr<Value>)) {
storePtr(temp, base);
}
#endif
}
void MacroAssembler::fillSlotsWithUndefined(Address base, Register temp,
uint32_t start, uint32_t end) {
fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
}
void MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp,
uint32_t start, uint32_t end) {
fillSlotsWithConstantValue(base, temp, start, end,
MagicValue(JS_UNINITIALIZED_LEXICAL));
}
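// Scan the template object's slot span backwards to find where the trailing
// runs of uninitialized-lexical and undefined slots begin: slots in
// [startOfUninitialized, startOfUndefined) hold the uninitialized-lexical
// magic value and slots in [startOfUndefined, nslots) hold undefined.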
static std::pair<uint32_t, uint32_t> FindStartOfUninitializedAndUndefinedSlots(
const TemplateNativeObject& templateObj, uint32_t nslots) {
MOZ_ASSERT(nslots == templateObj.slotSpan());
MOZ_ASSERT(nslots > 0);
uint32_t first = nslots;
for (; first != 0; --first) {
if (templateObj.getSlot(first - 1) != UndefinedValue()) {
break;
}
}
uint32_t startOfUndefined = first;
if (first != 0 && IsUninitializedLexical(templateObj.getSlot(first - 1))) {
for (; first != 0; --first) {
if (!IsUninitializedLexical(templateObj.getSlot(first - 1))) {
break;
}
}
}
uint32_t startOfUninitialized = first;
return {startOfUninitialized, startOfUndefined};
}
void MacroAssembler::initTypedArraySlots(
Register obj, Register temp, Register lengthReg, LiveRegisterSet liveRegs,
Label* fail, FixedLengthTypedArrayObject* templateObj,
TypedArrayLength lengthKind) {
MOZ_ASSERT(!templateObj->hasBuffer());
constexpr size_t dataSlotOffset = ArrayBufferViewObject::dataOffset();
constexpr size_t dataOffset = dataSlotOffset + sizeof(HeapSlot);
static_assert(
FixedLengthTypedArrayObject::FIXED_DATA_START ==
FixedLengthTypedArrayObject::DATA_SLOT + 1,
"fixed inline element data assumed to begin after the data slot");
static_assert(
FixedLengthTypedArrayObject::INLINE_BUFFER_LIMIT ==
JSObject::MAX_BYTE_SIZE - dataOffset,
"typed array inline buffer is limited by the maximum object byte size");
// Initialise data elements to zero.
size_t length = templateObj->length();
MOZ_ASSERT(length <= INT32_MAX,
"Template objects are only created for int32 lengths");
size_t nbytes = length * templateObj->bytesPerElement();
if (lengthKind == TypedArrayLength::Fixed &&
nbytes <= FixedLengthTypedArrayObject::INLINE_BUFFER_LIMIT) {
MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());
// Store data elements inside the remaining JSObject slots.
computeEffectiveAddress(Address(obj, dataOffset), temp);
storePrivateValue(temp, Address(obj, dataSlotOffset));
// Write enough zero pointers into fixed data to zero every
// element. (This zeroes past the end of a byte count that's
// not a multiple of pointer size. That's okay, because fixed
// data is a count of 8-byte HeapSlots (i.e. <= pointer size),
// and we won't inline unless the desired memory fits in that
// space.)
static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");
size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char*);
for (size_t i = 0; i < numZeroPointers; i++) {
storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char*)));
}
MOZ_ASSERT(nbytes > 0, "Zero-length TypedArrays need ZeroLengthArrayData");
} else {
if (lengthKind == TypedArrayLength::Fixed) {
move32(Imm32(length), lengthReg);
}
// Ensure volatile |obj| is saved across the call.
if (obj.volatile_()) {
liveRegs.addUnchecked(obj);
}
// Allocate a buffer on the heap to store the data elements.
PushRegsInMask(liveRegs);
using Fn = void (*)(JSContext* cx, TypedArrayObject* obj, int32_t count);
setupUnalignedABICall(temp);
loadJSContext(temp);
passABIArg(temp);
passABIArg(obj);
passABIArg(lengthReg);
callWithABI<Fn, AllocateAndInitTypedArrayBuffer>();
PopRegsInMask(liveRegs);
// Fail when data slot is UndefinedValue.
branchTestUndefined(Assembler::Equal, Address(obj, dataSlotOffset), fail);
}
}
void MacroAssembler::initGCSlots(Register obj, Register temp,
const TemplateNativeObject& templateObj) {
MOZ_ASSERT(!templateObj.isArrayObject());
// Slots of non-array objects are required to be initialized.
// Use the values currently in the template object.
uint32_t nslots = templateObj.slotSpan();
if (nslots == 0) {
return;
}
uint32_t nfixed = templateObj.numUsedFixedSlots();
uint32_t ndynamic = templateObj.numDynamicSlots();
// Attempt to group slot writes such that we minimize the amount of
// duplicated data we need to embed in code and load into registers. In
// general, most template object slots will be undefined except for any
// reserved slots. Since reserved slots come first, we split the object
// logically into independent non-UndefinedValue writes to the head and
// duplicated writes of UndefinedValue to the tail. For the majority of
// objects, the "tail" will be the entire slot range.
//
// The template object may be a CallObject, in which case we need to
// account for uninitialized lexical slots as well as undefined
  // slots. Uninitialized lexical slots appear in CallObjects if the function
// has parameter expressions, in which case closed over parameters have
// TDZ. Uninitialized slots come before undefined slots in CallObjects.
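  //
  // As an illustration (not a layout taken from an actual template object),
  // slots [reserved0, reserved1, <uninitialized lexical>, <uninitialized
  // lexical>, undefined, undefined] give startOfUninitialized == 2 and
  // startOfUndefined == 4.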
auto [startOfUninitialized, startOfUndefined] =
FindStartOfUninitializedAndUndefinedSlots(templateObj, nslots);
MOZ_ASSERT(startOfUninitialized <= nfixed); // Reserved slots must be fixed.
MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
MOZ_ASSERT_IF(!templateObj.isCallObject() &&
!templateObj.isBlockLexicalEnvironmentObject(),
startOfUninitialized == startOfUndefined);
// Copy over any preserved reserved slots.
copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);
// Fill the rest of the fixed slots with undefined and uninitialized.
size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
fillSlotsWithUninitialized(Address(obj, offset), temp, startOfUninitialized,
std::min(startOfUndefined, nfixed));
if (startOfUndefined < nfixed) {
offset = NativeObject::getFixedSlotOffset(startOfUndefined);
fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined,
nfixed);
}
if (ndynamic) {
// We are short one register to do this elegantly. Borrow the obj
// register briefly for our slots base address.
push(obj);
loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);
// Fill uninitialized slots if necessary. Otherwise initialize all
// slots to undefined.
if (startOfUndefined > nfixed) {
MOZ_ASSERT(startOfUninitialized != startOfUndefined);
fillSlotsWithUninitialized(Address(obj, 0), temp, 0,
startOfUndefined - nfixed);
size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
fillSlotsWithUndefined(Address(obj, offset), temp,
startOfUndefined - nfixed, ndynamic);
} else {
fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
}
pop(obj);
}
}
void MacroAssembler::initGCThing(Register obj, Register temp,
const TemplateObject& templateObj,
bool initContents) {
// Fast initialization of an empty object returned by allocateObject().
storePtr(ImmGCPtr(templateObj.shape()),
Address(obj, JSObject::offsetOfShape()));
if (templateObj.isNativeObject()) {
const TemplateNativeObject& ntemplate =
templateObj.asTemplateNativeObject();
MOZ_ASSERT(!ntemplate.hasDynamicElements());
// If the object has dynamic slots, the slots member has already been
// filled in.
if (ntemplate.numDynamicSlots() == 0) {
storePtr(ImmPtr(emptyObjectSlots),
Address(obj, NativeObject::offsetOfSlots()));
}
if (ntemplate.isArrayObject()) {
// Can't skip initializing reserved slots.
MOZ_ASSERT(initContents);
int elementsOffset = NativeObject::offsetOfFixedElements();
computeEffectiveAddress(Address(obj, elementsOffset), temp);
storePtr(temp, Address(obj, NativeObject::offsetOfElements()));
// Fill in the elements header.
store32(
Imm32(ntemplate.getDenseCapacity()),
Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
store32(Imm32(ntemplate.getDenseInitializedLength()),
Address(obj, elementsOffset +
ObjectElements::offsetOfInitializedLength()));
store32(Imm32(ntemplate.getArrayLength()),
Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
store32(Imm32(ObjectElements::FIXED),
Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
} else if (ntemplate.isArgumentsObject()) {
// The caller will initialize the reserved slots.
MOZ_ASSERT(!initContents);
storePtr(ImmPtr(emptyObjectElements),
Address(obj, NativeObject::offsetOfElements()));
} else {
// If the target type could be a TypedArray that maps shared memory
// then this would need to store emptyObjectElementsShared in that case.
MOZ_ASSERT(!ntemplate.isSharedMemory());
// Can't skip initializing reserved slots.
MOZ_ASSERT(initContents);
storePtr(ImmPtr(emptyObjectElements),
Address(obj, NativeObject::offsetOfElements()));
initGCSlots(obj, temp, ntemplate);
}
} else {
MOZ_CRASH("Unknown object");
}
#ifdef JS_GC_PROBES
AllocatableRegisterSet regs(RegisterSet::Volatile());
LiveRegisterSet save(regs.asLiveSet());
PushRegsInMask(save);
regs.takeUnchecked(obj);
Register temp2 = regs.takeAnyGeneral();
using Fn = void (*)(JSObject* obj);
setupUnalignedABICall(temp2);
passABIArg(obj);
callWithABI<Fn, TraceCreateObject>();
PopRegsInMask(save);
#endif
}
static size_t StringCharsByteLength(const JSLinearString* linear) {
CharEncoding encoding =
linear->hasLatin1Chars() ? CharEncoding::Latin1 : CharEncoding::TwoByte;
size_t encodingSize = encoding == CharEncoding::Latin1
? sizeof(JS::Latin1Char)
: sizeof(char16_t);
return linear->length() * encodingSize;
}
bool MacroAssembler::canCompareStringCharsInline(const JSLinearString* linear) {
// Limit the number of inline instructions used for character comparisons. Use
// the same instruction limit for both encodings, i.e. two-byte uses half the
// limit of Latin-1 strings.
constexpr size_t ByteLengthCompareCutoff = 32;
size_t byteLength = StringCharsByteLength(linear);
return 0 < byteLength && byteLength <= ByteLengthCompareCutoff;
}
template <typename T, typename CharT>
static inline T CopyCharacters(const CharT* chars) {
T value = 0;
std::memcpy(&value, chars, sizeof(T));
return value;
}
template <typename T>
static inline T CopyCharacters(const JSLinearString* linear, size_t index) {
JS::AutoCheckCannotGC nogc;
if (linear->hasLatin1Chars()) {
MOZ_ASSERT(index + sizeof(T) / sizeof(JS::Latin1Char) <= linear->length());
return CopyCharacters<T>(linear->latin1Chars(nogc) + index);
}
MOZ_ASSERT(sizeof(T) >= sizeof(char16_t));
MOZ_ASSERT(index + sizeof(T) / sizeof(char16_t) <= linear->length());
return CopyCharacters<T>(linear->twoByteChars(nogc) + index);
}
void MacroAssembler::branchIfNotStringCharsEquals(Register stringChars,
const JSLinearString* linear,
Label* label) {
CharEncoding encoding =
linear->hasLatin1Chars() ? CharEncoding::Latin1 : CharEncoding::TwoByte;
size_t encodingSize = encoding == CharEncoding::Latin1
? sizeof(JS::Latin1Char)
: sizeof(char16_t);
size_t byteLength = StringCharsByteLength(linear);
size_t pos = 0;
for (size_t stride : {8, 4, 2, 1}) {
while (byteLength >= stride) {
Address addr(stringChars, pos * encodingSize);
switch (stride) {
case 8: {
auto x = CopyCharacters<uint64_t>(linear, pos);
branch64(Assembler::NotEqual, addr, Imm64(x), label);
break;
}
case 4: {
auto x = CopyCharacters<uint32_t>(linear, pos);
branch32(Assembler::NotEqual, addr, Imm32(x), label);
break;
}
case 2: {
auto x = CopyCharacters<uint16_t>(linear, pos);
branch16(Assembler::NotEqual, addr, Imm32(x), label);
break;
}
case 1: {
auto x = CopyCharacters<uint8_t>(linear, pos);
branch8(Assembler::NotEqual, addr, Imm32(x), label);
break;
}
}
byteLength -= stride;
pos += stride / encodingSize;
}
// Prefer a single comparison for trailing bytes instead of doing
// multiple consecutive comparisons.
//
// For example when comparing against the string "example", emit two
// four-byte comparisons against "exam" and "mple" instead of doing
// three comparisons against "exam", "pl", and finally "e".
if (pos > 0 && byteLength > stride / 2) {
MOZ_ASSERT(stride == 8 || stride == 4);
size_t prev = pos - (stride - byteLength) / encodingSize;
Address addr(stringChars, prev * encodingSize);
switch (stride) {
case 8: {
auto x = CopyCharacters<uint64_t>(linear, prev);
branch64(Assembler::NotEqual, addr, Imm64(x), label);
break;
}
case 4: {
auto x = CopyCharacters<uint32_t>(linear, prev);
branch32(Assembler::NotEqual, addr, Imm32(x), label);
break;
}
}
// Break from the loop, because we've finished the complete string.
break;
}
}
}
void MacroAssembler::loadStringCharsForCompare(Register input,
const JSLinearString* linear,
Register stringChars,
Label* fail) {
CharEncoding encoding =
linear->hasLatin1Chars() ? CharEncoding::Latin1 : CharEncoding::TwoByte;
// Take the slow path when the string is a rope or has a different character
// representation.
branchIfRope(input, fail);
if (encoding == CharEncoding::Latin1) {
branchTwoByteString(input, fail);
} else {
JS::AutoCheckCannotGC nogc;
if (mozilla::IsUtf16Latin1(linear->twoByteRange(nogc))) {
branchLatin1String(input, fail);
} else {
// This case was already handled in the caller.
#ifdef DEBUG
Label ok;
branchTwoByteString(input, &ok);
assumeUnreachable("Unexpected Latin-1 string");
bind(&ok);
#endif
}
}
#ifdef DEBUG
{
size_t length = linear->length();
MOZ_ASSERT(length > 0);
Label ok;
branch32(Assembler::AboveOrEqual,
Address(input, JSString::offsetOfLength()), Imm32(length), &ok);
assumeUnreachable("Input mustn't be smaller than search string");
bind(&ok);
}
#endif
// Load the input string's characters.
loadStringChars(input, stringChars, encoding);
}
void MacroAssembler::compareStringChars(JSOp op, Register stringChars,
const JSLinearString* linear,
Register output) {
MOZ_ASSERT(IsEqualityOp(op));
size_t byteLength = StringCharsByteLength(linear);
// Prefer a single compare-and-set instruction if possible.
if (byteLength == 1 || byteLength == 2 || byteLength == 4 ||
byteLength == 8) {
auto cond = JSOpToCondition(op, /* isSigned = */ false);
Address addr(stringChars, 0);
switch (byteLength) {
case 8: {
auto x = CopyCharacters<uint64_t>(linear, 0);
cmp64Set(cond, addr, Imm64(x), output);
break;
}
case 4: {
auto x = CopyCharacters<uint32_t>(linear, 0);
cmp32Set(cond, addr, Imm32(x), output);
break;
}
case 2: {
auto x = CopyCharacters<uint16_t>(linear, 0);
cmp16Set(cond, addr, Imm32(x), output);
break;
}
case 1: {
auto x = CopyCharacters<uint8_t>(linear, 0);
cmp8Set(cond, addr, Imm32(x), output);
break;
}
}
} else {
Label setNotEqualResult;
branchIfNotStringCharsEquals(stringChars, linear, &setNotEqualResult);
// Falls through if both strings are equal.
Label done;
move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq), output);
jump(&done);
bind(&setNotEqualResult);
move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), output);
bind(&done);
}
}
void MacroAssembler::compareStrings(JSOp op, Register left, Register right,
Register result, Label* fail) {
MOZ_ASSERT(left != result);
MOZ_ASSERT(right != result);
MOZ_ASSERT(IsEqualityOp(op) || IsRelationalOp(op));
Label notPointerEqual;
// If operands point to the same instance, the strings are trivially equal.
branchPtr(Assembler::NotEqual, left, right,
            IsEqualityOp(op) ? &notPointerEqual : fail);
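  // Pointer-equal strings are equal, so |result| is true for Eq/StrictEq and,
  // for the relational ops that reach here, for Le/Ge.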
move32(Imm32(op == JSOp::Eq || op == JSOp::StrictEq || op == JSOp::Le ||
op == JSOp::Ge),
result);
if (IsEqualityOp(op)) {
Label done;
jump(&done);
    bind(&notPointerEqual);
Label leftIsNotAtom;
Label setNotEqualResult;
// Atoms cannot be equal to each other if they point to different strings.
Imm32 atomBit(JSString::ATOM_BIT);
branchTest32(Assembler::Zero, Address(left, JSString::offsetOfFlags()),
atomBit, &leftIsNotAtom);
branchTest32(Assembler::NonZero, Address(right, JSString::offsetOfFlags()),
atomBit, &setNotEqualResult);
bind(&leftIsNotAtom);
    // Strings of different length can never be equal. If the lengths are
    // equal, the contents still have to be compared, so take the slow path.
    loadStringLength(left, result);
    branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()),
             result, fail);
bind(&setNotEqualResult);
move32(Imm32(op == JSOp::Ne || op == JSOp::StrictNe), result);
bind(&done);
}
}
void MacroAssembler::loadStringChars(Register str, Register dest,
CharEncoding encoding) {
MOZ_ASSERT(str != dest);
if (JitOptions.spectreStringMitigations) {
if (encoding == CharEncoding::Latin1) {
// If the string is a rope, zero the |str| register. The code below
// depends on str->flags so this should block speculative execution.
movePtr(ImmWord(0), dest);
test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::LINEAR_BIT), dest, str);
} else {
// If we're loading TwoByte chars, there's an additional risk:
// if the string has Latin1 chars, we could read out-of-bounds. To
// prevent this, we check both the Linear and Latin1 bits. We don't
// have a scratch register, so we use these flags also to block
// speculative execution, similar to the use of 0 above.
MOZ_ASSERT(encoding == CharEncoding::TwoByte);
static constexpr uint32_t Mask =
JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT;
static_assert(Mask < 2048,
"Mask should be a small, near-null value to ensure we "
"block speculative execution when it's used as string "
"pointer");
move32(Imm32(Mask), dest);
and32(Address(str, JSString::offsetOfFlags()), dest);
cmp32MovePtr(Assembler::NotEqual, dest, Imm32(JSString::LINEAR_BIT), dest,
str);
}
}
// Load the inline chars.
computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
dest);
// If it's not an inline string, load the non-inline chars. Use a
// conditional move to prevent speculative execution.
test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::INLINE_CHARS_BIT),
Address(str, JSString::offsetOfNonInlineChars()), dest);
}
void MacroAssembler::loadNonInlineStringChars(Register str, Register dest,
CharEncoding encoding) {
MOZ_ASSERT(str != dest);
if (JitOptions.spectreStringMitigations) {
// If the string is a rope, has inline chars, or has a different
// character encoding, set str to a near-null value to prevent
// speculative execution below (when reading str->nonInlineChars).
static constexpr uint32_t Mask = JSString::LINEAR_BIT |
JSString::INLINE_CHARS_BIT |
JSString::LATIN1_CHARS_BIT;
static_assert(Mask < 2048,
"Mask should be a small, near-null value to ensure we "
"block speculative execution when it's used as string "
"pointer");
uint32_t expectedBits = JSString::LINEAR_BIT;
if (encoding == CharEncoding::Latin1) {
expectedBits |= JSString::LATIN1_CHARS_BIT;
}
move32(Imm32(Mask), dest);
and32(Address(str, JSString::offsetOfFlags()), dest);
cmp32MovePtr(Assembler::NotEqual, dest, Imm32(expectedBits), dest, str);
}
loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
}
void MacroAssembler::storeNonInlineStringChars(Register chars, Register str) {
MOZ_ASSERT(chars != str);
storePtr(chars, Address(str, JSString::offsetOfNonInlineChars()));
}
void MacroAssembler::loadInlineStringCharsForStore(Register str,
Register dest) {
computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()),
dest);
}
void MacroAssembler::loadInlineStringChars(Register str, Register dest,
CharEncoding encoding) {
MOZ_ASSERT(str != dest);
if (JitOptions.spectreStringMitigations) {
// Making this Spectre-safe is a bit complicated: using
// computeEffectiveAddress and then zeroing the output register if
// non-inline is not sufficient: when the index is very large, it would
// allow reading |nullptr + index|. Just fall back to loadStringChars
// for now.
loadStringChars(str, dest, encoding);
} else {
computeEffectiveAddress(
Address(str, JSInlineString::offsetOfInlineStorage()), dest);
}
}
void MacroAssembler::loadRopeLeftChild(Register str, Register dest) {
MOZ_ASSERT(str != dest);
if (JitOptions.spectreStringMitigations) {
// Zero the output register if the input was not a rope.
movePtr(ImmWord(0), dest);
test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::LINEAR_BIT),
Address(str, JSRope::offsetOfLeft()), dest);
} else {
loadPtr(Address(str, JSRope::offsetOfLeft()), dest);
}
}
void MacroAssembler::loadRopeRightChild(Register str, Register dest) {
MOZ_ASSERT(str != dest);
if (JitOptions.spectreStringMitigations) {
// Zero the output register if the input was not a rope.
movePtr(ImmWord(0), dest);
test32LoadPtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::LINEAR_BIT),
Address(str, JSRope::offsetOfRight()), dest);
} else {
loadPtr(Address(str, JSRope::offsetOfRight()), dest);
}
}
void MacroAssembler::storeRopeChildren(Register left, Register right,
Register str) {
storePtr(left, Address(str, JSRope::offsetOfLeft()));
storePtr(right, Address(str, JSRope::offsetOfRight()));
}
void MacroAssembler::loadDependentStringBase(Register str, Register dest) {
MOZ_ASSERT(str != dest);
if (JitOptions.spectreStringMitigations) {
// If the string is not a dependent string, zero the |str| register.
// The code below loads str->base so this should block speculative
// execution.
movePtr(ImmWord(0), dest);
test32MovePtr(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::DEPENDENT_BIT), dest, str);
}
loadPtr(Address(str, JSDependentString::offsetOfBase()), dest);
}
void MacroAssembler::storeDependentStringBase(Register base, Register str) {
storePtr(base, Address(str, JSDependentString::offsetOfBase()));
}
void MacroAssembler::branchIfMaybeSplitSurrogatePair(Register leftChild,
Register index,
Register scratch,
Label* maybeSplit,
Label* notSplit) {
// If |index| is the last character of the left child and the left child
// is a two-byte string, it's possible that a surrogate pair is split
// between the left and right child of a rope.
// Can't be a split surrogate when the left child is a Latin-1 string.
branchLatin1String(leftChild, notSplit);
// Can't be a split surrogate when |index + 1| is in the left child.
add32(Imm32(1), index, scratch);
branch32(Assembler::Above, Address(leftChild, JSString::offsetOfLength()),
scratch, notSplit);
// Jump to |maybeSplit| if the left child is another rope.
branchIfRope(leftChild, maybeSplit);
// Load the character at |index|.
loadStringChars(leftChild, scratch, CharEncoding::TwoByte);
loadChar(scratch, index, scratch, CharEncoding::TwoByte);
// Jump to |maybeSplit| if the last character is a lead surrogate.
branchIfLeadSurrogate(scratch, scratch, maybeSplit);
}
void MacroAssembler::loadRopeChild(CharKind kind, Register str, Register index,
Register output, Register maybeScratch,
Label* isLinear, Label* splitSurrogate) {
// This follows JSString::getChar.
branchIfNotRope(str, isLinear);
loadRopeLeftChild(str, output);
Label loadedChild;
if (kind == CharKind::CharCode) {
// Check if |index| is contained in the left child.
branch32(Assembler::Above, Address(output, JSString::offsetOfLength()),
index, &loadedChild);
} else {
MOZ_ASSERT(maybeScratch != InvalidReg);
// Check if |index| is contained in the left child.
Label loadRight;
branch32(Assembler::BelowOrEqual,
Address(output, JSString::offsetOfLength()), index, &loadRight);
{
// Handle possible split surrogate pairs.
branchIfMaybeSplitSurrogatePair(output, index, maybeScratch,
splitSurrogate, &loadedChild);
jump(&loadedChild);
}
bind(&loadRight);
}
// The index must be in the rightChild.
loadRopeRightChild(str, output);
bind(&loadedChild);
}
void MacroAssembler::branchIfCanLoadStringChar(CharKind kind, Register str,
Register index, Register scratch,
Register maybeScratch,
Label* label) {
Label splitSurrogate;
loadRopeChild(kind, str, index, scratch, maybeScratch, label,
&splitSurrogate);
// Branch if the left resp. right side is linear.
branchIfNotRope(scratch, label);
if (kind == CharKind::CodePoint) {
bind(&splitSurrogate);
}
}
void MacroAssembler::branchIfNotCanLoadStringChar(CharKind kind, Register str,
Register index,
Register scratch,
Register maybeScratch,
Label* label) {
Label done;
loadRopeChild(kind, str, index, scratch, maybeScratch, &done, label);
// Branch if the left or right side is another rope.
branchIfRope(scratch, label);
bind(&done);
}
void MacroAssembler::loadStringChar(CharKind kind, Register str, Register index,
Register output, Register scratch1,
Register scratch2, Label* fail) {
MOZ_ASSERT(str != output);
MOZ_ASSERT(str != index);
MOZ_ASSERT(index != output);
MOZ_ASSERT_IF(kind == CharKind::CodePoint, index != scratch1);
MOZ_ASSERT(output != scratch1);
MOZ_ASSERT(output != scratch2);
// Use scratch1 for the index (adjusted below).
if (index != scratch1) {
move32(index, scratch1);
}
movePtr(str, output);
// This follows JSString::getChar.
Label notRope;
  branchIfNotRope(str, &notRope);
loadRopeLeftChild(str, output);
// Check if the index is contained in the leftChild.
Label loadedChild, notInLeft;
spectreBoundsCheck32(scratch1, Address(output, JSString::offsetOfLength()),
                       scratch2, &notInLeft);
if (kind == CharKind::CodePoint) {
branchIfMaybeSplitSurrogatePair(output, scratch1, scratch2, fail,
&loadedChild);
}
jump(&loadedChild);
// The index must be in the rightChild.
// index -= rope->leftChild()->length()
  bind(&notInLeft);
sub32(Address(output, JSString::offsetOfLength()), scratch1);
loadRopeRightChild(str, output);
// If the left or right side is another rope, give up.
bind(&loadedChild);
branchIfRope(output, fail);
  bind(&notRope);
Label isLatin1, done;
branchLatin1String(output, &isLatin1);
{
loadStringChars(output, scratch2, CharEncoding::TwoByte);
if (kind == CharKind::CharCode) {
loadChar(scratch2, scratch1, output, CharEncoding::TwoByte);
} else {
// Load the first character.
addToCharPtr(scratch2, scratch1, CharEncoding::TwoByte);
loadChar(Address(scratch2, 0), output, CharEncoding::TwoByte);
// If the first character isn't a lead surrogate, go to |done|.
branchIfNotLeadSurrogate(output, &done);
// branchIfMaybeSplitSurrogatePair ensures that the surrogate pair can't
// split between two rope children. So if |index + 1 < str.length|, then
// |index| and |index + 1| are in the same rope child.
//
// NB: We use the non-adjusted |index| and |str| inputs, because |output|
// was overwritten and no longer contains the rope child.
      // Check that |index + 1| is a valid index into |str|; if not, we're
      // done.
add32(Imm32(1), index, scratch1);
spectreBoundsCheck32(scratch1, Address(str, JSString::offsetOfLength()),
InvalidReg, &done);
// Then load the next character at |scratch2 + sizeof(char16_t)|.
loadChar(Address(scratch2, sizeof(char16_t)), scratch1,
CharEncoding::TwoByte);
// If the next character isn't a trail surrogate, go to |done|.
branchIfNotTrailSurrogate(scratch1, scratch2, &done);
// Inlined unicode::UTF16Decode(char16_t, char16_t).
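      // Computes ((lead - LeadSurrogateMin) << 10) +
      // (trail - TrailSurrogateMin) + NonBMPMin, with the constant terms
      // folded into a single addend.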
lshift32(Imm32(10), output);
add32(Imm32(unicode::NonBMPMin - (unicode::LeadSurrogateMin << 10) -
unicode::TrailSurrogateMin),
scratch1);
add32(scratch1, output);
}
jump(&done);
}
bind(&isLatin1);
{
loadStringChars(output, scratch2, CharEncoding::Latin1);
loadChar(scratch2, scratch1, output, CharEncoding::Latin1);
}
bind(&done);
}
void MacroAssembler::loadStringChar(Register str, int32_t index,
Register output, Register scratch1,
Register scratch2, Label* fail) {
MOZ_ASSERT(str != output);
MOZ_ASSERT(output != scratch1);
MOZ_ASSERT(output != scratch2);
if (index == 0) {
movePtr(str, scratch1);
// This follows JSString::getChar.
Label notRope;
    branchIfNotRope(str, &notRope);
loadRopeLeftChild(str, scratch1);
// Rope children can't be empty, so the index can't be in the right side.
// If the left side is another rope, give up.
branchIfRope(scratch1, fail);
    bind(&notRope);
Label isLatin1, done;
branchLatin1String(scratch1, &isLatin1);
loadStringChars(scratch1, scratch2, CharEncoding::TwoByte);
loadChar(Address(scratch2, 0), output, CharEncoding::TwoByte);
jump(&done);
bind(&isLatin1);
loadStringChars(scratch1, scratch2, CharEncoding::Latin1);
loadChar(Address(scratch2, 0), output, CharEncoding::Latin1);
bind(&done);
} else {
move32(Imm32(index), scratch1);
loadStringChar(str, scratch1, output, scratch1, scratch2, fail);
}
}
void MacroAssembler::loadStringIndexValue(Register str, Register dest,
Label* fail) {
MOZ_ASSERT(str != dest);
load32(Address(str, JSString::offsetOfFlags()), dest);
  // Fail if the string does not have a cached index value.
branchTest32(Assembler::Zero, dest, Imm32(JSString::INDEX_VALUE_BIT), fail);
// Extract the index.
rshift32(Imm32(JSString::INDEX_VALUE_SHIFT), dest);
}
void MacroAssembler::loadChar(Register chars, Register index, Register dest,
CharEncoding encoding, int32_t offset /* = 0 */) {
if (encoding == CharEncoding::Latin1) {
loadChar(BaseIndex(chars, index, TimesOne, offset), dest, encoding);
} else {
loadChar(BaseIndex(chars, index, TimesTwo, offset), dest, encoding);
}
}
void MacroAssembler::addToCharPtr(Register chars, Register index,
CharEncoding encoding) {
if (encoding == CharEncoding::Latin1) {
static_assert(sizeof(char) == 1,
"Latin-1 string index shouldn't need scaling");
addPtr(index, chars);
} else {
computeEffectiveAddress(BaseIndex(chars, index, TimesTwo), chars);
}
}
void MacroAssembler::branchIfNotLeadSurrogate(Register src, Label* label) {
branch32(Assembler::Below, src, Imm32(unicode::LeadSurrogateMin), label);
branch32(Assembler::Above, src, Imm32(unicode::LeadSurrogateMax), label);
}
void MacroAssembler::branchSurrogate(Assembler::Condition cond, Register src,
Register scratch, Label* label,
SurrogateChar surrogateChar) {
// For TrailSurrogateMin ≤ x ≤ TrailSurrogateMax and
// LeadSurrogateMin ≤ x ≤ LeadSurrogateMax, the following equations hold.
//
// SurrogateMin ≤ x ≤ SurrogateMax
// <> SurrogateMin ≤ x ≤ SurrogateMin + 2^10 - 1
// <> ((x - SurrogateMin) >>> 10) = 0 where >>> is an unsigned-shift
// See Hacker's Delight, section 4-1 for details.
//
// ((x - SurrogateMin) >>> 10) = 0
// <> floor((x - SurrogateMin) / 1024) = 0
// <> floor((x / 1024) - (SurrogateMin / 1024)) = 0
// <> floor(x / 1024) = SurrogateMin / 1024
// <> floor(x / 1024) * 1024 = SurrogateMin
// <> (x >>> 10) << 10 = SurrogateMin
// <> x & ~(2^10 - 1) = SurrogateMin
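// As a concrete check (illustrative values only): 0xD83D & 0xFC00 == 0xD800
// == LeadSurrogateMin, and 0xDE00 & 0xFC00 == 0xDC00 == TrailSurrogateMin.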
constexpr char16_t SurrogateMask = 0xFC00;
char16_t SurrogateMin = surrogateChar == SurrogateChar::Lead
? unicode::LeadSurrogateMin
: unicode::TrailSurrogateMin;
and32(Imm32(SurrogateMask), src, scratch);
branch32(cond, scratch, Imm32(SurrogateMin), label);
}
void MacroAssembler::loadStringFromUnit(Register unit, Register dest,
const StaticStrings& staticStrings) {
movePtr(ImmPtr(&staticStrings.unitStaticTable), dest);
loadPtr(BaseIndex(dest, unit, ScalePointer), dest);
}
void MacroAssembler::loadLengthTwoString(Register c1, Register c2,
Register dest,
const StaticStrings& staticStrings) {
// Compute (toSmallCharTable[c1] << SMALL_CHAR_BITS) + toSmallCharTable[c2]
// to obtain the index into `StaticStrings::length2StaticTable`.
static_assert(sizeof(StaticStrings::SmallChar) == 1);
movePtr(ImmPtr(&StaticStrings::toSmallCharTable.storage), dest);
load8ZeroExtend(BaseIndex(dest, c1, Scale::TimesOne), c1);
load8ZeroExtend(BaseIndex(dest, c2, Scale::TimesOne), c2);
lshift32(Imm32(StaticStrings::SMALL_CHAR_BITS), c1);
add32(c2, c1);
// Look up the string from the computed index.
movePtr(ImmPtr(&staticStrings.length2StaticTable), dest);
loadPtr(BaseIndex(dest, c1, ScalePointer), dest);
}
void MacroAssembler::lookupStaticString(Register ch, Register dest,
const StaticStrings& staticStrings) {
MOZ_ASSERT(ch != dest);
movePtr(ImmPtr(&staticStrings.unitStaticTable), dest);
loadPtr(BaseIndex(dest, ch, ScalePointer), dest);
}
void MacroAssembler::lookupStaticString(Register ch, Register dest,
const StaticStrings& staticStrings,
Label* fail) {
MOZ_ASSERT(ch != dest);
boundsCheck32PowerOfTwo(ch, StaticStrings::UNIT_STATIC_LIMIT, fail);
movePtr(ImmPtr(&staticStrings.unitStaticTable), dest);
loadPtr(BaseIndex(dest, ch, ScalePointer), dest);
}
void MacroAssembler::lookupStaticString(Register ch1, Register ch2,
Register dest,
const StaticStrings& staticStrings,
Label* fail) {
MOZ_ASSERT(ch1 != dest);
MOZ_ASSERT(ch2 != dest);
branch32(Assembler::AboveOrEqual, ch1,
Imm32(StaticStrings::SMALL_CHAR_TABLE_SIZE), fail);
branch32(Assembler::AboveOrEqual, ch2,
Imm32(StaticStrings::SMALL_CHAR_TABLE_SIZE), fail);
movePtr(ImmPtr(&StaticStrings::toSmallCharTable.storage), dest);
load8ZeroExtend(BaseIndex(dest, ch1, Scale::TimesOne), ch1);
load8ZeroExtend(BaseIndex(dest, ch2, Scale::TimesOne), ch2);
branch32(Assembler::Equal, ch1, Imm32(StaticStrings::INVALID_SMALL_CHAR),
fail);
branch32(Assembler::Equal, ch2, Imm32(StaticStrings::INVALID_SMALL_CHAR),
fail);
lshift32(Imm32(StaticStrings::SMALL_CHAR_BITS), ch1);
add32(ch2, ch1);
// Look up the string from the computed index.
movePtr(ImmPtr(&staticStrings.length2StaticTable), dest);
loadPtr(BaseIndex(dest, ch1, ScalePointer), dest);
}
void MacroAssembler::lookupStaticIntString(Register integer, Register dest,
Register scratch,
const StaticStrings& staticStrings,
Label* fail) {
MOZ_ASSERT(integer != scratch);
boundsCheck32PowerOfTwo(integer, StaticStrings::INT_STATIC_LIMIT, fail);
movePtr(ImmPtr(&staticStrings.intStaticTable), scratch);
loadPtr(BaseIndex(scratch, integer, ScalePointer), dest);
}
void MacroAssembler::loadInt32ToStringWithBase(
Register input, Register base, Register dest, Register scratch1,
Register scratch2, const StaticStrings& staticStrings,
const LiveRegisterSet& volatileRegs, bool lowerCase, Label* fail) {
#ifdef DEBUG
Label baseBad, baseOk;
branch32(Assembler::LessThan, base, Imm32(2), &baseBad);
branch32(Assembler::LessThanOrEqual, base, Imm32(36), &baseOk);
bind(&baseBad);
assumeUnreachable("base must be in range [2, 36]");
bind(&baseOk);
#endif
// Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
auto toChar = [this, base, lowerCase](Register r) {
#ifdef DEBUG
Label ok;
branch32(Assembler::Below, r, base, &ok);
assumeUnreachable("bad digit");
bind(&ok);
#else
// Silence unused lambda capture warning.
(void)base;
#endif
Label done;
add32(Imm32('0'), r);
branch32(Assembler::BelowOrEqual, r, Imm32('9'), &done);
add32(Imm32((lowerCase ? 'a' : 'A') - '0' - 10), r);
bind(&done);
};
// Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
Label lengthTwo, done;
branch32(Assembler::AboveOrEqual, input, base, &lengthTwo);
{
move32(input, scratch1);
toChar(scratch1);
loadStringFromUnit(scratch1, dest, staticStrings);
jump(&done);
}
bind(&lengthTwo);
// Compute |base * base|.
move32(base, scratch1);
mul32(scratch1, scratch1);
// Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
branch32(Assembler::AboveOrEqual, input, scratch1, fail);
{
// Compute |scratch1 = input / base| and |scratch2 = input % base|.
move32(input, scratch1);
flexibleDivMod32(base, scratch1, scratch2, true, volatileRegs);
// Compute the digits of the divisor and remainder.
toChar(scratch1);
toChar(scratch2);
// Look up the 2-character digit string in the small-char table.
loadLengthTwoString(scratch1, scratch2, dest, staticStrings);
}
bind(&done);
}
void MacroAssembler::loadInt32ToStringWithBase(
Register input, int32_t base, Register dest, Register scratch1,
Register scratch2, const StaticStrings& staticStrings, bool lowerCase,
Label* fail) {
MOZ_ASSERT(2 <= base && base <= 36, "base must be in range [2, 36]");
// Compute |"0123456789abcdefghijklmnopqrstuvwxyz"[r]|.
auto toChar = [this, base, lowerCase](Register r) {
#ifdef DEBUG
Label ok;
branch32(Assembler::Below, r, Imm32(base), &ok);
assumeUnreachable("bad digit");
bind(&ok);
#endif
if (base <= 10) {
add32(Imm32('0'), r);
} else {
Label done;
add32(Imm32('0'), r);
branch32(Assembler::BelowOrEqual, r, Imm32('9'), &done);
add32(Imm32((lowerCase ? 'a' : 'A') - '0' - 10), r);
bind(&done);
}
};
// Perform a "unit" lookup when |unsigned(input) < unsigned(base)|.
Label lengthTwo, done;
branch32(Assembler::AboveOrEqual, input, Imm32(base), &lengthTwo);
{
move32(input, scratch1);
toChar(scratch1);
loadStringFromUnit(scratch1, dest, staticStrings);
jump(&done);
}
bind(&lengthTwo);
// Perform a "length2" lookup when |unsigned(input) < unsigned(base * base)|.
branch32(Assembler::AboveOrEqual, input, Imm32(base * base), fail);
{
// Compute |scratch1 = input / base| and |scratch2 = input % base|.
if (mozilla::IsPowerOfTwo(uint32_t(base))) {
uint32_t shift = mozilla::FloorLog2(base);
rshift32(Imm32(shift), input, scratch1);
and32(Imm32((uint32_t(1) << shift) - 1), input, scratch2);
} else {
// The following code matches CodeGenerator::visitUDivOrModConstant()
// for x86-shared. Also see Hacker's Delight 2nd edition, chapter 10-8
// "Unsigned Division by 7" for the case when |rmc.multiplier| exceeds
// UINT32_MAX and we need to adjust the shift amount.
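// Illustrative example (the exact constants come from
// computeUnsignedDivisionConstants): for base = 10 one well-known pair is
// M = 0xCCCCCCCD with shift = 3, so |floor(n / 10)| becomes
// |(n * 0xCCCCCCCD) >> 35| for every uint32 |n|.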
auto rmc = ReciprocalMulConstants::computeUnsignedDivisionConstants(base);
// We first compute |q = (M * n) >> 32|, where M = rmc.multiplier.
mulHighUnsigned32(Imm32(rmc.multiplier), input, scratch1);
if (rmc.multiplier > UINT32_MAX) {
// M >= 2^32 and shift == 0 is impossible, as d >= 2 implies that
// ((M * n) >> (32 + shift)) >= n > floor(n/d) whenever n >= d,
// contradicting the proof of correctness in computeDivisionConstants.
MOZ_ASSERT(rmc.shiftAmount > 0);
MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 33));
// Compute |t = (n - q) / 2|.
move32(input, scratch2);
sub32(scratch1, scratch2);
rshift32(Imm32(1), scratch2);
// Compute |t = (n - q) / 2 + q = (n + q) / 2|.
add32(scratch2, scratch1);
// Finish the computation |q = floor(n / d)|.
rshift32(Imm32(rmc.shiftAmount - 1), scratch1);
} else {
rshift32(Imm32(rmc.shiftAmount), scratch1);
}
// Compute the remainder from |r = n - q * d|.
move32(scratch1, dest);
mul32(Imm32(base), dest);
move32(input, scratch2);
sub32(dest, scratch2);
}
// Compute the digits of the divisor and remainder.
toChar(scratch1);
toChar(scratch2);
// Look up the 2-character digit string in the small-char table.
loadLengthTwoString(scratch1, scratch2, dest, staticStrings);
}
bind(&done);
}
void MacroAssembler::loadBigIntDigits(Register bigInt, Register digits) {
MOZ_ASSERT(digits != bigInt);
// Load the inline digits.
computeEffectiveAddress(Address(bigInt, BigInt::offsetOfInlineDigits()),
digits);
// If inline digits aren't used, load the heap digits. Use a conditional move
// to prevent speculative execution.
cmp32LoadPtr(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
Imm32(int32_t(BigInt::inlineDigitsLength())),
Address(bigInt, BigInt::offsetOfHeapDigits()), digits);
}
void MacroAssembler::loadBigInt64(Register bigInt, Register64 dest) {
// This code follows the implementation of |BigInt::toUint64()|. We're also
// using it for inline callers of |BigInt::toInt64()|, which works, because
// all supported Jit architectures use a two's complement representation for
// int64 values, which means the WrapToSigned call in toInt64() is a no-op.
Label done, nonZero;
branchIfBigIntIsNonZero(bigInt, &nonZero);
{
move64(Imm64(0), dest);
jump(&done);
}
bind(&nonZero);
#ifdef JS_PUNBOX64
Register digits = dest.reg;
#else
Register digits = dest.high;
#endif
loadBigIntDigits(bigInt, digits);
#if JS_PUNBOX64
// Load the first digit into the destination register.
load64(Address(digits, 0), dest);
#else
// Load the first digit into the destination register's low value.
load32(Address(digits, 0), dest.low);
// And conditionally load the second digit into the high value register.
Label twoDigits, digitsDone;
branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
Imm32(1), &twoDigits);
{
move32(Imm32(0), dest.high);
jump(&digitsDone);
}
{
bind(&twoDigits);
load32(Address(digits, sizeof(BigInt::Digit)), dest.high);
}
bind(&digitsDone);
#endif
branchTest32(Assembler::Zero, Address(bigInt, BigInt::offsetOfFlags()),
Imm32(BigInt::signBitMask()), &done);
neg64(dest);
bind(&done);
}
void MacroAssembler::loadBigIntDigit(Register bigInt, Register dest) {
Label done, nonZero;
branchIfBigIntIsNonZero(bigInt, &nonZero);
{
movePtr(ImmWord(0), dest);
jump(&done);
}
bind(&nonZero);
loadBigIntDigits(bigInt, dest);
// Load the first digit into the destination register.
loadPtr(Address(dest, 0), dest);
bind(&done);
}
void MacroAssembler::loadBigIntDigit(Register bigInt, Register dest,
Label* fail) {
MOZ_ASSERT(bigInt != dest);
branch32(Assembler::Above, Address(bigInt, BigInt::offsetOfLength()),
Imm32(1), fail);
static_assert(BigInt::inlineDigitsLength() > 0,
"Single digit BigInts use inline storage");
// Load the first inline digit into the destination register.
movePtr(ImmWord(0), dest);
cmp32LoadPtr(Assembler::NotEqual, Address(bigInt, BigInt::offsetOfLength()),
Imm32(0), Address(bigInt, BigInt::offsetOfInlineDigits()), dest);
}
void MacroAssembler::loadBigIntPtr(Register bigInt, Register dest,
Label* fail) {
loadBigIntDigit(bigInt, dest, fail);
// BigInt digits are stored as unsigned numbers. Take the failure path when
// the digit can't be stored in intptr_t.
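// For example (illustrative, assuming 64-bit digits): the digit
// 0x8000000000000000 is only accepted when the BigInt is negative (it
// becomes INTPTR_MIN after negation); any larger digit, or that digit on a
// non-negative BigInt, takes the failure path.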
Label nonNegative, done;
branchIfBigIntIsNonNegative(bigInt, &nonNegative);
{
// Negate |dest| when the BigInt is negative.
negPtr(dest);
// Test after negating to handle INTPTR_MIN correctly.
branchTestPtr(Assembler::NotSigned, dest, dest, fail);
jump(&done);
}
bind(&nonNegative);
branchTestPtr(Assembler::Signed, dest, dest, fail);
bind(&done);
}
void MacroAssembler::initializeBigInt64(Scalar::Type type, Register bigInt,
Register64 val, Register64 temp) {
MOZ_ASSERT(Scalar::isBigIntType(type));
store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));
Label done, nonZero;
branch64(Assembler::NotEqual, val, Imm64(0), &nonZero);
{
store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
jump(&done);
}
bind(&nonZero);
if (type == Scalar::BigInt64) {
// Copy the input when we're not allowed to clobber it.
if (temp != Register64::Invalid()) {
move64(val, temp);
val = temp;
}
// Set the sign-bit for negative values and then continue with the two's
// complement.
Label isPositive;
branch64(Assembler::GreaterThan, val, Imm64(0), &isPositive);
{
store32(Imm32(BigInt::signBitMask()),
Address(bigInt, BigInt::offsetOfFlags()));
neg64(val);
}
bind(&isPositive);
}
store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));
static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
"BigInt Digit size matches uintptr_t, so there's a single "
"store on 64-bit and up to two stores on 32-bit");
#ifndef JS_PUNBOX64
Label singleDigit;
branchTest32(Assembler::Zero, val.high, val.high, &singleDigit);
store32(Imm32(2), Address(bigInt, BigInt::offsetOfLength()));
bind(&singleDigit);
// We can perform a single store64 on 32-bit platforms, because inline
// storage can store at least two 32-bit integers.
static_assert(BigInt::inlineDigitsLength() >= 2,
"BigInt inline storage can store at least two digits");
#endif
store64(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));
bind(&done);
}
void MacroAssembler::initializeBigIntPtr(Register bigInt, Register val) {
store32(Imm32(0), Address(bigInt, BigInt::offsetOfFlags()));
Label done, nonZero;
branchTestPtr(Assembler::NonZero, val, val, &nonZero);
{
store32(Imm32(0), Address(bigInt, BigInt::offsetOfLength()));
jump(&done);
}
bind(&nonZero);
// Set the sign-bit for negative values and then continue with the two's
// complement.
Label isPositive;
branchTestPtr(Assembler::NotSigned, val, val, &isPositive);
{
store32(Imm32(BigInt::signBitMask()),
Address(bigInt, BigInt::offsetOfFlags()));
negPtr(val);
}
bind(&isPositive);
store32(Imm32(1), Address(bigInt, BigInt::offsetOfLength()));
static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
"BigInt Digit size matches uintptr_t");
storePtr(val, Address(bigInt, js::BigInt::offsetOfInlineDigits()));
bind(&done);
}
void MacroAssembler::copyBigIntWithInlineDigits(Register src, Register dest,
Register temp,
gc::Heap initialHeap,
Label* fail) {
branch32(Assembler::Above, Address(src, BigInt::offsetOfLength()),
Imm32(int32_t(BigInt::inlineDigitsLength())), fail);
newGCBigInt(dest, temp, initialHeap, fail);
// Copy the sign-bit, but not any of the other bits used by the GC.
load32(Address(src, BigInt::offsetOfFlags()), temp);
and32(Imm32(BigInt::signBitMask()), temp);
store32(temp, Address(dest, BigInt::offsetOfFlags()));
// Copy the length.
load32(Address(src, BigInt::offsetOfLength()), temp);
store32(temp, Address(dest, BigInt::offsetOfLength()));
// Copy the digits.
Address srcDigits(src, js::BigInt::offsetOfInlineDigits());
Address destDigits(dest, js::BigInt::offsetOfInlineDigits());
for (size_t i = 0; i < BigInt::inlineDigitsLength(); i++) {
static_assert(sizeof(BigInt::Digit) == sizeof(uintptr_t),
"BigInt Digit size matches uintptr_t");
loadPtr(srcDigits, temp);
storePtr(temp, destDigits);
srcDigits = Address(src, srcDigits.offset + sizeof(BigInt::Digit));
destDigits = Address(dest, destDigits.offset + sizeof(BigInt::Digit));
}
}
void MacroAssembler::compareBigIntAndInt32(JSOp op, Register bigInt,
Register int32, Register scratch1,
Register scratch2, Label* ifTrue,
Label* ifFalse) {
MOZ_ASSERT(IsLooseEqualityOp(op) || IsRelationalOp(op));
static_assert(std::is_same_v<BigInt::Digit, uintptr_t>,
"BigInt digit can be loaded in a pointer-sized register");
static_assert(sizeof(BigInt::Digit) >= sizeof(uint32_t),
"BigInt digit stores at least an uint32");
// Test for too large numbers.
//
// If the unsigned value of the BigInt can't be expressed in a uint32/uint64,
// the result of the comparison is a constant.
if (op == JSOp::Eq || op == JSOp::Ne) {
Label* tooLarge = op == JSOp::Eq ? ifFalse : ifTrue;
branch32(Assembler::GreaterThan,
Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(1),
tooLarge);
} else {
Label doCompare;
branch32(Assembler::LessThanOrEqual,
Address(bigInt, BigInt::offsetOfDigitLength()), Imm32(1),
&doCompare);
// Still need to take the sign-bit into account for relational operations.
if (op == JSOp::Lt || op == JSOp::Le) {
branchIfBigIntIsNegative(bigInt, ifTrue);
jump(ifFalse);
} else {
branchIfBigIntIsNegative(bigInt, ifFalse);
jump(ifTrue);
}
bind(&doCompare);
}
// Test for mismatched signs and, if the signs are equal, load |abs(x)| in
// |scratch1| and |abs(y)| in |scratch2| and then compare the unsigned numbers
// against each other.
{
// Jump to |ifTrue| resp. |ifFalse| if the BigInt is strictly less than
// resp. strictly greater than the int32 value, depending on the comparison
// operator.
Label* greaterThan;
Label* lessThan;
if (op == JSOp::Eq) {
greaterThan = ifFalse;
lessThan = ifFalse;
} else if (op == JSOp::Ne) {
greaterThan = ifTrue;
lessThan = ifTrue;
} else if (op == JSOp::Lt || op == JSOp::Le) {
greaterThan = ifFalse;
lessThan = ifTrue;
} else {
MOZ_ASSERT(op == JSOp::Gt || op == JSOp::Ge);
greaterThan = ifTrue;
lessThan = ifFalse;
}
// BigInt digits are always stored as an unsigned number.
loadBigIntDigit(bigInt, scratch1);
// Load the int32 into |scratch2| and negate it for negative numbers.
move32(int32, scratch2);
Label isNegative, doCompare;
branchIfBigIntIsNegative(bigInt, &isNegative);
branch32(Assembler::LessThan, int32, Imm32(0), greaterThan);
jump(&doCompare);
// We rely on |neg32(INT32_MIN)| staying INT32_MIN, because we're using an
// unsigned comparison below.
bind(&isNegative);
branch32(Assembler::GreaterThanOrEqual, int32, Imm32(0), lessThan);
neg32(scratch2);
// Not all supported platforms (e.g. MIPS64) zero-extend 32-bit operations,
// so we need to explicitly clear any high 32-bits.
move32ZeroExtendToPtr(scratch2, scratch2);
// Reverse the relational comparator for negative numbers.
// |-x < -y| <=> |+x > +y|.
// |-x ≤ -y| <=> |+x ≥ +y|.
// |-x > -y| <=> |+x < +y|.
// |-x ≥ -y| <=> |+x ≤ +y|.
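// For example (illustrative values only): evaluating BigInt(-5) < -3 loads
// the absolute values 5 and 3 and instead tests 5 > 3, which preserves the
// original result.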
JSOp reversed = ReverseCompareOp(op);
if (reversed != op) {
branchPtr(JSOpToCondition(reversed, /* isSigned = */ false), scratch1,
scratch2, ifTrue);
jump(ifFalse);
}
bind(&doCompare);
branchPtr(JSOpToCondition(op, /* isSigned = */ false), scratch1, scratch2,
ifTrue);
}
}
void MacroAssembler::compareBigIntAndInt32(JSOp op, Register bigInt,
Imm32 int32, Register scratch,
Label* ifTrue, Label* ifFalse) {
MOZ_ASSERT(IsLooseEqualityOp(op) || IsRelationalOp(op));
static_assert(std::is_same_v<BigInt::Digit, uintptr_t>,
"BigInt digit can be loaded in a pointer-sized register");
static_assert(sizeof(BigInt::Digit) >= sizeof(uint32_t),
"BigInt digit stores at least an uint32");
// Comparison against zero doesn't require loading any BigInt digits.
if (int32.value == 0) {
switch (op) {
case JSOp::Eq:
branchIfBigIntIsZero(bigInt, ifTrue);
break;
case JSOp::Ne:
branchIfBigIntIsNonZero(bigInt, ifTrue);
break;
case JSOp::Lt:
branchIfBigIntIsNegative(bigInt, ifTrue);
break;
case JSOp::Le:
branchIfBigIntIsZero(bigInt, ifTrue);
branchIfBigIntIsNegative(bigInt, ifTrue);
break;
case JSOp::Gt:
branchIfBigIntIsZero(bigInt, ifFalse);
branchIfBigIntIsNonNegative(bigInt, ifTrue);
break;
case JSOp::Ge:
branchIfBigIntIsNonNegative(bigInt, ifTrue);
break;
default:
MOZ_CRASH("bad comparison operator");
}
// Fall through to the false case.
return;
}
// Jump to |ifTrue| resp. |ifFalse| if the BigInt is strictly less than
// resp. strictly greater than the int32 value, depending on the comparison
// operator.
Label* greaterThan;
Label* lessThan;
if (op == JSOp::Eq) {
greaterThan = ifFalse;
lessThan = ifFalse;
} else if (op == JSOp::Ne) {
greaterThan = ifTrue;
lessThan = ifTrue;
} else if (op == JSOp::Lt || op == JSOp::Le) {
greaterThan = ifFalse;
lessThan = ifTrue;
} else {
MOZ_ASSERT(op == JSOp::Gt || op == JSOp::Ge);
greaterThan = ifTrue;
lessThan = ifFalse;
}
// Test for mismatched signs.
if (int32.value > 0) {
branchIfBigIntIsNegative(bigInt, lessThan);
} else {
branchIfBigIntIsNonNegative(bigInt, greaterThan);
}
// Both signs are equal, load |abs(x)| in |scratch| and then compare the
// unsigned numbers against each other.
//
// If the unsigned value of the BigInt can't be expressed in a uint32/uint64,
// the result of the comparison is a constant.
Label* tooLarge = int32.value > 0 ? greaterThan : lessThan;
loadBigIntDigit(bigInt, scratch, tooLarge);
// Use the unsigned value of the immediate.
ImmWord uint32 = ImmWord(mozilla::Abs(int32.value));
// Reverse the relational comparator for negative numbers.
// |-x < -y| <=> |+x > +y|.
// |-x ≤ -y| <=> |+x ≥ +y|.
// |-x > -y| <=> |+x < +y|.
// |-x ≥ -y| <=> |+x ≤ +y|.
if (int32.value < 0) {
op = ReverseCompareOp(op);
}
branchPtr(JSOpToCondition(op, /* isSigned = */ false), scratch, uint32,
ifTrue);
}
void MacroAssembler::equalBigInts(Register left, Register right, Register temp1,
Register temp2, Register temp3,
Register temp4, Label* notSameSign,
Label* notSameLength, Label* notSameDigit) {
MOZ_ASSERT(left != temp1);
MOZ_ASSERT(right != temp1);
MOZ_ASSERT(right != temp2);
// Jump to |notSameSign| when the signs aren't the same.
load32(Address(left, BigInt::offsetOfFlags()), temp1);
xor32(Address(right, BigInt::offsetOfFlags()), temp1);
branchTest32(Assembler::NonZero, temp1, Imm32(BigInt::signBitMask()),
notSameSign);
// Jump to |notSameLength| when the digits length is different.
load32(Address(right, BigInt::offsetOfLength()), temp1);
branch32(Assembler::NotEqual, Address(left, BigInt::offsetOfLength()), temp1,
notSameLength);
// Both BigInts have the same sign and the same number of digits. Loop
// over each digit, starting with the left-most one, and break from the
// loop when the first non-matching digit is found.
loadBigIntDigits(left, temp2);
loadBigIntDigits(right, temp3);
static_assert(sizeof(BigInt::Digit) == sizeof(void*),
"BigInt::Digit is pointer sized");
computeEffectiveAddress(BaseIndex(temp2, temp1, ScalePointer), temp2);
computeEffectiveAddress(BaseIndex(temp3, temp1, ScalePointer), temp3);
Label start, loop;
jump(&start);
bind(&loop);
subPtr(Imm32(sizeof(BigInt::Digit)), temp2);
subPtr(Imm32(sizeof(BigInt::Digit)), temp3);
loadPtr(Address(temp3, 0), temp4);
branchPtr(Assembler::NotEqual, Address(temp2, 0), temp4, notSameDigit);
bind(&start);
branchSub32(Assembler::NotSigned, Imm32(1), temp1, &loop);
// No differing digits were found, so both BigInts are equal to each other.
}
void MacroAssembler::typeOfObject(Register obj, Register scratch, Label* slow,
Label* isObject, Label* isCallable,
Label* isUndefined) {
loadObjClassUnsafe(obj, scratch);
// Proxies can emulate undefined and have complex isCallable behavior.
branchTestClassIsProxy(true, scratch, slow);
// JSFunctions are always callable.
branchTestClassIsFunction(Assembler::Equal, scratch, isCallable);
// Objects that emulate undefined.
Address flags(scratch, JSClass::offsetOfFlags());
branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_EMULATES_UNDEFINED),
isUndefined);
// Handle classes with a call hook.
branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClass, cOps)),
ImmPtr(nullptr), isObject);
loadPtr(Address(scratch, offsetof(JSClass, cOps)), scratch);
branchPtr(Assembler::Equal, Address(scratch, offsetof(JSClassOps, call)),
ImmPtr(nullptr), isObject);
jump(isCallable);
}
void MacroAssembler::isCallableOrConstructor(bool isCallable, Register obj,
Register output, Label* isProxy) {
MOZ_ASSERT(obj != output);
Label notFunction, hasCOps, done;
loadObjClassUnsafe(obj, output);
// An object is callable iff:
// is<JSFunction>() || (getClass()->cOps && getClass()->cOps->call).
// An object is a constructor iff:
// ((is<JSFunction>() && as<JSFunction>().isConstructor) ||
// (getClass()->cOps && getClass()->cOps->construct)).
branchTestClassIsFunction(Assembler::NotEqual, output, &notFunction);
if (isCallable) {
move32(Imm32(1), output);
} else {
static_assert(mozilla::IsPowerOfTwo(uint32_t(FunctionFlags::CONSTRUCTOR)),
"FunctionFlags::CONSTRUCTOR has only one bit set");
load32(Address(obj, JSFunction::offsetOfFlagsAndArgCount()), output);
rshift32(Imm32(mozilla::FloorLog2(uint32_t(FunctionFlags::CONSTRUCTOR))),
output);
and32(Imm32(1), output);
}
jump(&done);
bind(&notFunction);
if (!isCallable) {
// For bound functions, we need to check the isConstructor flag.
Label notBoundFunction;
branchPtr(Assembler::NotEqual, output, ImmPtr(&BoundFunctionObject::class_),
&notBoundFunction);
static_assert(BoundFunctionObject::IsConstructorFlag == 0b1,
"AND operation results in boolean value");
unboxInt32(Address(obj, BoundFunctionObject::offsetOfFlagsSlot()), output);
and32(Imm32(BoundFunctionObject::IsConstructorFlag), output);
jump(&done);
bind(&notBoundFunction);
}
// Just skim proxies off. Their notion of isCallable()/isConstructor() is
// more complicated.
branchTestClassIsProxy(true, output, isProxy);
branchPtr(Assembler::NonZero, Address(output, offsetof(JSClass, cOps)),
ImmPtr(nullptr), &hasCOps);
move32(Imm32(0), output);
jump(&done);
bind(&hasCOps);
loadPtr(Address(output, offsetof(JSClass, cOps)), output);
size_t opsOffset =
isCallable ? offsetof(JSClassOps, call) : offsetof(JSClassOps, construct);
cmpPtrSet(Assembler::NonZero, Address(output, opsOffset), ImmPtr(nullptr),
output);
bind(&done);
}
void MacroAssembler::loadJSContext(Register dest) {
movePtr(ImmPtr(runtime()->mainContextPtr()), dest);
}
static const uint8_t* ContextRealmPtr(CompileRuntime* rt) {
return (static_cast<const uint8_t*>(rt->mainContextPtr()) +
JSContext::offsetOfRealm());
}
void MacroAssembler::loadGlobalObjectData(Register dest) {
loadPtr(AbsoluteAddress(ContextRealmPtr(runtime())), dest);
loadPtr(Address(dest, Realm::offsetOfActiveGlobal()), dest);
loadPrivate(Address(dest, GlobalObject::offsetOfGlobalDataSlot()), dest);
}
void MacroAssembler::switchToRealm(Register realm) {
storePtr(realm, AbsoluteAddress(ContextRealmPtr(runtime())));
}
void MacroAssembler::loadRealmFuse(RealmFuses::FuseIndex index, Register dest) {
// Load Realm pointer
loadPtr(AbsoluteAddress(ContextRealmPtr(runtime())), dest);
loadPtr(Address(dest, RealmFuses::offsetOfFuseWordRelativeToRealm(index)),
dest);
}
void MacroAssembler::switchToRealm(const void* realm, Register scratch) {
MOZ_ASSERT(realm);
movePtr(ImmPtr(realm), scratch);
switchToRealm(scratch);
}
void MacroAssembler::switchToObjectRealm(Register obj, Register scratch) {
loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
switchToRealm(scratch);
}
void MacroAssembler::switchToBaselineFrameRealm(Register scratch) {
Address envChain(FramePointer,
BaselineFrame::reverseOffsetOfEnvironmentChain());
loadPtr(envChain, scratch);
switchToObjectRealm(scratch, scratch);
}
void MacroAssembler::switchToWasmInstanceRealm(Register scratch1,
Register scratch2) {
loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()), scratch1);
loadPtr(Address(InstanceReg, wasm::Instance::offsetOfRealm()), scratch2);
storePtr(scratch2, Address(scratch1, JSContext::offsetOfRealm()));
}
template <typename ValueType>
void MacroAssembler::storeLocalAllocSite(ValueType value, Register scratch) {
loadPtr(AbsoluteAddress(ContextRealmPtr(runtime())), scratch);
storePtr(value, Address(scratch, JS::Realm::offsetOfLocalAllocSite()));
}
template void MacroAssembler::storeLocalAllocSite(Register, Register);
template void MacroAssembler::storeLocalAllocSite(ImmWord, Register);
template void MacroAssembler::storeLocalAllocSite(ImmPtr, Register);
void MacroAssembler::debugAssertContextRealm(const void* realm,
Register scratch) {
#ifdef DEBUG
Label ok;
movePtr(ImmPtr(realm), scratch);
branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr(runtime())),
scratch, &ok);
assumeUnreachable("Unexpected context realm");
bind(&ok);
#endif
}
void MacroAssembler::setIsCrossRealmArrayConstructor(Register obj,
Register output) {
#ifdef DEBUG
Label notProxy;
branchTestObjectIsProxy(false, obj, output, &notProxy);
assumeUnreachable("Unexpected proxy in setIsCrossRealmArrayConstructor");
bind(&notProxy);
#endif
// The object's realm must not be cx->realm.
Label isFalse, done;
loadPtr(Address(obj, JSObject::offsetOfShape()), output);
loadPtr(Address(output, Shape::offsetOfBaseShape()), output);
loadPtr(Address(output, BaseShape::offsetOfRealm()), output);
branchPtr(Assembler::Equal, AbsoluteAddress(ContextRealmPtr(runtime())),
output, &isFalse);
// The object must be a function.
branchTestObjIsFunction(Assembler::NotEqual, obj, output, obj, &isFalse);
// The function must be the ArrayConstructor native.
branchPtr(Assembler::NotEqual,
Address(obj, JSFunction::offsetOfNativeOrEnv()),
ImmPtr(js::ArrayConstructor), &isFalse);
move32(Imm32(1), output);
jump(&done);
bind(&isFalse);
move32(Imm32(0), output);
bind(&done);
}
void MacroAssembler::guardObjectHasSameRealm(Register obj, Register scratch,
Label* fail) {
loadPtr(Address(obj, JSObject::offsetOfShape()), scratch);
loadPtr(Address(scratch, Shape::offsetOfBaseShape()), scratch);
loadPtr(Address(scratch, BaseShape::offsetOfRealm()), scratch);
branchPtr(Assembler::NotEqual, AbsoluteAddress(ContextRealmPtr(runtime())),
scratch, fail);
}
void MacroAssembler::setIsDefinitelyTypedArrayConstructor(Register obj,
Register output) {
Label isFalse, isTrue, done;
// The object must be a function. (Wrappers are not supported.)
branchTestObjIsFunction(Assembler::NotEqual, obj, output, obj, &isFalse);
// Load the native into |output|.
loadPtr(Address(obj, JSFunction::offsetOfNativeOrEnv()), output);
auto branchIsTypedArrayCtor = [&](Scalar::Type type) {
// The function must be a TypedArrayConstructor native (from any realm).
JSNative constructor = TypedArrayConstructorNative(type);
branchPtr(Assembler::Equal, output, ImmPtr(constructor), &isTrue);
};
#define TYPED_ARRAY_CONSTRUCTOR_NATIVE(_, T, N) \
branchIsTypedArrayCtor(Scalar::N);
JS_FOR_EACH_TYPED_ARRAY(TYPED_ARRAY_CONSTRUCTOR_NATIVE)
#undef TYPED_ARRAY_CONSTRUCTOR_NATIVE
// Falls through to the false case.
bind(&isFalse);
move32(Imm32(0), output);
jump(&done);
bind(&isTrue);
move32(Imm32(1), output);
bind(&done);
}
void MacroAssembler::loadMegamorphicCache(Register dest) {
movePtr(ImmPtr(runtime()->addressOfMegamorphicCache()), dest);
}
void MacroAssembler::loadMegamorphicSetPropCache(Register dest) {
movePtr(ImmPtr(runtime()->addressOfMegamorphicSetPropCache()), dest);
}
void MacroAssembler::tryFastAtomize(Register str, Register scratch,
Register output, Label* fail) {
Label found, done, notAtomRef;
branchTest32(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_REF_BIT), &notAtomRef);
loadPtr(Address(str, JSAtomRefString::offsetOfAtom()), output);
jump(&done);
bind(&notAtomRef);
uintptr_t cachePtr = uintptr_t(runtime()->addressOfStringToAtomCache());
void* offset = (void*)(cachePtr + StringToAtomCache::offsetOfLastLookups());
movePtr(ImmPtr(offset), scratch);
static_assert(StringToAtomCache::NumLastLookups == 2);
size_t stringOffset = StringToAtomCache::LastLookup::offsetOfString();
size_t lookupSize = sizeof(StringToAtomCache::LastLookup);
branchPtr(Assembler::Equal, Address(scratch, stringOffset), str, &found);
branchPtr(Assembler::NotEqual, Address(scratch, lookupSize + stringOffset),
str, fail);
addPtr(Imm32(lookupSize), scratch);
// We found a hit in the lastLookups_ array! Load the associated atom
// and jump back up to our usual atom handling code
bind(&found);
size_t atomOffset = StringToAtomCache::LastLookup::offsetOfAtom();
loadPtr(Address(scratch, atomOffset), output);
bind(&done);
}
void MacroAssembler::loadAtomHash(Register id, Register outHash, Label* done) {
Label doneInner, fatInline;
if (!done) {
done = &doneInner;
}
move32(Imm32(JSString::FAT_INLINE_MASK), outHash);
and32(Address(id, JSString::offsetOfFlags()), outHash);
branch32(Assembler::Equal, outHash, Imm32(JSString::FAT_INLINE_MASK),
&fatInline);
load32(Address(id, NormalAtom::offsetOfHash()), outHash);
jump(done);
bind(&fatInline);
load32(Address(id, FatInlineAtom::offsetOfHash()), outHash);
jump(done);
bind(&doneInner);
}
void MacroAssembler::loadAtomOrSymbolAndHash(ValueOperand value, Register outId,
Register outHash,
Label* cacheMiss) {
Label isString, isSymbol, isNull, isUndefined, done, nonAtom, atom;
{
ScratchTagScope tag(*this, value);
splitTagForTest(value, tag);
branchTestString(Assembler::Equal, tag, &isString);
branchTestSymbol(Assembler::Equal, tag, &isSymbol);
branchTestNull(Assembler::Equal, tag, &isNull);
branchTestUndefined(Assembler::NotEqual, tag, cacheMiss);
}
const JSAtomState& names = runtime()->names();
movePropertyKey(NameToId(names.undefined), outId);
move32(Imm32(names.undefined->hash()), outHash);
jump(&done);
bind(&isNull);
movePropertyKey(NameToId(names.null), outId);
move32(Imm32(names.null->hash()), outHash);
jump(&done);
bind(&isSymbol);
unboxSymbol(value, outId);
load32(Address(outId, JS::Symbol::offsetOfHash()), outHash);
orPtr(Imm32(PropertyKey::SymbolTypeTag), outId);
jump(&done);
bind(&isString);
unboxString(value, outId);
branchTest32(Assembler::Zero, Address(outId, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), &nonAtom);
bind(&atom);
loadAtomHash(outId, outHash, &done);
bind(&nonAtom);
tryFastAtomize(outId, outHash, outId, cacheMiss);
jump(&atom);
bind(&done);
}
void MacroAssembler::emitExtractValueFromMegamorphicCacheEntry(
Register obj, Register entry, Register scratch1, Register scratch2,
ValueOperand output, Label* cacheHit, Label* cacheMiss,
Label* cacheHitGetter) {
Label isMissing, dynamicSlot, protoLoopHead, protoLoopTail;
// scratch2 = entry->hopsAndKind_
load8ZeroExtend(
Address(entry, MegamorphicCache::Entry::offsetOfHopsAndKind()), scratch2);
// if (scratch2 == NumHopsForMissingProperty) goto isMissing
branch32(Assembler::Equal, scratch2,
Imm32(MegamorphicCache::Entry::NumHopsForMissingProperty),
&isMissing);
if (cacheHitGetter) {
// Here we're going to set scratch1 to 0 for a data property and 1 for a
// getter and scratch2 to the number of hops
Label dataProperty;
// if (scratch2 & NonDataPropertyFlag == 0) goto dataProperty
move32(Imm32(0), scratch1);
branchTest32(Assembler::Zero, scratch2,
Imm32(MegamorphicCache::Entry::NonDataPropertyFlag),
&dataProperty);
// if (scratch2 > NonDataPropertyFlag | MaxHopsForAccessorProperty) goto
// cacheMiss
branch32(Assembler::GreaterThan, scratch2,
Imm32(MegamorphicCache::Entry::NonDataPropertyFlag |
MegamorphicCache::Entry::MaxHopsForAccessorProperty),
cacheMiss);
and32(Imm32(~MegamorphicCache::Entry::NonDataPropertyFlag), scratch2);
move32(Imm32(1), scratch1);
bind(&dataProperty);
} else {
// if (scratch2 & NonDataPropertyFlag) goto cacheMiss
branchTest32(Assembler::NonZero, scratch2,
Imm32(MegamorphicCache::Entry::NonDataPropertyFlag),
cacheMiss);
}
// NOTE: Where this is called, `output` can actually alias `obj`, and before
// the last cacheMiss branch above we can't write to `obj`, so we can't
// use `output`'s scratch register there. However a cache miss is impossible
// now, so we're free to use `output` as we like.
Register outputScratch = output.scratchReg();
if (!outputScratch.aliases(obj)) {
// We're okay with paying this very slight extra cost to avoid a potential
// footgun of writing to what callers understand as only an input register.
movePtr(obj, outputScratch);
}
branchTest32(Assembler::Zero, scratch2, scratch2, &protoLoopTail);
bind(&protoLoopHead);
loadObjProto(outputScratch, outputScratch);
branchSub32(Assembler::NonZero, Imm32(1), scratch2, &protoLoopHead);
bind(&protoLoopTail);
// entry = entry->slotOffset()
load32(Address(entry, MegamorphicCacheEntry::offsetOfSlotOffset()), entry);
// scratch2 = slotOffset.offset()
rshift32(Imm32(TaggedSlotOffset::OffsetShift), entry, scratch2);
// if (!slotOffset.isFixedSlot()) goto dynamicSlot
branchTest32(Assembler::Zero, entry, Imm32(TaggedSlotOffset::IsFixedSlotFlag),
&dynamicSlot);
// output = outputScratch[scratch2]
loadValue(BaseIndex(outputScratch, scratch2, TimesOne), output);
if (cacheHitGetter) {
branchTest32(Assembler::NonZero, scratch1, scratch1, cacheHitGetter);
}
jump(cacheHit);
bind(&dynamicSlot);
// output = outputScratch->slots_[scratch2]
loadPtr(Address(outputScratch, NativeObject::offsetOfSlots()), outputScratch);
loadValue(BaseIndex(outputScratch, scratch2, TimesOne), output);
if (cacheHitGetter) {
branchTest32(Assembler::NonZero, scratch1, scratch1, cacheHitGetter);
}
jump(cacheHit);
bind(&isMissing);
// output = undefined
moveValue(UndefinedValue(), output);
jump(cacheHit);
}
template <typename IdOperandType>
void MacroAssembler::emitMegamorphicCacheLookupByValueCommon(
IdOperandType id, Register obj, Register scratch1, Register scratch2,
Register outEntryPtr, Label* cacheMiss, Label* cacheMissWithEntry) {
// A lot of this code is shared with emitMegamorphicCacheLookup. It would
// be nice to be able to avoid the duplication here, but due to a few
// differences like taking the id in a ValueOperand instead of being able
// to bake it in as an immediate, and only needing a Register for the output
// value, it seemed more awkward to read once it was deduplicated.
// outEntryPtr = obj->shape()
loadPtr(Address(obj, JSObject::offsetOfShape()), outEntryPtr);
movePtr(outEntryPtr, scratch2);
// outEntryPtr = (outEntryPtr >> 3) ^ (outEntryPtr >> 13) + idHash
rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift1), outEntryPtr);
rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift2), scratch2);
xorPtr(scratch2, outEntryPtr);
if constexpr (std::is_same<IdOperandType, ValueOperand>::value) {
loadAtomOrSymbolAndHash(id, scratch1, scratch2, cacheMiss);
} else {
static_assert(std::is_same<IdOperandType, Register>::value);
movePtr(id, scratch1);
loadAtomHash(scratch1, scratch2, nullptr);
}
addPtr(scratch2, outEntryPtr);
// outEntryPtr %= MegamorphicCache::NumEntries
constexpr size_t cacheSize = MegamorphicCache::NumEntries;
static_assert(mozilla::IsPowerOfTwo(cacheSize));
size_t cacheMask = cacheSize - 1;
and32(Imm32(cacheMask), outEntryPtr);
loadMegamorphicCache(scratch2);
// outEntryPtr = &scratch2->entries_[outEntryPtr]
constexpr size_t entrySize = sizeof(MegamorphicCache::Entry);
static_assert(sizeof(void*) == 4 || entrySize == 24);
if constexpr (sizeof(void*) == 4) {
mul32(Imm32(entrySize), outEntryPtr);
computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesOne,
MegamorphicCache::offsetOfEntries()),
outEntryPtr);
} else {
computeEffectiveAddress(BaseIndex(outEntryPtr, outEntryPtr, TimesTwo),
outEntryPtr);
computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesEight,
MegamorphicCache::offsetOfEntries()),
outEntryPtr);
}
// if (outEntryPtr->key_ != scratch1) goto cacheMissWithEntry
branchPtr(Assembler::NotEqual,
Address(outEntryPtr, MegamorphicCache::Entry::offsetOfKey()),
scratch1, cacheMissWithEntry);
loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);
// if (outEntryPtr->shape_ != scratch1) goto cacheMissWithEntry
branchPtr(Assembler::NotEqual,
Address(outEntryPtr, MegamorphicCache::Entry::offsetOfShape()),
scratch1, cacheMissWithEntry);
// scratch2 = scratch2->generation_
load16ZeroExtend(Address(scratch2, MegamorphicCache::offsetOfGeneration()),
scratch2);
load16ZeroExtend(
Address(outEntryPtr, MegamorphicCache::Entry::offsetOfGeneration()),
scratch1);
// if (outEntryPtr->generation_ != scratch2) goto cacheMissWithEntry
branch32(Assembler::NotEqual, scratch1, scratch2, cacheMissWithEntry);
}
void MacroAssembler::emitMegamorphicCacheLookup(
PropertyKey id, Register obj, Register scratch1, Register scratch2,
Register outEntryPtr, ValueOperand output, Label* cacheHit,
Label* cacheHitGetter) {
Label cacheMiss, isMissing, dynamicSlot, protoLoopHead, protoLoopTail;
// scratch1 = obj->shape()
loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);
movePtr(scratch1, outEntryPtr);
movePtr(scratch1, scratch2);
// outEntryPtr = (scratch1 >> 3) ^ (scratch1 >> 13) + hash(id)
rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift1), outEntryPtr);
rshiftPtr(Imm32(MegamorphicCache::ShapeHashShift2), scratch2);
xorPtr(scratch2, outEntryPtr);
addPtr(Imm32(HashAtomOrSymbolPropertyKey(id)), outEntryPtr);
// outEntryPtr %= MegamorphicCache::NumEntries
constexpr size_t cacheSize = MegamorphicCache::NumEntries;
static_assert(mozilla::IsPowerOfTwo(cacheSize));
size_t cacheMask = cacheSize - 1;
and32(Imm32(cacheMask), outEntryPtr);
loadMegamorphicCache(scratch2);
// outEntryPtr = &scratch2->entries_[outEntryPtr]
constexpr size_t entrySize = sizeof(MegamorphicCache::Entry);
static_assert(sizeof(void*) == 4 || entrySize == 24);
if constexpr (sizeof(void*) == 4) {
mul32(Imm32(entrySize), outEntryPtr);
computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesOne,
MegamorphicCache::offsetOfEntries()),
outEntryPtr);
} else {
computeEffectiveAddress(BaseIndex(outEntryPtr, outEntryPtr, TimesTwo),
outEntryPtr);
computeEffectiveAddress(BaseIndex(scratch2, outEntryPtr, TimesEight,
MegamorphicCache::offsetOfEntries()),
outEntryPtr);
}
// if (outEntryPtr->shape_ != scratch1) goto cacheMiss
branchPtr(Assembler::NotEqual,
Address(outEntryPtr, MegamorphicCache::Entry::offsetOfShape()),
scratch1, &cacheMiss);
// if (outEntryPtr->key_ != id) goto cacheMiss
movePropertyKey(id, scratch1);
branchPtr(Assembler::NotEqual,
Address(outEntryPtr, MegamorphicCache::Entry::offsetOfKey()),
scratch1, &cacheMiss);
// scratch2 = scratch2->generation_
load16ZeroExtend(Address(scratch2, MegamorphicCache::offsetOfGeneration()),
scratch2);
load16ZeroExtend(
Address(outEntryPtr, MegamorphicCache::Entry::offsetOfGeneration()),
scratch1);
// if (outEntryPtr->generation_ != scratch2) goto cacheMiss
branch32(Assembler::NotEqual, scratch1, scratch2, &cacheMiss);
emitExtractValueFromMegamorphicCacheEntry(obj, outEntryPtr, scratch1,
scratch2, output, cacheHit,
&cacheMiss, cacheHitGetter);
bind(&cacheMiss);
}
template <typename IdOperandType>
void MacroAssembler::emitMegamorphicCacheLookupByValue(
IdOperandType id, Register obj, Register scratch1, Register scratch2,
Register outEntryPtr, ValueOperand output, Label* cacheHit,
Label* cacheHitGetter) {
Label cacheMiss, cacheMissWithEntry;
emitMegamorphicCacheLookupByValueCommon(id, obj, scratch1, scratch2,
outEntryPtr, &cacheMiss,
&cacheMissWithEntry);
emitExtractValueFromMegamorphicCacheEntry(
obj, outEntryPtr, scratch1, scratch2, output, cacheHit,
&cacheMissWithEntry, cacheHitGetter);
bind(&cacheMiss);
xorPtr(outEntryPtr, outEntryPtr);
bind(&cacheMissWithEntry);
}
template void MacroAssembler::emitMegamorphicCacheLookupByValue<ValueOperand>(
ValueOperand id, Register obj, Register scratch1, Register scratch2,
Register outEntryPtr, ValueOperand output, Label* cacheHit,
Label* cacheHitGetter);
template void MacroAssembler::emitMegamorphicCacheLookupByValue<Register>(
Register id, Register obj, Register scratch1, Register scratch2,
Register outEntryPtr, ValueOperand output, Label* cacheHit,
Label* cacheHitGetter);
void MacroAssembler::emitMegamorphicCacheLookupExists(
ValueOperand id, Register obj, Register scratch1, Register scratch2,
Register outEntryPtr, Register output, Label* cacheHit, bool hasOwn) {
Label cacheMiss, cacheMissWithEntry, cacheHitFalse;
emitMegamorphicCacheLookupByValueCommon(id, obj, scratch1, scratch2,
outEntryPtr, &cacheMiss,
&cacheMissWithEntry);
// scratch1 = outEntryPtr->hopsAndKind_
load8ZeroExtend(
Address(outEntryPtr, MegamorphicCache::Entry::offsetOfHopsAndKind()),
scratch1);
branch32(Assembler::Equal, scratch1,
Imm32(MegamorphicCache::Entry::NumHopsForMissingProperty),
&cacheHitFalse);
branchTest32(Assembler::NonZero, scratch1,
Imm32(MegamorphicCache::Entry::NonDataPropertyFlag),
&cacheMissWithEntry);
if (hasOwn) {
branch32(Assembler::NotEqual, scratch1, Imm32(0), &cacheHitFalse);
}
move32(Imm32(1), output);
jump(cacheHit);
bind(&cacheHitFalse);
xor32(output, output);
jump(cacheHit);
bind(&cacheMiss);
xorPtr(outEntryPtr, outEntryPtr);
bind(&cacheMissWithEntry);
}
void MacroAssembler::extractCurrentIndexAndKindFromIterator(Register iterator,
Register outIndex,
Register outKind) {
// Load iterator object
Address nativeIterAddr(iterator,
PropertyIteratorObject::offsetOfIteratorSlot());
loadPrivate(nativeIterAddr, outIndex);
// Compute offset of propertyCursor_ from propertiesBegin()
loadPtr(Address(outIndex, NativeIterator::offsetOfPropertyCursor()), outKind);
subPtr(Address(outIndex, NativeIterator::offsetOfShapesEnd()), outKind);
// Compute offset of current index from indicesBegin(). Note that because
// propertyCursor has already been incremented, this is actually the offset
// of the next index. We adjust accordingly below.
size_t indexAdjustment =
sizeof(GCPtr<JSLinearString*>) / sizeof(PropertyIndex);
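// (Illustrative sizes: on typical 64-bit builds GCPtr<JSLinearString*> is 8
// bytes and PropertyIndex is 4 bytes, so the byte offset is halved; on
// 32-bit builds both are 4 bytes and no adjustment is needed.)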
if (indexAdjustment != 1) {
MOZ_ASSERT(indexAdjustment == 2);
rshift32(Imm32(1), outKind);
}
// Load current index.
loadPtr(Address(outIndex, NativeIterator::offsetOfPropertiesEnd()), outIndex);
load32(BaseIndex(outIndex, outKind, Scale::TimesOne,
-int32_t(sizeof(PropertyIndex))),
outIndex);
// Extract kind.
rshift32(Imm32(PropertyIndex::KindShift), outIndex, outKind);
// Extract index.
and32(Imm32(PropertyIndex::IndexMask), outIndex);
}
template <typename IdType>
void MacroAssembler::emitMegamorphicCachedSetSlot(
IdType id, Register obj, Register scratch1,
#ifndef JS_CODEGEN_X86 // See MegamorphicSetElement in LIROps.yaml
Register scratch2, Register scratch3,
#endif
ValueOperand value, const LiveRegisterSet& liveRegs, Label* cacheHit,
void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType)) {
Label cacheMiss, dynamicSlot, doAdd, doSet, doAddDynamic, doSetDynamic;
#ifdef JS_CODEGEN_X86
pushValue(value);
Register scratch2 = value.typeReg();
Register scratch3 = value.payloadReg();
#endif
// outEntryPtr = obj->shape()
loadPtr(Address(obj, JSObject::offsetOfShape()), scratch3);
movePtr(scratch3, scratch2);
// scratch3 = (scratch3 >> 3) ^ (scratch3 >> 13) + idHash
rshiftPtr(Imm32(MegamorphicSetPropCache::ShapeHashShift1), scratch3);
rshiftPtr(Imm32(MegamorphicSetPropCache::ShapeHashShift2), scratch2);
xorPtr(scratch2, scratch3);
if constexpr (std::is_same<IdType, ValueOperand>::value) {
loadAtomOrSymbolAndHash(id, scratch1, scratch2, &cacheMiss);
addPtr(scratch2, scratch3);
} else {
static_assert(std::is_same<IdType, PropertyKey>::value);
addPtr(Imm32(HashAtomOrSymbolPropertyKey(id)), scratch3);
movePropertyKey(id, scratch1);
}
// scratch3 %= MegamorphicSetPropCache::NumEntries
constexpr size_t cacheSize = MegamorphicSetPropCache::NumEntries;
static_assert(mozilla::IsPowerOfTwo(cacheSize));
size_t cacheMask = cacheSize - 1;
and32(Imm32(cacheMask), scratch3);
loadMegamorphicSetPropCache(scratch2);
// scratch3 = &scratch2->entries_[scratch3]
constexpr size_t entrySize = sizeof(MegamorphicSetPropCache::Entry);
mul32(Imm32(entrySize), scratch3);
computeEffectiveAddress(BaseIndex(scratch2, scratch3, TimesOne,
MegamorphicSetPropCache::offsetOfEntries()),
scratch3);
// if (scratch3->key_ != scratch1) goto cacheMiss
branchPtr(Assembler::NotEqual,
Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfKey()),
scratch1, &cacheMiss);
loadPtr(Address(obj, JSObject::offsetOfShape()), scratch1);
// if (scratch3->shape_ != scratch1) goto cacheMiss
branchPtr(Assembler::NotEqual,
Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfShape()),
scratch1, &cacheMiss);
// scratch2 = scratch2->generation_
load16ZeroExtend(
Address(scratch2, MegamorphicSetPropCache::offsetOfGeneration()),
scratch2);
load16ZeroExtend(
Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfGeneration()),
scratch1);
// if (scratch3->generation_ != scratch2) goto cacheMiss
branch32(Assembler::NotEqual, scratch1, scratch2, &cacheMiss);
// scratch2 = entry->slotOffset()
load32(
Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfSlotOffset()),
scratch2);
// scratch1 = slotOffset.offset()
rshift32(Imm32(TaggedSlotOffset::OffsetShift), scratch2, scratch1);
Address afterShapePtr(scratch3,
MegamorphicSetPropCache::Entry::offsetOfAfterShape());
// if (!slotOffset.isFixedSlot()) goto dynamicSlot
branchTest32(Assembler::Zero, scratch2,
Imm32(TaggedSlotOffset::IsFixedSlotFlag), &dynamicSlot);
// Calculate the slot address in scratch1. Jump to doSet if the entry's
// afterShape is null, else jump (or fall through) to doAdd.
addPtr(obj, scratch1);
branchPtr(Assembler::Equal, afterShapePtr, ImmPtr(nullptr), &doSet);
jump(&doAdd);
bind(&dynamicSlot);
branchPtr(Assembler::Equal, afterShapePtr, ImmPtr(nullptr), &doSetDynamic);
Address slotAddr(scratch1, 0);
// If entry->newCapacity_ is nonzero, we need to grow the slots on the
// object. Otherwise just jump straight to a dynamic add.
load16ZeroExtend(
Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfNewCapacity()),
scratch2);
branchTest32(Assembler::Zero, scratch2, scratch2, &doAddDynamic);
LiveRegisterSet save;
save.set() = RegisterSet::Intersect(liveRegs.set(), RegisterSet::Volatile());
save.addUnchecked(scratch1); // Used as call temp below.
save.takeUnchecked(scratch2); // Used for the return value.
PushRegsInMask(save);
using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
setupUnalignedABICall(scratch1);
loadJSContext(scratch1);
passABIArg(scratch1);
passABIArg(obj);
passABIArg(scratch2);
callWithABI<Fn, NativeObject::growSlotsPure>();
storeCallPointerResult(scratch2);
MOZ_ASSERT(!save.has(scratch2));
PopRegsInMask(save);
branchIfFalseBool(scratch2, &cacheMiss);
bind(&doAddDynamic);
addPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
bind(&doAdd);
// scratch3 = entry->afterShape()
loadPtr(
Address(scratch3, MegamorphicSetPropCache::Entry::offsetOfAfterShape()),
scratch3);
storeObjShape(scratch3, obj,
[emitPreBarrier](MacroAssembler& masm, const Address& addr) {
emitPreBarrier(masm, addr, MIRType::Shape);
});
#ifdef JS_CODEGEN_X86
popValue(value);
#endif
storeValue(value, slotAddr);
jump(cacheHit);
bind(&doSetDynamic);
addPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
bind(&doSet);
guardedCallPreBarrier(slotAddr, MIRType::Value);
#ifdef JS_CODEGEN_X86
popValue(value);
#endif
storeValue(value, slotAddr);
jump(cacheHit);
bind(&cacheMiss);
#ifdef JS_CODEGEN_X86
popValue(value);
#endif
}
template void MacroAssembler::emitMegamorphicCachedSetSlot<PropertyKey>(
PropertyKey id, Register obj, Register scratch1,
#ifndef JS_CODEGEN_X86 // See MegamorphicSetElement in LIROps.yaml
Register scratch2, Register scratch3,
#endif
ValueOperand value, const LiveRegisterSet& liveRegs, Label* cacheHit,
void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));
template void MacroAssembler::emitMegamorphicCachedSetSlot<ValueOperand>(
ValueOperand id, Register obj, Register scratch1,
#ifndef JS_CODEGEN_X86 // See MegamorphicSetElement in LIROps.yaml
Register scratch2, Register scratch3,
#endif
ValueOperand value, const LiveRegisterSet& liveRegs, Label* cacheHit,
void (*emitPreBarrier)(MacroAssembler&, const Address&, MIRType));
void MacroAssembler::guardNonNegativeIntPtrToInt32(Register reg, Label* fail) {
#ifdef DEBUG
Label ok;
branchPtr(Assembler::NotSigned, reg, reg, &ok);
assumeUnreachable("Unexpected negative value");
bind(&ok);
#endif
#ifdef JS_64BIT
branchPtr(Assembler::Above, reg, Imm32(INT32_MAX), fail);
#endif
}
void MacroAssembler::loadArrayBufferByteLengthIntPtr(Register obj,
Register output) {
Address slotAddr(obj, ArrayBufferObject::offsetOfByteLengthSlot());
loadPrivate(slotAddr, output);
}
void MacroAssembler::loadArrayBufferViewByteOffsetIntPtr(Register obj,
Register output) {
Address slotAddr(obj, ArrayBufferViewObject::byteOffsetOffset());
loadPrivate(slotAddr, output);
}
void MacroAssembler::loadArrayBufferViewLengthIntPtr(Register obj,
Register output) {
Address slotAddr(obj, ArrayBufferViewObject::lengthOffset());
loadPrivate(slotAddr, output);
}
void MacroAssembler::loadGrowableSharedArrayBufferByteLengthIntPtr(
Synchronization sync, Register obj, Register output) {
// Load the SharedArrayRawBuffer.
loadPrivate(Address(obj, SharedArrayBufferObject::rawBufferOffset()), output);
memoryBarrierBefore(sync);
// Load the byteLength of the SharedArrayRawBuffer into |output|.
static_assert(sizeof(mozilla::Atomic<size_t>) == sizeof(size_t));
loadPtr(Address(output, SharedArrayRawBuffer::offsetOfByteLength()), output);
memoryBarrierAfter(sync);
}
void MacroAssembler::loadResizableArrayBufferViewLengthIntPtr(
ResizableArrayBufferView view, Synchronization sync, Register obj,
Register output, Register scratch) {
// Inline implementation of ArrayBufferViewObject::length(), when the input is
// guaranteed to be a resizable arraybuffer view object.
loadArrayBufferViewLengthIntPtr(obj, output);
Label done;
branchPtr(Assembler::NotEqual, output, ImmWord(0), &done);
// Load obj->elements in |scratch|.
loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
// If backed by non-shared memory, detached and out-of-bounds both return
// zero, so we're done here.
branchTest32(Assembler::Zero,
Address(scratch, ObjectElements::offsetOfFlags()),
Imm32(ObjectElements::SHARED_MEMORY), &done);
// Load the auto-length slot.
unboxBoolean(Address(obj, ArrayBufferViewObject::autoLengthOffset()),
scratch);
// If non-auto length, there's nothing to do.
branchTest32(Assembler::Zero, scratch, scratch, &done);
// Load bufferByteLength into |output|.
{
// Resizable TypedArrays are guaranteed to have an ArrayBuffer.
unboxObject(Address(obj, ArrayBufferViewObject::bufferOffset()), output);
// Load the byte length from the raw-buffer of growable SharedArrayBuffers.
loadGrowableSharedArrayBufferByteLengthIntPtr(sync, output, output);
}
// Load the byteOffset into |scratch|.
loadArrayBufferViewByteOffsetIntPtr(obj, scratch);
// Compute the accessible byte length |bufferByteLength - byteOffset|.
subPtr(scratch, output);
if (view == ResizableArrayBufferView::TypedArray) {
// Compute the array length from the byte length.
resizableTypedArrayElementShiftBy(obj, output, scratch);
}
bind(&done);
}
void MacroAssembler::loadResizableTypedArrayByteOffsetMaybeOutOfBoundsIntPtr(
Register obj, Register output, Register scratch) {
// Inline implementation of TypedArrayObject::byteOffsetMaybeOutOfBounds(),
// when the input is guaranteed to be a resizable typed array object.
loadArrayBufferViewByteOffsetIntPtr(obj, output);
// The TypedArray is neither detached nor out-of-bounds when byteOffset is
// non-zero.
Label done;
branchPtr(Assembler::NotEqual, output, ImmWord(0), &done);
// We're done when the initial byteOffset is zero.
loadPrivate(Address(obj, ArrayBufferViewObject::initialByteOffsetOffset()),
output);
branchPtr(Assembler::Equal, output, ImmWord(0), &done);
// If the buffer is attached, return initialByteOffset.
branchIfHasAttachedArrayBuffer(obj, scratch, &done);
// Otherwise return zero to match the result for fixed-length TypedArrays.
movePtr(ImmWord(0), output);
bind(&done);
}
void MacroAssembler::dateFillLocalTimeSlots(
Register obj, Register scratch, const LiveRegisterSet& volatileRegs) {
// Inline implementation of the cache check from
// DateObject::fillLocalTimeSlots().
Label callVM, done;
// Check if the cache is already populated.
branchTestUndefined(Assembler::Equal,
Address(obj, DateObject::offsetOfLocalTimeSlot()),
&callVM);
unboxInt32(Address(obj, DateObject::offsetOfUTCTimeZoneOffsetSlot()),
scratch);
branch32(Assembler::Equal,
AbsoluteAddress(DateTimeInfo::addressOfUTCToLocalOffsetSeconds()),
scratch, &done);
bind(&callVM);
{
PushRegsInMask(volatileRegs);
using Fn = void (*)(DateObject*);
setupUnalignedABICall(scratch);
passABIArg(obj);
callWithABI<Fn, jit::DateFillLocalTimeSlots>();
PopRegsInMask(volatileRegs);
}
bind(&done);
}
void MacroAssembler::udiv32ByConstant(Register src, uint32_t divisor,
Register dest) {
auto rmc = ReciprocalMulConstants::computeUnsignedDivisionConstants(divisor);
MOZ_ASSERT(rmc.multiplier <= UINT32_MAX, "division needs scratch register");
// We first compute |q = (M * n) >> 32|, where M = rmc.multiplier.
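// Illustrative example (the exact constants come from
// computeUnsignedDivisionConstants): dividing by 3 can use M = 0xAAAAAAAB
// with shift = 1, so |floor(n / 3)| is |(n * 0xAAAAAAAB) >> 33| for every
// uint32 |n|.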
mulHighUnsigned32(Imm32(rmc.multiplier), src, dest);
// Finish the computation |q = floor(n / d)|.
rshift32(Imm32(rmc.shiftAmount), dest);
}
void MacroAssembler::umod32ByConstant(Register src, uint32_t divisor,
Register dest, Register scratch) {
MOZ_ASSERT(dest != scratch);
auto rmc = ReciprocalMulConstants::computeUnsignedDivisionConstants(divisor);
MOZ_ASSERT(rmc.multiplier <= UINT32_MAX, "division needs scratch register");
if (src != dest) {
move32(src, dest);
}
// We first compute |q = (M * n) >> 32|, where M = rmc.multiplier.
mulHighUnsigned32(Imm32(rmc.multiplier), dest, scratch);
// Finish the computation |q = floor(n / d)|.
rshift32(Imm32(rmc.shiftAmount), scratch);
// Compute the remainder from |r = n - q * d|.
mul32(Imm32(divisor), scratch);
sub32(scratch, dest);
}
template <typename GetTimeFn>
void MacroAssembler::dateTimeFromSecondsIntoYear(ValueOperand secondsIntoYear,
ValueOperand output,
Register scratch1,
Register scratch2,
GetTimeFn getTimeFn) {
#ifdef DEBUG
Label okValue;
branchTestInt32(Assembler::Equal, secondsIntoYear, &okValue);
branchTestNaNValue(Assembler::Equal, secondsIntoYear, scratch1, &okValue);
assumeUnreachable("secondsIntoYear is an int32 or NaN");
bind(&okValue);
#endif
moveValue(secondsIntoYear, output);
Label done;
fallibleUnboxInt32(secondsIntoYear, scratch1, &done);
#ifdef DEBUG
Label okInt;
branchTest32(Assembler::NotSigned, scratch1, scratch1, &okInt);
assumeUnreachable("secondsIntoYear is an unsigned int32");
bind(&okInt);
#endif
getTimeFn(scratch1, scratch1, scratch2);
tagValue(JSVAL_TYPE_INT32, scratch1, output);
bind(&done);
}
void MacroAssembler::dateHoursFromSecondsIntoYear(ValueOperand secondsIntoYear,
ValueOperand output,
Register scratch1,
Register scratch2) {
// Inline implementation of seconds-into-year to local hours computation from
// date_getHours.
// Compute `(yearSeconds / SecondsPerHour) % HoursPerDay`.
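// E.g. yearSeconds = 90000: 90000 / 3600 = 25 and 25 % 24 = 1, so the local
// hour is 1.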
auto hoursFromSecondsIntoYear = [this](Register src, Register dest,
Register scratch) {
udiv32ByConstant(src, SecondsPerHour, dest);
umod32ByConstant(dest, HoursPerDay, dest, scratch);
};
dateTimeFromSecondsIntoYear(secondsIntoYear, output, scratch1, scratch2,
hoursFromSecondsIntoYear);
}
void MacroAssembler::dateMinutesFromSecondsIntoYear(
ValueOperand secondsIntoYear, ValueOperand output, Register scratch1,
Register scratch2) {
// Inline implementation of seconds-into-year to local minutes computation
// from date_getMinutes.
// Compute `(yearSeconds / SecondsPerMinute) % MinutesPerHour`.
auto minutesFromSecondsIntoYear = [this](Register src, Register dest,
Register scratch) {
udiv32ByConstant(src, SecondsPerMinute, dest);
umod32ByConstant(dest, MinutesPerHour, dest, scratch);
};
dateTimeFromSecondsIntoYear(secondsIntoYear, output, scratch1, scratch2,
minutesFromSecondsIntoYear);
}
void MacroAssembler::dateSecondsFromSecondsIntoYear(
ValueOperand secondsIntoYear, ValueOperand output, Register scratch1,
Register scratch2) {
// Inline implementation of seconds-into-year to local seconds computation
// from date_getSeconds.
// Compute `yearSeconds % SecondsPerMinute`.
auto secondsFromSecondsIntoYear = [this](Register src, Register dest,
Register scratch) {
umod32ByConstant(src, SecondsPerMinute, dest, scratch);
};
dateTimeFromSecondsIntoYear(secondsIntoYear, output, scratch1, scratch2,
secondsFromSecondsIntoYear);
}
void MacroAssembler::computeImplicitThis(Register env, ValueOperand output,
Label* slowPath) {
// Inline implementation of ComputeImplicitThis.
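// E.g. for |with (obj) { f(); }| the implicit |this| seen by f is obj,
// loaded from the WithEnvironmentObject's this-slot below; for callees
// resolved on any other environment it is |undefined|.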
Register scratch = output.scratchReg();
MOZ_ASSERT(scratch != env);
loadObjClassUnsafe(env, scratch);
// Go to the slow path for possible debug environment proxies.
branchTestClassIsProxy(true, scratch, slowPath);
// WithEnvironmentObjects have an actual implicit |this|.
Label nonWithEnv, done;
branchPtr(Assembler::NotEqual, scratch,
ImmPtr(&WithEnvironmentObject::class_), &nonWithEnv);
{
if (JitOptions.spectreObjectMitigations) {
spectreZeroRegister(Assembler::NotEqual, scratch, env);
}
loadValue(Address(env, WithEnvironmentObject::offsetOfThisSlot()), output);
jump(&done);
}
bind(&nonWithEnv);
// The implicit |this| is |undefined| for all environment types except
// WithEnvironmentObject.
moveValue(JS::UndefinedValue(), output);
bind(&done);
}
void MacroAssembler::loadDOMExpandoValueGuardGeneration(
Register obj, ValueOperand output,
JS::ExpandoAndGeneration* expandoAndGeneration, uint64_t generation,
Label* fail) {
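// For DOM proxies the private reserved slot may hold a PrivateValue pointing
// at an ExpandoAndGeneration, which pairs the expando value with a generation
// counter. The guards below check that the slot still holds the expected
// ExpandoAndGeneration and that its generation is unchanged, so the cached
// expando information is still valid before loading the expando itself.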
loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()),
output.scratchReg());
loadValue(Address(output.scratchReg(),
js::detail::ProxyReservedSlots::offsetOfPrivateSlot()),
output);
// Guard the ExpandoAndGeneration* matches the proxy's ExpandoAndGeneration
// privateSlot.
branchTestValue(Assembler::NotEqual, output,
PrivateValue(expandoAndGeneration), fail);
// Guard expandoAndGeneration->generation matches the expected generation.
Address generationAddr(output.payloadOrValueReg(),
JS::ExpandoAndGeneration::offsetOfGeneration());
branch64(Assembler::NotEqual, generationAddr, Imm64(generation), fail);
// Load expandoAndGeneration->expando into the output Value register.
loadValue(Address(output.payloadOrValueReg(),
JS::ExpandoAndGeneration::offsetOfExpando()),
output);
}
void MacroAssembler::loadJitActivation(Register dest) {
loadJSContext(dest);
loadPtr(Address(dest, offsetof(JSContext, activation_)), dest);
}
void MacroAssembler::loadBaselineCompileQueue(Register dest) {
loadPtr(AbsoluteAddress(ContextRealmPtr(runtime())), dest);
computeEffectiveAddress(Address(dest, Realm::offsetOfBaselineCompileQueue()),
dest);
}
void MacroAssembler::guardSpecificAtom(Register str, JSAtom* atom,
Register scratch,
const LiveRegisterSet& volatileRegs,
Label* fail) {
Label done, notCachedAtom;
branchPtr(Assembler::Equal, str, ImmGCPtr(atom), &done);
// The pointers are not equal, so if the input string is also an atom it
// must be a different string.
branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), fail);
// Try to do a cheap atomize on the string and repeat the above test
tryFastAtomize(str, scratch, scratch, &notCachedAtom);
branchPtr(Assembler::Equal, scratch, ImmGCPtr(atom), &done);
jump(fail);
bind(&notCachedAtom);
// Check the length.
branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
Imm32(atom->length()), fail);
// Compare short atoms using inline assembly.
if (canCompareStringCharsInline(atom)) {
// Pure two-byte strings can't be equal to Latin-1 strings.
if (atom->hasTwoByteChars()) {
JS::AutoCheckCannotGC nogc;
if (!mozilla::IsUtf16Latin1(atom->twoByteRange(nogc))) {
branchLatin1String(str, fail);
}
}
// Call into the VM when the input is a rope or has a different encoding.
Label vmCall;
// Load the input string's characters.
Register stringChars = scratch;
loadStringCharsForCompare(str, atom, stringChars, &vmCall);
// Start comparing character by character.
branchIfNotStringCharsEquals(stringChars, atom, fail);
// Falls through if both strings are equal.
jump(&done);
bind(&vmCall);
}
// We have a non-atomized string with the same length. Call a helper
// function to do the comparison.
PushRegsInMask(volatileRegs);
using Fn = bool (*)(JSString* str1, JSString* str2);
setupUnalignedABICall(scratch);
movePtr(ImmGCPtr(atom), scratch);
passABIArg(scratch);
passABIArg(str);
callWithABI<Fn, EqualStringsHelperPure>();
storeCallPointerResult(scratch);
MOZ_ASSERT(!volatileRegs.has(scratch));
PopRegsInMask(volatileRegs);
branchIfFalseBool(scratch, fail);
bind(&done);
}
void MacroAssembler::guardStringToInt32(Register str, Register output,
Register scratch,
LiveRegisterSet volatileRegs,
Label* fail) {
Label vmCall, done;
// Use indexed value as fast path if possible.
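// (A string whose contents are a sufficiently small array index may cache
// that index in its header, so loadStringIndexValue produces the int32
// without parsing; everything else goes through GetInt32FromStringPure.)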
loadStringIndexValue(str, output, &vmCall);
jump(&done);
{
bind(&vmCall);
// Reserve space for holding the result int32_t of the call. Use
// pointer-size to avoid misaligning the stack on 64-bit platforms.
reserveStack(sizeof(uintptr_t));
moveStackPtrTo(output);
volatileRegs.takeUnchecked(scratch);
if (output.volatile_()) {
volatileRegs.addUnchecked(output);
}
PushRegsInMask(volatileRegs);
using Fn = bool (*)(JSContext* cx, JSString* str, int32_t* result);
setupUnalignedABICall(scratch);
loadJSContext(scratch);
passABIArg(scratch);
passABIArg(str);
passABIArg(output);
callWithABI<Fn, GetInt32FromStringPure>();
storeCallPointerResult(scratch);
PopRegsInMask(volatileRegs);
Label ok;
branchIfTrueBool(scratch, &ok);
{
// OOM path, recovered by GetInt32FromStringPure.
//
// Use addToStackPtr instead of freeStack as freeStack tracks stack height
// flow-insensitively, and using it twice would confuse the stack height
// tracking.
addToStackPtr(Imm32(sizeof(uintptr_t)));
jump(fail);
}
bind(&ok);
load32(Address(output, 0), output);
freeStack(sizeof(uintptr_t));
}
bind(&done);
}
void MacroAssembler::generateBailoutTail(Register scratch,
Register bailoutInfo) {
Label bailoutFailed;
branchIfFalseBool(ReturnReg, &bailoutFailed);
// Finish bailing out to Baseline.
{
// Prepare a register set for use in this case.
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
MOZ_ASSERT_IF(!IsHiddenSP(getStackPointer()),
!regs.has(AsRegister(getStackPointer())));
regs.take(bailoutInfo);
Register temp = regs.takeAny();
#ifdef DEBUG
// Assert the stack pointer points to the JitFrameLayout header. Copying
// starts here.
Label ok;
loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)),
temp);
branchStackPtr(Assembler::Equal, temp, &ok);
assumeUnreachable("Unexpected stack pointer value");
bind(&ok);
#endif
Register copyCur = regs.takeAny();
Register copyEnd = regs.takeAny();
// Copy data onto stack.
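// copyCur starts at copyStackTop and walks down to copyStackBottom; every
// word is re-pushed here, so the frame data reconstructed by the bailout
// ends up contiguously below the incoming stack pointer.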
loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)),
copyCur);
loadPtr(
Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)),
copyEnd);
{
Label copyLoop;
Label endOfCopy;
bind(&copyLoop);
branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
subPtr(Imm32(sizeof(uintptr_t)), copyCur);
subFromStackPtr(Imm32(sizeof(uintptr_t)));
loadPtr(Address(copyCur, 0), temp);
storePtr(temp, Address(getStackPointer(), 0));
jump(&copyLoop);
bind(&endOfCopy);
}
loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)),
FramePointer);
// Enter exit frame for the FinishBailoutToBaseline call.
pushFrameDescriptor(FrameType::BaselineJS);
push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
push(FramePointer);
// No GC things to mark on the stack, push a bare token.
loadJSContext(scratch);
enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
// Save needed values onto stack temporarily.
push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
// Call a stub to free allocated memory and create arguments objects.
using Fn = bool (*)(BaselineBailoutInfo* bailoutInfoArg);
setupUnalignedABICall(temp);
passABIArg(bailoutInfo);
callWithABI<Fn, FinishBailoutToBaseline>(
ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
branchIfFalseBool(ReturnReg, exceptionLabel());
// Restore values where they need to be and resume execution.
AllocatableGeneralRegisterSet enterRegs(GeneralRegisterSet::All());
MOZ_ASSERT(!enterRegs.has(FramePointer));
Register jitcodeReg = enterRegs.takeAny();
pop(jitcodeReg);
// Discard exit frame.
addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));
jump(jitcodeReg);
}
bind(&bailoutFailed);
{
// jit::Bailout or jit::InvalidationBailout failed and returned false. The
// Ion frame has already been discarded and the stack pointer points to the
// JitFrameLayout header. Turn it into an ExitFrameLayout, similar to
// EnsureUnwoundJitExitFrame, and call the exception handler.
loadJSContext(scratch);
enterFakeExitFrame(scratch, scratch, ExitFrameType::UnwoundJit);
jump(exceptionLabel());
}
}
void MacroAssembler::loadJitCodeRaw(Register func, Register dest) {
static_assert(BaseScript::offsetOfJitCodeRaw() ==
SelfHostedLazyScript::offsetOfJitCodeRaw(),
"SelfHostedLazyScript and BaseScript must use same layout for "
"jitCodeRaw_");
static_assert(
BaseScript::offsetOfJitCodeRaw() == wasm::JumpTableJitEntryOffset,
"Wasm exported functions jit entries must use same layout for "
"jitCodeRaw_");
loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), dest);
loadPtr(Address(dest, BaseScript::offsetOfJitCodeRaw()), dest);
}
void MacroAssembler::loadBaselineJitCodeRaw(Register func, Register dest,
Label* failure) {
// Load JitScript
loadPrivate(Address(func, JSFunction::offsetOfJitInfoOrScript()), dest);
if (failure) {
branchIfScriptHasNoJitScript(dest, failure);
}
loadJitScript(dest, dest);
// Load BaselineScript
loadPtr(Address(dest, JitScript::offsetOfBaselineScript()), dest);
if (failure) {
static_assert(DisabledScript < CompilingScript);
branchPtr(Assembler::BelowOrEqual, dest, ImmWord(CompilingScript), failure);
}
// Load Baseline jitcode
loadPtr(Address(dest, BaselineScript::offsetOfMethod()), dest);
loadPtr(Address(dest, JitCode::offsetOfCode()), dest);
}
void MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest) {
if (framePtr != dest) {
movePtr(framePtr, dest);
}
subPtr(Imm32(BaselineFrame::Size()), dest);
}
void MacroAssembler::storeICScriptInJSContext(Register icScript) {
storePtr(icScript, AbsoluteAddress(runtime()->addressOfInlinedICScript()));
}
void MacroAssembler::handleFailure() {
// Re-entry code is irrelevant because the exception will leave the
// running function and never come back
TrampolinePtr excTail = runtime()->jitRuntime()->getExceptionTail();
jump(excTail);
}
void MacroAssembler::assumeUnreachable(const char* output) {
#ifdef JS_MASM_VERBOSE
if (!IsCompilingWasm()) {
AllocatableRegisterSet regs(RegisterSet::Volatile());
LiveRegisterSet save(regs.asLiveSet());
PushRegsInMask(save);
Register temp = regs.takeAnyGeneral();
using Fn = void (*)(const char* output);
setupUnalignedABICall(temp);
movePtr(ImmPtr(output), temp);
passABIArg(temp);
callWithABI<Fn, AssumeUnreachable>(ABIType::General,
CheckUnsafeCallWithABI::DontCheckOther);
PopRegsInMask(save);
}
#endif
breakpoint();
}
void MacroAssembler::printf(const char* output) {
#ifdef JS_MASM_VERBOSE
AllocatableRegisterSet regs(RegisterSet::Volatile());
LiveRegisterSet save(regs.asLiveSet());
PushRegsInMask(save);
Register temp = regs.takeAnyGeneral();
using Fn = void (*)(const char* output);
setupUnalignedABICall(temp);
movePtr(ImmPtr(output), temp);
passABIArg(temp);
callWithABI<Fn, Printf0>();
PopRegsInMask(save);
#endif
}
void MacroAssembler::printf(const char* output, Register value) {
#ifdef JS_MASM_VERBOSE
AllocatableRegisterSet regs(RegisterSet::Volatile());
LiveRegisterSet save(regs.asLiveSet());
PushRegsInMask(save);
regs.takeUnchecked(value);
Register temp = regs.takeAnyGeneral();
using Fn = void (*)(const char* output, uintptr_t value);
setupUnalignedABICall(temp);
movePtr(ImmPtr(output), temp);
passABIArg(temp);
passABIArg(value);
callWithABI<Fn, Printf1>();
PopRegsInMask(save);
#endif
}
void MacroAssembler::convertInt32ValueToDouble(ValueOperand val) {
Label done;
branchTestInt32(Assembler::NotEqual, val, &done);
ScratchDoubleScope fpscratch(*this);
convertInt32ToDouble(val.payloadOrValueReg(), fpscratch);
boxDouble(fpscratch, val, fpscratch);
bind(&done);
}
void MacroAssembler::convertValueToFloatingPoint(
ValueOperand value, FloatRegister output, Register maybeTemp,
LiveRegisterSet volatileLiveRegs, Label* fail,
FloatingPointType outputType) {
Label isDouble, isInt32OrBool, isNull, done;
{
ScratchTagScope tag(*this, value);
splitTagForTest(value, tag);
branchTestDouble(Assembler::Equal, tag, &isDouble);
branchTestInt32(Assembler::Equal, tag, &isInt32OrBool);
branchTestBoolean(Assembler::Equal, tag, &isInt32OrBool);
branchTestNull(Assembler::Equal, tag, &isNull);
branchTestUndefined(Assembler::NotEqual, tag, fail);
}
// fall-through: undefined
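// Per ToNumber: undefined converts to NaN, null to +0, and booleans to 0 or
// 1; every other tag was routed to |fail| above.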
if (outputType == FloatingPointType::Float16 ||
outputType == FloatingPointType::Float32) {
loadConstantFloat32(float(GenericNaN()), output);
} else {
loadConstantDouble(GenericNaN(), output);
}
jump(&done);
bind(&isNull);
if (outputType == FloatingPointType::Float16 ||
outputType == FloatingPointType::Float32) {
loadConstantFloat32(0.0f, output);
} else {
loadConstantDouble(0.0, output);
}
jump(&done);
bind(&isInt32OrBool);
if (outputType == FloatingPointType::Float16) {
convertInt32ToFloat16(value.payloadOrValueReg(), output, maybeTemp,
volatileLiveRegs);
} else if (outputType == FloatingPointType::Float32) {
convertInt32ToFloat32(value.payloadOrValueReg(), output);
} else {
convertInt32ToDouble(value.payloadOrValueReg(), output);
}
jump(&done);
// On some non-multiAlias platforms, unboxDouble may use the scratch register,
// so do not merge code paths here.
bind(&isDouble);
if ((outputType == FloatingPointType::Float16 ||
outputType == FloatingPointType::Float32) &&
hasMultiAlias()) {
ScratchDoubleScope tmp(*this);
unboxDouble(value, tmp);
if (outputType == FloatingPointType::Float16) {
convertDoubleToFloat16(tmp, output, maybeTemp, volatileLiveRegs);
} else {
convertDoubleToFloat32(tmp, output);
}
} else {
FloatRegister tmp = output.asDouble();
unboxDouble(value, tmp);
if (outputType == FloatingPointType::Float16) {
convertDoubleToFloat16(tmp, output, maybeTemp, volatileLiveRegs);
} else if (outputType == FloatingPointType::Float32) {
convertDoubleToFloat32(tmp, output);
}
}
bind(&done);
}
void MacroAssembler::outOfLineTruncateSlow(FloatRegister src, Register dest,
bool widenFloatToDouble,
bool compilingWasm,
wasm::BytecodeOffset callOffset) {
ScratchDoubleScope fpscratch(*this);
if (widenFloatToDouble) {
convertFloat32ToDouble(src, fpscratch);
src = fpscratch;
}
MOZ_ASSERT(src.isDouble());
if (compilingWasm) {
Push(InstanceReg);
int32_t framePushedAfterInstance = framePushed();
setupWasmABICall();
passABIArg(src, ABIType::Float64);
int32_t instanceOffset = framePushed() - framePushedAfterInstance;
callWithABI(callOffset, wasm::SymbolicAddress::ToInt32,
mozilla::Some(instanceOffset));
storeCallInt32Result(dest);
Pop(InstanceReg);
} else {
using Fn = int32_t (*)(double);
setupUnalignedABICall(dest);
passABIArg(src, ABIType::Float64);
callWithABI<Fn, JS::ToInt32>(ABIType::General,
CheckUnsafeCallWithABI::DontCheckOther);
storeCallInt32Result(dest);
}
}
void MacroAssembler::convertValueToInt32(ValueOperand value, FloatRegister temp,
Register output, Label* fail,
bool negativeZeroCheck,
IntConversionInputKind conversion) {
Label done, isInt32, isBool, isDouble, isString;
{
ScratchTagScope tag(*this, value);
splitTagForTest(value, tag);
branchTestInt32(Equal, tag, &isInt32);
branchTestDouble(Equal, tag, &isDouble);
if (conversion == IntConversionInputKind::Any) {
branchTestBoolean(Equal, tag, &isBool);
branchTestNull(Assembler::NotEqual, tag, fail);
} else {
jump(fail);
}
}
// The value is null - just emit 0.
if (conversion == IntConversionInputKind::Any) {
move32(Imm32(0), output);
jump(&done);
}
// Try converting double into integer.
{
bind(&isDouble);
unboxDouble(value, temp);
convertDoubleToInt32(temp, output, fail, negativeZeroCheck);
jump(&done);
}
// Just unbox a bool, the result is 0 or 1.
if (conversion == IntConversionInputKind::Any) {
bind(&isBool);
unboxBoolean(value, output);
jump(&done);
}
// Integers can be unboxed.
{
bind(&isInt32);
unboxInt32(value, output);
}
bind(&done);
}
void MacroAssembler::truncateValueToInt32(
ValueOperand value, Label* handleStringEntry, Label* handleStringRejoin,
Label* truncateDoubleSlow, Register stringReg, FloatRegister temp,
Register output, Label* fail) {
Label done, isInt32, isBool, isDouble, isNull, isString;
bool handleStrings = handleStringEntry && handleStringRejoin;
// |output| needs to be different from |stringReg| to load string indices.
MOZ_ASSERT_IF(handleStrings, stringReg != output);
{
ScratchTagScope tag(*this, value);
splitTagForTest(value, tag);
branchTestInt32(Equal, tag, &isInt32);
branchTestDouble(Equal, tag, &isDouble);
branchTestBoolean(Equal, tag, &isBool);
branchTestNull(Equal, tag, &isNull);
if (handleStrings) {
branchTestString(Equal, tag, &isString);
}
branchTestUndefined(Assembler::NotEqual, tag, fail);
}
// The value is null or undefined in truncation contexts - just emit 0.
{
bind(&isNull);
move32(Imm32(0), output);
jump(&done);
}
// First try loading a string index. If that fails, try converting a string
// into a double, then jump to the double case.
Label handleStringIndex;
if (handleStrings) {
bind(&isString);
unboxString(value, stringReg);
loadStringIndexValue(stringReg, output, handleStringEntry);
jump(&done);
}
// Try converting double into integer.
{
bind(&isDouble);
unboxDouble(value, temp);
if (handleStrings) {
bind(handleStringRejoin);
}
branchTruncateDoubleMaybeModUint32(
temp, output, truncateDoubleSlow ? truncateDoubleSlow : fail);
jump(&done);
}
// Just unbox a bool, the result is 0 or 1.
{
bind(&isBool);
unboxBoolean(value, output);
jump(&done);
}
// Integers can be unboxed.
{
bind(&isInt32);
unboxInt32(value, output);
}
bind(&done);
}
void MacroAssembler::clampValueToUint8(ValueOperand value,
Label* handleStringEntry,
Label* handleStringRejoin,
Register stringReg, FloatRegister temp,
Register output, Label* fail) {
Label done, isInt32, isBool, isDouble, isNull, isString;
{
ScratchTagScope tag(*this, value);
splitTagForTest(value, tag);
branchTestInt32(Equal, tag, &isInt32);
branchTestDouble(Equal, tag, &isDouble);
branchTestBoolean(Equal, tag, &isBool);
branchTestNull(Equal, tag, &isNull);
branchTestString(Equal, tag, &isString);
branchTestUndefined(Assembler::NotEqual, tag, fail);
}
// The value is null or undefined in truncation contexts - just emit 0.
{
bind(&isNull);
move32(Imm32(0), output);
jump(&done);
}
// Try converting a string into a double, then jump to the double case.
{
bind(&isString);
unboxString(value, stringReg);
jump(handleStringEntry);
}
// Try converting double into integer.
{
bind(&isDouble);
unboxDouble(value, temp);
bind(handleStringRejoin);
clampDoubleToUint8(temp, output);
jump(&done);
}
// Just unbox a bool, the result is 0 or 1.
{
bind(&isBool);
unboxBoolean(value, output);
jump(&done);
}
// Integers can be unboxed.
{
bind(&isInt32);
unboxInt32(value, output);
clampIntToUint8(output);
}
bind(&done);
}
void MacroAssembler::finish() {
if (failureLabel_.used()) {
bind(&failureLabel_);
handleFailure();
}
MacroAssemblerSpecific::finish();
MOZ_RELEASE_ASSERT(
size() <= MaxCodeBytesPerProcess,
"AssemblerBuffer should ensure we don't exceed MaxCodeBytesPerProcess");
if (bytesNeeded() > MaxCodeBytesPerProcess) {
setOOM();
}
}
void MacroAssembler::link(JitCode* code) {
MOZ_ASSERT(!oom());
linkProfilerCallSites(code);
}
MacroAssembler::AutoProfilerCallInstrumentation::
AutoProfilerCallInstrumentation(MacroAssembler& masm) {
if (!masm.emitProfilingInstrumentation_) {
return;
}
Register reg = CallTempReg0;
Register reg2 = CallTempReg1;
masm.push(reg);
masm.push(reg2);
CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
masm.loadJSContext(reg2);
masm.loadPtr(Address(reg2, offsetof(JSContext, profilingActivation_)), reg2);
masm.storePtr(reg,
Address(reg2, JitActivation::offsetOfLastProfilingCallSite()));
masm.appendProfilerCallSite(label);
masm.pop(reg2);
masm.pop(reg);
}
void MacroAssembler::linkProfilerCallSites(JitCode* code) {
for (size_t i = 0; i < profilerCallSites_.length(); i++) {
CodeOffset offset = profilerCallSites_[i];
CodeLocationLabel location(code, offset);
PatchDataWithValueCheck(location, ImmPtr(location.raw()),
ImmPtr((void*)-1));
}
}
void MacroAssembler::alignJitStackBasedOnNArgs(Register nargs,
bool countIncludesThis) {
// The stack should already be aligned to the size of a value.
assertStackAlignment(sizeof(Value), 0);
static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
"JitStackValueAlignment is either 1 or 2.");
if (JitStackValueAlignment == 1) {
return;
}
// A jit frame is composed of the following:
//
// [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
// \________JitFrameLayout_________/
// (The stack grows this way --->)
//
// We want to ensure that |raddr|, the return address, is 16-byte aligned.
// (Note: if 8-byte alignment was sufficient, we would have already
// returned above.)
// JitFrameLayout does not affect the alignment, so we can ignore it.
static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
"JitFrameLayout doesn't affect stack alignment");
// Therefore, we need to ensure that |this| is aligned.
// This implies that |argN| must be aligned if N is even,
// and offset by |sizeof(Value)| if N is odd.
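// Worked example (sizeof(Value) == 8, JitStackAlignment == 16): with N == 2,
// argN sits at |this| + 16 and must itself be 16-byte aligned; with N == 3
// it sits at |this| + 24, i.e. 8 bytes past a 16-byte boundary.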
// Depending on the context of the caller, it may be easier to pass in a
// register that has already been modified to include |this|. If that is the
// case, we want to flip the direction of the test.
Assembler::Condition condition =
countIncludesThis ? Assembler::NonZero : Assembler::Zero;
Label alignmentIsOffset, end;
branchTestPtr(condition, nargs, Imm32(1), &alignmentIsOffset);
// |argN| should be aligned to 16 bytes.
andToStackPtr(Imm32(~(JitStackAlignment - 1)));
jump(&end);
// |argN| should be offset by 8 bytes from 16-byte alignment.
// We already know that it is 8-byte aligned, so the only possibilities are:
// a) It is 16-byte aligned, and we must offset it by 8 bytes.
// b) It is not 16-byte aligned, and therefore already has the right offset.
// Therefore, we test to see if it is 16-byte aligned, and adjust it if it is.
bind(&alignmentIsOffset);
branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
subFromStackPtr(Imm32(sizeof(Value)));
bind(&end);
}
void MacroAssembler::alignJitStackBasedOnNArgs(uint32_t argc,
bool countIncludesThis) {
// The stack should already be aligned to the size of a value.
assertStackAlignment(sizeof(Value), 0);
static_assert(JitStackValueAlignment == 1 || JitStackValueAlignment == 2,
"JitStackValueAlignment is either 1 or 2.");
if (JitStackValueAlignment == 1) {
return;
}
// See above for full explanation.
uint32_t nArgs = argc + !countIncludesThis;
if (nArgs % 2 == 0) {
// |argN| should be 16-byte aligned
andToStackPtr(Imm32(~(JitStackAlignment - 1)));
} else {
// |argN| must be 16-byte aligned if argc is even,
// and offset by 8 if argc is odd.
Label end;
branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
subFromStackPtr(Imm32(sizeof(Value)));
bind(&end);
assertStackAlignment(JitStackAlignment, sizeof(Value));
}
}
// ===============================================================
MacroAssembler::MacroAssembler(TempAllocator& alloc,
CompileRuntime* maybeRuntime,
CompileRealm* maybeRealm)
: maybeRuntime_(maybeRuntime),
maybeRealm_(maybeRealm),
framePushed_(0),
abiArgs_(/* This will be overwritten for every ABI call, the initial value
doesn't matter */
ABIKind::System),
#ifdef DEBUG
inCall_(false),
#endif
dynamicAlignment_(false),
emitProfilingInstrumentation_(false) {
moveResolver_.setAllocator(alloc);
}
StackMacroAssembler::StackMacroAssembler(JSContext* cx, TempAllocator& alloc)
: MacroAssembler(alloc, CompileRuntime::get(cx->runtime()),
CompileRealm::get(cx->realm())) {}
OffThreadMacroAssembler::OffThreadMacroAssembler(TempAllocator& alloc,
CompileRealm* realm)
: MacroAssembler(alloc, realm->runtime(), realm) {
MOZ_ASSERT(CurrentThreadIsOffThreadCompiling());
}
WasmMacroAssembler::WasmMacroAssembler(TempAllocator& alloc, bool limitedSize)
: MacroAssembler(alloc) {
#if defined(JS_CODEGEN_ARM64)
// Stubs + builtins + the baseline compiler all require the native SP,
// not the PSP.
SetStackPointer64(sp);
#endif
if (!limitedSize) {
setUnlimitedBuffer();
}
}
bool MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr,
AutoSaveLiveRegisters& save) {
return buildOOLFakeExitFrame(fakeReturnAddr);
}
#ifndef JS_CODEGEN_ARM64
void MacroAssembler::subFromStackPtr(Register reg) {
subPtr(reg, getStackPointer());
}
#endif // JS_CODEGEN_ARM64
//{{{ check_macroassembler_style
// ===============================================================
// Stack manipulation functions.
void MacroAssembler::PushRegsInMask(LiveGeneralRegisterSet set) {
PushRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
}
void MacroAssembler::PopRegsInMask(LiveRegisterSet set) {
PopRegsInMaskIgnore(set, LiveRegisterSet());
}
void MacroAssembler::PopRegsInMask(LiveGeneralRegisterSet set) {
PopRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
}
void MacroAssembler::Push(PropertyKey key, Register scratchReg) {
if (key.isGCThing()) {
// If we're pushing a gcthing, then we can't just push the tagged key
// value since the GC won't have any idea that the push instruction
// carries a reference to a gcthing. Need to unpack the pointer,
// push it using ImmGCPtr, and then rematerialize the PropertyKey at
// runtime.
if (key.isString()) {
JSString* str = key.toString();
MOZ_ASSERT((uintptr_t(str) & PropertyKey::TypeMask) == 0);
static_assert(PropertyKey::StringTypeTag == 0,
"need to orPtr StringTypeTag if it's not 0");
Push(ImmGCPtr(str));
} else {
MOZ_ASSERT(key.isSymbol());
movePropertyKey(key, scratchReg);
Push(scratchReg);
}
} else {
MOZ_ASSERT(key.isInt());
Push(ImmWord(key.asRawBits()));
}
}
void MacroAssembler::moveValue(const TypedOrValueRegister& src,
const ValueOperand& dest) {
if (src.hasValue()) {
moveValue(src.valueReg(), dest);
return;
}
MIRType type = src.type();
AnyRegister reg = src.typedReg();
if (!IsFloatingPointType(type)) {
boxNonDouble(ValueTypeFromMIRType(type), reg.gpr(), dest);
return;
}
ScratchDoubleScope scratch(*this);
FloatRegister freg = reg.fpu();
if (type == MIRType::Float32) {
convertFloat32ToDouble(freg, scratch);
freg = scratch;
}
boxDouble(freg, dest, scratch);
}
void MacroAssembler::movePropertyKey(PropertyKey key, Register dest) {
if (key.isGCThing()) {
// See comment in |Push(PropertyKey, ...)| above for an explanation.
if (key.isString()) {
JSString* str = key.toString();
MOZ_ASSERT((uintptr_t(str) & PropertyKey::TypeMask) == 0);
static_assert(PropertyKey::StringTypeTag == 0,
"need to orPtr StringTypeTag tag if it's not 0");
movePtr(ImmGCPtr(str), dest);
} else {
MOZ_ASSERT(key.isSymbol());
JS::Symbol* sym = key.toSymbol();
movePtr(ImmGCPtr(sym), dest);
orPtr(Imm32(PropertyKey::SymbolTypeTag), dest);
}
} else {
MOZ_ASSERT(key.isInt());
movePtr(ImmWord(key.asRawBits()), dest);
}
}
void MacroAssembler::Push(TypedOrValueRegister v) {
if (v.hasValue()) {
Push(v.valueReg());
} else if (IsFloatingPointType(v.type())) {
FloatRegister reg = v.typedReg().fpu();
if (v.type() == MIRType::Float32) {
ScratchDoubleScope fpscratch(*this);
convertFloat32ToDouble(reg, fpscratch);
PushBoxed(fpscratch);
} else {
PushBoxed(reg);
}
} else {
Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
}
}
void MacroAssembler::Push(const ConstantOrRegister& v) {
if (v.constant()) {
Push(v.value());
} else {
Push(v.reg());
}
}
void MacroAssembler::Push(const Address& addr) {
push(addr);
framePushed_ += sizeof(uintptr_t);
}
void MacroAssembler::Push(const ValueOperand& val) {
pushValue(val);
framePushed_ += sizeof(Value);
}
void MacroAssembler::Push(const Value& val) {
pushValue(val);
framePushed_ += sizeof(Value);
}
void MacroAssembler::Push(JSValueType type, Register reg) {
pushValue(type, reg);
framePushed_ += sizeof(Value);
}
void MacroAssembler::Push(const Register64 reg) {
#if JS_BITS_PER_WORD == 64
Push(reg.reg);
#else
MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Big-endian not supported.");
Push(reg.high);
Push(reg.low);
#endif
}
void MacroAssembler::Pop(const Register64 reg) {
#if JS_BITS_PER_WORD == 64
Pop(reg.reg);
#else
MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Big-endian not supported.");
Pop(reg.low);
Pop(reg.high);
#endif
}
void MacroAssembler::PushEmptyRooted(VMFunctionData::RootType rootType) {
switch (rootType) {
case VMFunctionData::RootNone:
MOZ_CRASH("Handle must have root type");
case VMFunctionData::RootObject:
case VMFunctionData::RootString:
case VMFunctionData::RootCell:
case VMFunctionData::RootBigInt:
Push(ImmPtr(nullptr));
break;
case VMFunctionData::RootValue:
Push(UndefinedValue());
break;
case VMFunctionData::RootId:
Push(ImmWord(JS::PropertyKey::Void().asRawBits()));
break;
}
}
void MacroAssembler::adjustStack(int amount) {
if (amount > 0) {
freeStack(amount);
} else if (amount < 0) {
reserveStack(-amount);
}
}
void MacroAssembler::freeStack(uint32_t amount) {
MOZ_ASSERT(amount <= framePushed_);
if (amount) {
addToStackPtr(Imm32(amount));
}
framePushed_ -= amount;
}
void MacroAssembler::reserveVMFunctionOutParamSpace(const VMFunctionData& f) {
switch (f.outParam) {
case Type_Handle:
PushEmptyRooted(f.outParamRootType);
break;
case Type_Value:
case Type_Double:
case Type_Pointer:
case Type_Int32:
case Type_Bool:
reserveStack(f.sizeOfOutParamStackSlot());
break;
case Type_Void:
break;
case Type_Cell:
MOZ_CRASH("Unexpected outparam type");
}
}
void MacroAssembler::loadVMFunctionOutParam(const VMFunctionData& f,
const Address& addr) {
switch (f.outParam) {
case Type_Handle:
switch (f.outParamRootType) {
case VMFunctionData::RootNone:
MOZ_CRASH("Handle must have root type");
case VMFunctionData::RootObject:
case VMFunctionData::RootString:
case VMFunctionData::RootCell:
case VMFunctionData::RootBigInt:
case VMFunctionData::RootId:
loadPtr(addr, ReturnReg);
break;
case VMFunctionData::RootValue:
loadValue(addr, JSReturnOperand);
break;
}
break;
case Type_Value:
loadValue(addr, JSReturnOperand);
break;
case Type_Int32:
load32(addr, ReturnReg);
break;
case Type_Bool:
load8ZeroExtend(addr, ReturnReg);
break;
case Type_Double:
loadDouble(addr, ReturnDoubleReg);
break;
case Type_Pointer:
loadPtr(addr, ReturnReg);
break;
case Type_Void:
break;
case Type_Cell:
MOZ_CRASH("Unexpected outparam type");
}
}
// ===============================================================
// ABI function calls.
void MacroAssembler::setupABICallHelper(ABIKind kind) {
#ifdef DEBUG
MOZ_ASSERT(!inCall_);
inCall_ = true;
#endif
#ifdef JS_SIMULATOR
signature_ = 0;
#endif
// Reinitialize the ABIArg generator.
abiArgs_ = ABIArgGenerator(kind);
#if defined(JS_CODEGEN_ARM)
if (kind != ABIKind::Wasm) {
// On ARM, we need to know what ABI we are using.
abiArgs_.setUseHardFp(ARMFlags::UseHardFpABI());
}
#endif
}
void MacroAssembler::setupNativeABICall() {
setupABICallHelper(ABIKind::System);
}
void MacroAssembler::setupWasmABICall() {
MOZ_ASSERT(IsCompilingWasm(), "non-wasm should use setupAlignedABICall");
setupABICallHelper(ABIKind::System);
dynamicAlignment_ = false;
}
void MacroAssembler::setupUnalignedABICallDontSaveRestoreSP() {
andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
setFramePushed(0); // Required for aligned callWithABI.
setupAlignedABICall();
}
void MacroAssembler::setupAlignedABICall() {
MOZ_ASSERT(!IsCompilingWasm(), "wasm should use setupWasmABICall");
setupNativeABICall();
dynamicAlignment_ = false;
}
#ifdef JS_CHECK_UNSAFE_CALL_WITH_ABI
void MacroAssembler::wasmCheckUnsafeCallWithABIPre() {
// Set the JSContext::inUnsafeCallWithABI flag.
loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()),
ABINonArgReturnReg0);
Address flagAddr(ABINonArgReturnReg0,
JSContext::offsetOfInUnsafeCallWithABI());
store32(Imm32(1), flagAddr);
}
void MacroAssembler::wasmCheckUnsafeCallWithABIPost() {
// Check JSContext::inUnsafeCallWithABI was cleared as expected.
Label ok;
// InstanceReg is invariant in the system ABI, so we can use it here.
loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()),
ABINonArgReturnReg0);
Address flagAddr(ABINonArgReturnReg0,
JSContext::offsetOfInUnsafeCallWithABI());
branch32(Assembler::Equal, flagAddr, Imm32(0), &ok);
assumeUnreachable("callWithABI: callee did not use AutoUnsafeCallWithABI");
bind(&ok);
}
#endif // JS_CHECK_UNSAFE_CALL_WITH_ABI
void MacroAssembler::passABIArg(const MoveOperand& from, ABIType type) {
MOZ_ASSERT(inCall_);
appendSignatureType(type);
ABIArg arg;
MoveOp::Type moveType;
switch (type) {
case ABIType::Float32:
arg = abiArgs_.next(MIRType::Float32);
moveType = MoveOp::FLOAT32;
break;
case ABIType::Float64:
arg = abiArgs_.next(MIRType::Double);
moveType = MoveOp::DOUBLE;
break;
case ABIType::General:
arg = abiArgs_.next(MIRType::Pointer);
moveType = MoveOp::GENERAL;
break;
default:
MOZ_CRASH("Unexpected argument type");
}
MoveOperand to(*this, arg);
if (from == to) {
return;
}
if (oom()) {
return;
}
propagateOOM(moveResolver_.addMove(from, to, moveType));
}
void MacroAssembler::callWithABINoProfiler(void* fun, ABIType result,
CheckUnsafeCallWithABI check) {
appendSignatureType(result);
#ifdef JS_SIMULATOR
fun = Simulator::RedirectNativeFunction(fun, signature());
#endif
uint32_t stackAdjust;
callWithABIPre(&stackAdjust);
#ifdef JS_CHECK_UNSAFE_CALL_WITH_ABI
if (check == CheckUnsafeCallWithABI::Check) {
// Set the JSContext::inUnsafeCallWithABI flag.
push(ReturnReg);
loadJSContext(ReturnReg);
Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
store32(Imm32(1), flagAddr);
pop(ReturnReg);
// On arm64, SP may be < PSP now (that's OK).
}
#endif
call(ImmPtr(fun));
callWithABIPost(stackAdjust, result);
#ifdef JS_CHECK_UNSAFE_CALL_WITH_ABI
if (check == CheckUnsafeCallWithABI::Check) {
// Check JSContext::inUnsafeCallWithABI was cleared as expected.
Label ok;
push(ReturnReg);
loadJSContext(ReturnReg);
Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
branch32(Assembler::Equal, flagAddr, Imm32(0), &ok);
assumeUnreachable("callWithABI: callee did not use AutoUnsafeCallWithABI");
bind(&ok);
pop(ReturnReg);
// On arm64, SP may be < PSP now (that's OK).
}
#endif
}
CodeOffset MacroAssembler::callWithABI(wasm::BytecodeOffset bytecode,
wasm::SymbolicAddress imm,
mozilla::Maybe<int32_t> instanceOffset,
ABIType result) {
uint32_t stackAdjust;
callWithABIPre(&stackAdjust, /* callFromWasm = */ true);
// The instance register is used in builtin thunks and must be set.
bool needsBuiltinThunk = wasm::NeedsBuiltinThunk(imm);
#ifdef JS_CHECK_UNSAFE_CALL_WITH_ABI
// The builtin thunk exits the JIT activation, if we don't have one we must
// use AutoUnsafeCallWithABI inside the builtin and check that here.
bool checkUnsafeCallWithABI = !needsBuiltinThunk;
#else
bool checkUnsafeCallWithABI = false;
#endif
if (needsBuiltinThunk || checkUnsafeCallWithABI) {
if (instanceOffset) {
loadPtr(Address(getStackPointer(), *instanceOffset + stackAdjust),
InstanceReg);
} else {
MOZ_CRASH("callWithABI missing instanceOffset");
}
}
#ifdef JS_CHECK_UNSAFE_CALL_WITH_ABI
if (checkUnsafeCallWithABI) {
wasmCheckUnsafeCallWithABIPre();
}
#endif
CodeOffset raOffset = call(
wasm::CallSiteDesc(bytecode.offset(), wasm::CallSiteKind::Symbolic), imm);
callWithABIPost(stackAdjust, result, /* callFromWasm = */ true);
#ifdef JS_CHECK_UNSAFE_CALL_WITH_ABI
if (checkUnsafeCallWithABI) {
wasmCheckUnsafeCallWithABIPost();
}
#endif
return raOffset;
}
void MacroAssembler::callDebugWithABI(wasm::SymbolicAddress imm,
ABIType result) {
uint32_t stackAdjust;
callWithABIPre(&stackAdjust, /* callFromWasm = */ false);
call(imm);
callWithABIPost(stackAdjust, result, /* callFromWasm = */ false);
}
// ===============================================================
// Exit frame footer.
void MacroAssembler::linkExitFrame(Register cxreg, Register scratch) {
loadPtr(Address(cxreg, JSContext::offsetOfActivation()), scratch);
storeStackPtr(Address(scratch, JitActivation::offsetOfPackedExitFP()));
}
// ===============================================================
// Simple value-shuffling helpers, to hide MoveResolver verbosity
// in common cases.
void MacroAssembler::moveRegPair(Register src0, Register src1, Register dst0,
Register dst1, MoveOp::Type type) {
MoveResolver& moves = moveResolver();
if (src0 != dst0) {
propagateOOM(moves.addMove(MoveOperand(src0), MoveOperand(dst0), type));
}
if (src1 != dst1) {
propagateOOM(moves.addMove(MoveOperand(src1), MoveOperand(dst1), type));
}
propagateOOM(moves.resolve());
if (oom()) {
return;
}
MoveEmitter emitter(*this);
emitter.emit(moves);
emitter.finish();
}
// ===============================================================
// Arithmetic functions
void MacroAssembler::pow32(Register base, Register power, Register dest,
Register temp1, Register temp2, Label* onOver) {
// Inline int32-specialized implementation of js::powi with overflow
// detection.
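// This is the standard square-and-multiply loop: the exponent is consumed
// one bit at a time from the least significant end, |runningSquare| (temp1)
// holds base^(2^i), and |result| (dest) multiplies in the squares for the
// set bits. E.g. base = 3, power = 5 (binary 101): result = 3 * 3^4 = 243.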
move32(Imm32(1), dest); // result = 1
// x^y where x == 1 returns 1 for any y.
Label done;
branch32(Assembler::Equal, base, Imm32(1), &done);
// x^y where y < 0 returns a non-int32 value for any x != 1. Except when y is
// large enough so that the result is no longer representable as a double with
// fractional parts. We can't easily determine when y is too large, so we bail
// here.
// Note: it's important for this condition to match the code in CacheIR.cpp
// (CanAttachInt32Pow) to prevent failure loops.
branchTest32(Assembler::Signed, power, power, onOver);
move32(base, temp1); // runningSquare = x
move32(power, temp2); // n = y
Label start;
jump(&start);
Label loop;
bind(&loop);
// runningSquare *= runningSquare
branchMul32(Assembler::Overflow, temp1, temp1, onOver);
bind(&start);
// if ((n & 1) != 0) result *= runningSquare
Label even;
branchTest32(Assembler::Zero, temp2, Imm32(1), &even);
branchMul32(Assembler::Overflow, temp1, dest, onOver);
bind(&even);
// n >>= 1
// if (n == 0) return result
branchRshift32(Assembler::NonZero, Imm32(1), temp2, &loop);
bind(&done);
}
void MacroAssembler::powPtr(Register base, Register power, Register dest,
Register temp1, Register temp2, Label* onOver) {
// Inline intptr-specialized implementation of BigInt::pow with overflow
// detection.
// Negative exponents are disallowed for any BigInts.
branchTestPtr(Assembler::Signed, power, power, onOver);
movePtr(ImmWord(1), dest); // result = 1
// x^y where x == 1 returns 1 for any y.
Label done;
branchPtr(Assembler::Equal, base, ImmWord(1), &done);
// x^y where x == -1 returns 1 for even y, and -1 for odd y.
Label notNegativeOne;
branchPtr(Assembler::NotEqual, base, ImmWord(-1), &notNegativeOne);
test32MovePtr(Assembler::NonZero, power, Imm32(1), base, dest);
jump(&done);
bind(&notNegativeOne);
// x ** y with |x| > 1 and y >= DigitBits can't be pointer-sized.
branchPtr(Assembler::GreaterThanOrEqual, power, Imm32(BigInt::DigitBits),
onOver);
movePtr(base, temp1); // runningSquare = x
movePtr(power, temp2); // n = y
Label start;
jump(&start);
Label loop;
bind(&loop);
// runningSquare *= runningSquare
branchMulPtr(Assembler::Overflow, temp1, temp1, onOver);
bind(&start);
// if ((n & 1) != 0) result *= runningSquare
Label even;
branchTest32(Assembler::Zero, temp2, Imm32(1), &even);
branchMulPtr(Assembler::Overflow, temp1, dest, onOver);
bind(&even);
// n >>= 1
// if (n == 0) return result
branchRshift32(Assembler::NonZero, Imm32(1), temp2, &loop);
bind(&done);
}
void MacroAssembler::signInt32(Register input, Register output) {
MOZ_ASSERT(input != output);
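// Arithmetic right shift by 31 yields 0 for non-negative inputs and -1 for
// negative ones; OR-ing in 1 turns that into +1 / -1. The final cmp32Move32
// then copies the (still intact) input into the output when it is zero, so
// sign(0) == 0.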
rshift32Arithmetic(Imm32(31), input, output);
or32(Imm32(1), output);
cmp32Move32(Assembler::Equal, input, Imm32(0), input, output);
}
void MacroAssembler::signDouble(FloatRegister input, FloatRegister output) {
MOZ_ASSERT(input != output);
Label done, zeroOrNaN, negative;
loadConstantDouble(0.0, output);
branchDouble(Assembler::DoubleEqualOrUnordered, input, output, &zeroOrNaN);
branchDouble(Assembler::DoubleLessThan, input, output, &negative);
loadConstantDouble(1.0, output);
jump(&done);
bind(&negative);
loadConstantDouble(-1.0, output);
jump(&done);
bind(&zeroOrNaN);
moveDouble(input, output);
bind(&done);
}
void MacroAssembler::signDoubleToInt32(FloatRegister input, Register output,
FloatRegister temp, Label* fail) {
MOZ_ASSERT(input != temp);
Label done, zeroOrNaN, negative;
loadConstantDouble(0.0, temp);
branchDouble(Assembler::DoubleEqualOrUnordered, input, temp, &zeroOrNaN);
branchDouble(Assembler::DoubleLessThan, input, temp, &negative);
move32(Imm32(1), output);
jump(&done);
bind(&negative);
move32(Imm32(-1), output);
jump(&done);
// Fail for NaN and negative zero.
bind(&zeroOrNaN);
branchDouble(Assembler::DoubleUnordered, input, input, fail);
// The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
// is -Infinity instead of Infinity.
loadConstantDouble(1.0, temp);
divDouble(input, temp);
branchDouble(Assembler::DoubleLessThan, temp, input, fail);
move32(Imm32(0), output);
bind(&done);
}
void MacroAssembler::randomDouble(Register rng, FloatRegister dest,
Register64 temp0, Register64 temp1) {
using mozilla::non_crypto::XorShift128PlusRNG;
static_assert(
sizeof(XorShift128PlusRNG) == 2 * sizeof(uint64_t),
"Code below assumes XorShift128PlusRNG contains two uint64_t values");
Address state0Addr(rng, XorShift128PlusRNG::offsetOfState0());
Address state1Addr(rng, XorShift128PlusRNG::offsetOfState1());
Register64 s0Reg = temp0;
Register64 s1Reg = temp1;
// uint64_t s1 = mState[0];
load64(state0Addr, s1Reg);
// s1 ^= s1 << 23;
move64(s1Reg, s0Reg);
lshift64(Imm32(23), s1Reg);
xor64(s0Reg, s1Reg);
// s1 ^= s1 >> 17
move64(s1Reg, s0Reg);
rshift64(Imm32(17), s1Reg);
xor64(s0Reg, s1Reg);
// const uint64_t s0 = mState[1];
load64(state1Addr, s0Reg);
// mState[0] = s0;
store64(s0Reg, state0Addr);
// s1 ^= s0
xor64(s0Reg, s1Reg);
// s1 ^= s0 >> 26
rshift64(Imm32(26), s0Reg);
xor64(s0Reg, s1Reg);
// mState[1] = s1
store64(s1Reg, state1Addr);
// s1 += mState[0]
load64(state0Addr, s0Reg);
add64(s0Reg, s1Reg);
// See comment in XorShift128PlusRNG::nextDouble().
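// MantissaBits is 53 (the 52 stored mantissa bits plus the implicit leading
// bit); keeping the low 53 bits of s1 and scaling by 2^-53 yields a
// uniformly distributed double in [0, 1).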
static constexpr int MantissaBits =
mozilla::FloatingPoint<double>::kExponentShift + 1;
static constexpr double ScaleInv = double(1) / (1ULL << MantissaBits);
and64(Imm64((1ULL << MantissaBits) - 1), s1Reg);
// Note: we know s1Reg isn't signed after the and64 so we can use the faster
// convertInt64ToDouble instead of convertUInt64ToDouble.
convertInt64ToDouble(s1Reg, dest);
// dest *= ScaleInv
mulDoublePtr(ImmPtr(&ScaleInv), s0Reg.scratchReg(), dest);
}
void MacroAssembler::sameValueDouble(FloatRegister left, FloatRegister right,
FloatRegister temp, Register dest) {
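// SameValue on doubles differs from |==| only for NaNs (SameValue(NaN, NaN)
// is true) and signed zeros (SameValue(0.0, -0.0) is false); both special
// cases are handled explicitly below.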
Label nonEqual, isSameValue, isNotSameValue;
branchDouble(Assembler::DoubleNotEqualOrUnordered, left, right, &nonEqual);
{
// First, test for being equal to 0.0, which also includes -0.0.
loadConstantDouble(0.0, temp);
branchDouble(Assembler::DoubleNotEqual, left, temp, &isSameValue);
// The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0
// is -Infinity instead of Infinity.
Label isNegInf;
loadConstantDouble(1.0, temp);
divDouble(left, temp);
branchDouble(Assembler::DoubleLessThan, temp, left, &isNegInf);
{
loadConstantDouble(1.0, temp);
divDouble(right, temp);
branchDouble(Assembler::DoubleGreaterThan, temp, right, &isSameValue);
jump(&isNotSameValue);
}
bind(&isNegInf);
{
loadConstantDouble(1.0, temp);
divDouble(right, temp);
branchDouble(Assembler::DoubleLessThan, temp, right, &isSameValue);
jump(&isNotSameValue);
}
}
bind(&nonEqual);
{
// Test if both values are NaN.
branchDouble(Assembler::DoubleOrdered, left, left, &isNotSameValue);
branchDouble(Assembler::DoubleOrdered, right, right, &isNotSameValue);
}
Label done;
bind(&isSameValue);
move32(Imm32(1), dest);
jump(&done);
bind(&isNotSameValue);
move32(Imm32(0), dest);
bind(&done);
}
void MacroAssembler::minMaxArrayInt32(Register array, Register result,
Register temp1, Register temp2,
Register temp3, bool isMax, Label* fail) {
// array must be a packed array. Load its elements.
Register elements = temp1;
loadPtr(Address(array, NativeObject::offsetOfElements()), elements);
// Load the length and guard that it is non-zero.
Address lengthAddr(elements, ObjectElements::offsetOfInitializedLength());
load32(lengthAddr, temp3);
branchTest32(Assembler::Zero, temp3, temp3, fail);
// Compute the address of the last element.
Register elementsEnd = temp2;
BaseObjectElementIndex elementsEndAddr(elements, temp3,
-int32_t(sizeof(Value)));
computeEffectiveAddress(elementsEndAddr, elementsEnd);
// Load the first element into result.
fallibleUnboxInt32(Address(elements, 0), result, fail);
Label loop, done;
bind(&loop);
// Check whether we're done.
branchPtr(Assembler::Equal, elements, elementsEnd, &done);
// If not, advance to the next element and load it.
addPtr(Imm32(sizeof(Value)), elements);
fallibleUnboxInt32(Address(elements, 0), temp3, fail);
// Update result if necessary.
Assembler::Condition cond =
isMax ? Assembler::GreaterThan : Assembler::LessThan;
cmp32Move32(cond, temp3, result, temp3, result);
jump(&loop);
bind(&done);
}
void MacroAssembler::minMaxArrayNumber(Register array, FloatRegister result,
FloatRegister floatTemp, Register temp1,
Register temp2, bool isMax,
Label* fail) {
// array must be a packed array. Load its elements.
Register elements = temp1;
loadPtr(Address(array, NativeObject::offsetOfElements()), elements);
// Load the length and check if the array is empty.
Label isEmpty;
Address lengthAddr(elements, ObjectElements::offsetOfInitializedLength());
load32(lengthAddr, temp2);
branchTest32(Assembler::Zero, temp2, temp2, &isEmpty);
// Compute the address of the last element.
Register elementsEnd = temp2;
BaseObjectElementIndex elementsEndAddr(elements, temp2,
-int32_t(sizeof(Value)));
computeEffectiveAddress(elementsEndAddr, elementsEnd);
// Load the first element into result.
ensureDouble(Address(elements, 0), result, fail);
Label loop, done;
bind(&loop);
// Check whether we're done.
branchPtr(Assembler::Equal, elements, elementsEnd, &done);
// If not, advance to the next element and load it into floatTemp.
addPtr(Imm32(sizeof(Value)), elements);
ensureDouble(Address(elements, 0), floatTemp, fail);
// Update result if necessary.
if (isMax) {
maxDouble(floatTemp, result, /* handleNaN = */ true);
} else {
minDouble(floatTemp, result, /* handleNaN = */ true);
}
jump(&loop);
// With no arguments, min/max return +Infinity/-Infinity respectively.
bind(&isEmpty);
if (isMax) {
loadConstantDouble(mozilla::NegativeInfinity<double>(), result);
} else {
loadConstantDouble(mozilla::PositiveInfinity<double>(), result);
}
bind(&done);
}
void MacroAssembler::loadRegExpLastIndex(Register regexp, Register string,
Register lastIndex,
Label* notFoundZeroLastIndex) {
Address flagsSlot(regexp, RegExpObject::offsetOfFlags());
Address lastIndexSlot(regexp, RegExpObject::offsetOfLastIndex());
Address stringLength(string, JSString::offsetOfLength());
Label notGlobalOrSticky, loadedLastIndex;
branchTest32(Assembler::Zero, flagsSlot,
Imm32(JS::RegExpFlag::Global | JS::RegExpFlag::Sticky),
&notGlobalOrSticky);
{
// It's a global or sticky regular expression. Emit the following code:
//
// lastIndex = regexp.lastIndex
// if lastIndex > string.length:
// jump to notFoundZeroLastIndex (skip the regexp match/test operation)
//
// The `notFoundZeroLastIndex` code should set regexp.lastIndex to 0 and
// treat this as a not-found result.
//
// See steps 5-8 in js::RegExpBuiltinExec.
//
// Earlier guards must have ensured regexp.lastIndex is a non-negative
// integer.
#ifdef DEBUG
{
Label ok;
branchTestInt32(Assembler::Equal, lastIndexSlot, &ok);
assumeUnreachable("Expected int32 value for lastIndex");
bind(&ok);
}
#endif
unboxInt32(lastIndexSlot, lastIndex);
#ifdef DEBUG
{
Label ok;
branchTest32(Assembler::NotSigned, lastIndex, lastIndex, &ok);
assumeUnreachable("Expected non-negative lastIndex");
bind(&ok);
}
#endif
branch32(Assembler::Below, stringLength, lastIndex, notFoundZeroLastIndex);
jump(&loadedLastIndex);
}
bind(&notGlobalOrSticky);
move32(Imm32(0), lastIndex);
bind(&loadedLastIndex);
}
void MacroAssembler::loadAndClearRegExpSearcherLastLimit(Register result,
Register scratch) {
MOZ_ASSERT(result != scratch);
loadJSContext(scratch);
Address limitField(scratch, JSContext::offsetOfRegExpSearcherLastLimit());
load32(limitField, result);
#ifdef DEBUG
Label ok;
branch32(Assembler::NotEqual, result, Imm32(RegExpSearcherLastLimitSentinel),
&ok);
assumeUnreachable("Unexpected sentinel for regExpSearcherLastLimit");
bind(&ok);
store32(Imm32(RegExpSearcherLastLimitSentinel), limitField);
#endif
}
void MacroAssembler::loadParsedRegExpShared(Register regexp, Register result,
Label* unparsed) {
Address sharedSlot(regexp, RegExpObject::offsetOfShared());
branchTestUndefined(Assembler::Equal, sharedSlot, unparsed);
unboxNonDouble(sharedSlot, result, JSVAL_TYPE_PRIVATE_GCTHING);
static_assert(sizeof(RegExpShared::Kind) == sizeof(uint32_t));
branch32(Assembler::Equal, Address(result, RegExpShared::offsetOfKind()),
Imm32(int32_t(RegExpShared::Kind::Unparsed)), unparsed);
}
// ===============================================================
// Branch functions
void MacroAssembler::loadFunctionLength(Register func,
Register funFlagsAndArgCount,
Register output, Label* slowPath) {
#ifdef DEBUG
{
// These flags should already have been checked by caller.
Label ok;
uint32_t FlagsToCheck =
FunctionFlags::SELFHOSTLAZY | FunctionFlags::RESOLVED_LENGTH;
branchTest32(Assembler::Zero, funFlagsAndArgCount, Imm32(FlagsToCheck),
&ok);
assumeUnreachable("The function flags should already have been checked.");
bind(&ok);
}
#endif // DEBUG
// NOTE: `funFlagsAndArgCount` and `output` must be allowed to alias.
// Load the target function's length.
Label isInterpreted, lengthLoaded;
branchTest32(Assembler::NonZero, funFlagsAndArgCount,
Imm32(FunctionFlags::BASESCRIPT), &isInterpreted);