/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/BaselineCacheIRCompiler.h"
#include "gc/GC.h"
#include "jit/CacheIR.h"
#include "jit/CacheIRCloner.h"
#include "jit/CacheIRWriter.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/JitZone.h"
#include "jit/Linker.h"
#include "jit/MoveEmitter.h"
#include "jit/RegExpStubConstants.h"
#include "jit/SharedICHelpers.h"
#include "jit/VMFunctions.h"
#include "js/experimental/JitInfo.h" // JSJitInfo
#include "js/friend/DOMProxy.h" // JS::ExpandoAndGeneration
#include "proxy/DeadObjectProxy.h"
#include "proxy/Proxy.h"
#include "util/Unicode.h"
#include "vm/StaticStrings.h"
#include "jit/JitScript-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/SharedICHelpers-inl.h"
#include "jit/VMFunctionList-inl.h"
#include "vm/List-inl.h"
using namespace js;
using namespace js::jit;
using mozilla::Maybe;
using JS::ExpandoAndGeneration;
namespace js {
namespace jit {
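// Return the offset from the stack pointer to the IC's stack Values. When
// IC frame pointers are enabled, the pushed frame pointer (and, on
// link-register architectures, the return address) sit between the stack
// pointer and those Values, so the base offset must be adjusted.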
static uint32_t GetICStackValueOffset() {
uint32_t offset = ICStackValueOffset;
if (JitOptions.enableICFramePointers) {
#ifdef JS_USE_LINK_REGISTER
// The frame pointer and return address are also on the stack.
offset += 2 * sizeof(uintptr_t);
#else
// The frame pointer is also on the stack.
offset += sizeof(uintptr_t);
#endif
}
return offset;
}
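// Push/pop the registers that make up the IC frame when
// JitOptions.enableICFramePointers is set: the return address on
// link-register architectures, and the frame pointer everywhere.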
static void PushICFrameRegs(MacroAssembler& masm) {
MOZ_ASSERT(JitOptions.enableICFramePointers);
#ifdef JS_USE_LINK_REGISTER
masm.pushReturnAddress();
#endif
masm.push(FramePointer);
}
static void PopICFrameRegs(MacroAssembler& masm) {
MOZ_ASSERT(JitOptions.enableICFramePointers);
masm.pop(FramePointer);
#ifdef JS_USE_LINK_REGISTER
masm.popReturnAddress();
#endif
}
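// Compute the address of a Baseline frame slot relative to the current stack
// pointer. Anything the register allocator has spilled (stackPushed_) and the
// IC prologue words (GetICStackValueOffset) lie between the stack pointer and
// the IC's input Values.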
Address CacheRegisterAllocator::addressOf(MacroAssembler& masm,
BaselineFrameSlot slot) const {
uint32_t offset =
stackPushed_ + GetICStackValueOffset() + slot.slot() * sizeof(JS::Value);
return Address(masm.getStackPointer(), offset);
}
BaseValueIndex CacheRegisterAllocator::addressOf(MacroAssembler& masm,
Register argcReg,
BaselineFrameSlot slot) const {
uint32_t offset =
stackPushed_ + GetICStackValueOffset() + slot.slot() * sizeof(JS::Value);
return BaseValueIndex(masm.getStackPointer(), argcReg, offset);
}
// BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
BaselineCacheIRCompiler::BaselineCacheIRCompiler(JSContext* cx,
TempAllocator& alloc,
const CacheIRWriter& writer,
uint32_t stubDataOffset)
: CacheIRCompiler(cx, alloc, writer, stubDataOffset, Mode::Baseline,
StubFieldPolicy::Address),
makesGCCalls_(false) {}
// AutoStubFrame methods
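//
// AutoStubFrame wraps EmitBaselineEnterStubFrame/EmitBaselineLeaveStubFrame
// and keeps the compiler's bookkeeping (enteredStubFrame_, makesGCCalls_,
// localTracingSlots_) in sync with the code being emitted.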
AutoStubFrame::AutoStubFrame(BaselineCacheIRCompiler& compiler)
: compiler(compiler)
#ifdef DEBUG
,
framePushedAtEnterStubFrame_(0)
#endif
{
}
void AutoStubFrame::enter(MacroAssembler& masm, Register scratch) {
MOZ_ASSERT(compiler.allocator.stackPushed() == 0);
if (JitOptions.enableICFramePointers) {
// If we have already pushed the frame pointer, pop it
// before creating the stub frame.
PopICFrameRegs(masm);
}
EmitBaselineEnterStubFrame(masm, scratch);
#ifdef DEBUG
framePushedAtEnterStubFrame_ = masm.framePushed();
#endif
MOZ_ASSERT(!compiler.enteredStubFrame_);
compiler.enteredStubFrame_ = true;
// All current uses of this are to call VM functions that can GC.
compiler.makesGCCalls_ = true;
}
void AutoStubFrame::leave(MacroAssembler& masm) {
MOZ_ASSERT(compiler.enteredStubFrame_);
compiler.enteredStubFrame_ = false;
#ifdef DEBUG
masm.setFramePushed(framePushedAtEnterStubFrame_);
#endif
EmitBaselineLeaveStubFrame(masm);
if (JitOptions.enableICFramePointers) {
// We will pop the frame pointer when we return,
// so we have to push it again now.
PushICFrameRegs(masm);
}
}
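// Values pushed with storeTracedValue live directly below the stub frame
// header, where the GC can trace them; loadTracedValue reads them back
// through a negative offset from the frame pointer.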
void AutoStubFrame::storeTracedValue(MacroAssembler& masm, ValueOperand value) {
MOZ_ASSERT(compiler.localTracingSlots_ < 255);
MOZ_ASSERT(masm.framePushed() - framePushedAtEnterStubFrame_ ==
compiler.localTracingSlots_ * sizeof(Value));
masm.Push(value);
compiler.localTracingSlots_++;
}
void AutoStubFrame::loadTracedValue(MacroAssembler& masm, uint8_t slotIndex,
ValueOperand value) {
MOZ_ASSERT(slotIndex <= compiler.localTracingSlots_);
int32_t offset = BaselineStubFrameLayout::LocallyTracedValueOffset +
slotIndex * sizeof(Value);
masm.loadValue(Address(FramePointer, -offset), value);
}
#ifdef DEBUG
AutoStubFrame::~AutoStubFrame() { MOZ_ASSERT(!compiler.enteredStubFrame_); }
#endif
} // namespace jit
} // namespace js
bool BaselineCacheIRCompiler::makesGCCalls() const { return makesGCCalls_; }
Address BaselineCacheIRCompiler::stubAddress(uint32_t offset) const {
return Address(ICStubReg, stubDataOffset_ + offset);
}
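// Call a VM function from IC code. The VMFunctionId is resolved from the
// Fn/fn template arguments at compile time; the actual call sequence is
// emitted by callVMInternal.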
template <typename Fn, Fn fn>
void BaselineCacheIRCompiler::callVM(MacroAssembler& masm) {
VMFunctionId id = VMFunctionToId<Fn, fn>::id;
callVMInternal(masm, id);
}
JitCode* BaselineCacheIRCompiler::compile() {
AutoCreatedBy acb(masm, "BaselineCacheIRCompiler::compile");
#ifndef JS_USE_LINK_REGISTER
masm.adjustFrame(sizeof(intptr_t));
#endif
#ifdef JS_CODEGEN_ARM
AutoNonDefaultSecondScratchRegister andssr(masm, BaselineSecondScratchReg);
#endif
if (JitOptions.enableICFramePointers) {
/* [SMDOC] Baseline IC Frame Pointers
*
* In general, ICs don't have frame pointers until just before
* doing a VM call, at which point we retroactively create a stub
* frame. However, for the sake of external profilers, we
* optionally support full-IC frame pointers in baseline ICs, with
* the following approach:
* 1. We push a frame pointer when we enter an IC.
* 2. We pop the frame pointer when we return from an IC, or
* when we jump to the next IC.
* 3. Entering a stub frame for a VM call already pushes a
* frame pointer, so we pop our existing frame pointer
* just before entering a stub frame and push it again
* just after leaving a stub frame.
* Some ops take advantage of the fact that the frame pointer is
* not updated until we enter a stub frame to read values from
* the caller's frame. To support this, we allocate a separate
* baselineFrame register when IC frame pointers are enabled.
*/
PushICFrameRegs(masm);
masm.moveStackPtrTo(FramePointer);
MOZ_ASSERT(baselineFrameReg() != FramePointer);
masm.loadPtr(Address(FramePointer, 0), baselineFrameReg());
}
// Count stub entries: we count entries rather than successes, as it is much
// easier to ensure ICStubReg is valid at entry than at exit.
Address enteredCount(ICStubReg, ICCacheIRStub::offsetOfEnteredCount());
masm.add32(Imm32(1), enteredCount);
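// Main dispatch loop: read each CacheIR op and emit its Baseline code via
// the corresponding emit<Op> method. Control only leaves the IC through a
// return emitted by one of those ops, so falling out of the loop is a bug
// (see the assumeUnreachable below).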
CacheIRReader reader(writer_);
do {
CacheOp op = reader.readOp();
perfSpewer_.recordInstruction(masm, op);
switch (op) {
#define DEFINE_OP(op, ...) \
case CacheOp::op: \
if (!emit##op(reader)) return nullptr; \
break;
CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP
default:
MOZ_CRASH("Invalid op");
}
allocator.nextOp();
} while (reader.more());
MOZ_ASSERT(!enteredStubFrame_);
masm.assumeUnreachable("Should have returned from IC");
// Done emitting the main IC code. Now emit the failure paths.
for (size_t i = 0; i < failurePaths.length(); i++) {
if (!emitFailurePath(i)) {
return nullptr;
}
if (JitOptions.enableICFramePointers) {
PopICFrameRegs(masm);
}
EmitStubGuardFailure(masm);
}
Linker linker(masm);
Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Baseline));
if (!newStubCode) {
cx_->recoverFromOutOfMemory();
return nullptr;
}
newStubCode->setLocalTracingSlots(localTracingSlots_);
return newStubCode;
}
bool BaselineCacheIRCompiler::emitGuardShape(ObjOperandId objId,
uint32_t shapeOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch1(allocator, masm);
bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);
Maybe<AutoScratchRegister> maybeScratch2;
if (needSpectreMitigations) {
maybeScratch2.emplace(allocator, masm);
}
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Address addr(stubAddress(shapeOffset));
masm.loadPtr(addr, scratch1);
if (needSpectreMitigations) {
masm.branchTestObjShape(Assembler::NotEqual, obj, scratch1, *maybeScratch2,
obj, failure->label());
} else {
masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj,
scratch1, failure->label());
}
return true;
}
bool BaselineCacheIRCompiler::emitGuardProto(ObjOperandId objId,
uint32_t protoOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Address addr(stubAddress(protoOffset));
masm.loadObjProto(obj, scratch);
masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label());
return true;
}
bool BaselineCacheIRCompiler::emitGuardCompartment(ObjOperandId objId,
uint32_t globalOffset,
uint32_t compartmentOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
// Verify that the global wrapper is still valid, as it is a prerequisite
// for doing the compartment check.
Address globalWrapper(stubAddress(globalOffset));
masm.loadPtr(globalWrapper, scratch);
Address handlerAddr(scratch, ProxyObject::offsetOfHandler());
masm.branchPtr(Assembler::Equal, handlerAddr,
ImmPtr(&DeadObjectProxy::singleton), failure->label());
Address addr(stubAddress(compartmentOffset));
masm.branchTestObjCompartment(Assembler::NotEqual, obj, addr, scratch,
failure->label());
return true;
}
bool BaselineCacheIRCompiler::emitGuardAnyClass(ObjOperandId objId,
uint32_t claspOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Address testAddr(stubAddress(claspOffset));
if (objectGuardNeedsSpectreMitigations(objId)) {
masm.branchTestObjClass(Assembler::NotEqual, obj, testAddr, scratch, obj,
failure->label());
} else {
masm.branchTestObjClassNoSpectreMitigations(
Assembler::NotEqual, obj, testAddr, scratch, failure->label());
}
return true;
}
bool BaselineCacheIRCompiler::emitGuardHasProxyHandler(ObjOperandId objId,
uint32_t handlerOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Address testAddr(stubAddress(handlerOffset));
masm.loadPtr(testAddr, scratch);
Address handlerAddr(obj, ProxyObject::offsetOfHandler());
masm.branchPtr(Assembler::NotEqual, handlerAddr, scratch, failure->label());
return true;
}
bool BaselineCacheIRCompiler::emitGuardSpecificObject(ObjOperandId objId,
uint32_t expectedOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Address addr(stubAddress(expectedOffset));
masm.branchPtr(Assembler::NotEqual, addr, obj, failure->label());
return true;
}
bool BaselineCacheIRCompiler::emitGuardSpecificFunction(
ObjOperandId objId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
return emitGuardSpecificObject(objId, expectedOffset);
}
bool BaselineCacheIRCompiler::emitGuardFunctionScript(
ObjOperandId funId, uint32_t expectedOffset, uint32_t nargsAndFlagsOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register fun = allocator.useRegister(masm, funId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Address addr(stubAddress(expectedOffset));
masm.loadPrivate(Address(fun, JSFunction::offsetOfJitInfoOrScript()),
scratch);
masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label());
return true;
}
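// Guard that a string operand equals a specific atom. Fast paths: pointer
// equality succeeds immediately; a different atom or a different length
// fails the guard. Otherwise fall back to an ABI call that compares the
// characters.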
bool BaselineCacheIRCompiler::emitGuardSpecificAtom(StringOperandId strId,
uint32_t expectedOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register str = allocator.useRegister(masm, strId);
AutoScratchRegister scratch(allocator, masm);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Address atomAddr(stubAddress(expectedOffset));
Label done;
masm.branchPtr(Assembler::Equal, atomAddr, str, &done);
// The pointers are not equal, so if the input string is also an atom it
// must be a different string.
masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), failure->label());
// Check the length.
masm.loadPtr(atomAddr, scratch);
masm.loadStringLength(scratch, scratch);
masm.branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
scratch, failure->label());
// We have a non-atomized string with the same length. Call a helper
// function to do the comparison.
LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(),
liveVolatileFloatRegs());
masm.PushRegsInMask(volatileRegs);
using Fn = bool (*)(JSString* str1, JSString* str2);
masm.setupUnalignedABICall(scratch);
masm.loadPtr(atomAddr, scratch);
masm.passABIArg(scratch);
masm.passABIArg(str);
masm.callWithABI<Fn, EqualStringsHelperPure>();
masm.storeCallPointerResult(scratch);
LiveRegisterSet ignore;
ignore.add(scratch);
masm.PopRegsInMaskIgnore(volatileRegs, ignore);
masm.branchIfFalseBool(scratch, failure->label());
masm.bind(&done);
return true;
}
bool BaselineCacheIRCompiler::emitGuardSpecificSymbol(SymbolOperandId symId,
uint32_t expectedOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register sym = allocator.useRegister(masm, symId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
Address addr(stubAddress(expectedOffset));
masm.branchPtr(Assembler::NotEqual, addr, sym, failure->label());
return true;
}
bool BaselineCacheIRCompiler::emitLoadValueResult(uint32_t valOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
masm.loadValue(stubAddress(valOffset), output.valueReg());
return true;
}
bool BaselineCacheIRCompiler::emitLoadFixedSlotResult(ObjOperandId objId,
uint32_t offsetOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
masm.load32(stubAddress(offsetOffset), scratch);
masm.loadValue(BaseIndex(obj, scratch, TimesOne), output.valueReg());
return true;
}
bool BaselineCacheIRCompiler::emitLoadFixedSlotTypedResult(
ObjOperandId objId, uint32_t offsetOffset, ValueType) {
// The type is only used by Warp.
return emitLoadFixedSlotResult(objId, offsetOffset);
}
bool BaselineCacheIRCompiler::emitLoadDynamicSlotResult(ObjOperandId objId,
uint32_t offsetOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
AutoScratchRegister scratch2(allocator, masm);
masm.load32(stubAddress(offsetOffset), scratch);
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);
masm.loadValue(BaseIndex(scratch2, scratch, TimesOne), output.valueReg());
return true;
}
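// Shared code for calling a scripted getter, with or without trial inlining:
// load the callee and its jitcode, enter a stub frame, align the stack, push
// |receiver| as thisv plus the callee and frame descriptor, go through the
// arguments rectifier if the getter formally expects arguments, then call.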
bool BaselineCacheIRCompiler::emitCallScriptedGetterShared(
ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
uint32_t nargsAndFlagsOffset, Maybe<uint32_t> icScriptOffset) {
ValueOperand receiver = allocator.useValueRegister(masm, receiverId);
Address getterAddr(stubAddress(getterOffset));
AutoScratchRegister code(allocator, masm);
AutoScratchRegister callee(allocator, masm);
AutoScratchRegister scratch(allocator, masm);
bool isInlined = icScriptOffset.isSome();
// First, retrieve raw jitcode for getter.
masm.loadPtr(getterAddr, callee);
if (isInlined) {
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.loadBaselineJitCodeRaw(callee, code, failure->label());
} else {
masm.loadJitCodeRaw(callee, code);
}
allocator.discardStack(masm);
AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
if (!sameRealm) {
masm.switchToObjectRealm(callee, scratch);
}
// Align the stack such that the JitFrameLayout is aligned on
// JitStackAlignment.
masm.alignJitStackBasedOnNArgs(0, /*countIncludesThis = */ false);
// Getter is called with 0 arguments, just |receiver| as thisv.
// Note that we use Push, not push, so that callJit will align the stack
// properly on ARM.
masm.Push(receiver);
if (isInlined) {
// Store icScript in the context.
Address icScriptAddr(stubAddress(*icScriptOffset));
masm.loadPtr(icScriptAddr, scratch);
masm.storeICScriptInJSContext(scratch);
}
masm.Push(callee);
masm.PushFrameDescriptorForJitCall(FrameType::BaselineStub, /* argc = */ 0);
// Handle arguments underflow.
Label noUnderflow;
masm.loadFunctionArgCount(callee, callee);
masm.branch32(Assembler::Equal, callee, Imm32(0), &noUnderflow);
// Call the arguments rectifier.
ArgumentsRectifierKind kind = isInlined
? ArgumentsRectifierKind::TrialInlining
: ArgumentsRectifierKind::Normal;
TrampolinePtr argumentsRectifier =
cx_->runtime()->jitRuntime()->getArgumentsRectifier(kind);
masm.movePtr(argumentsRectifier, code);
masm.bind(&noUnderflow);
masm.callJit(code);
stubFrame.leave(masm);
if (!sameRealm) {
masm.switchToBaselineFrameRealm(R1.scratchReg());
}
return true;
}
bool BaselineCacheIRCompiler::emitCallScriptedGetterResult(
ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
uint32_t nargsAndFlagsOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Maybe<uint32_t> icScriptOffset = mozilla::Nothing();
return emitCallScriptedGetterShared(receiverId, getterOffset, sameRealm,
nargsAndFlagsOffset, icScriptOffset);
}
bool BaselineCacheIRCompiler::emitCallInlinedGetterResult(
ValOperandId receiverId, uint32_t getterOffset, uint32_t icScriptOffset,
bool sameRealm, uint32_t nargsAndFlagsOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
return emitCallScriptedGetterShared(receiverId, getterOffset, sameRealm,
nargsAndFlagsOffset,
mozilla::Some(icScriptOffset));
}
bool BaselineCacheIRCompiler::emitCallNativeGetterResult(
ValOperandId receiverId, uint32_t getterOffset, bool sameRealm,
uint32_t nargsAndFlagsOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
ValueOperand receiver = allocator.useValueRegister(masm, receiverId);
Address getterAddr(stubAddress(getterOffset));
AutoScratchRegister scratch(allocator, masm);
allocator.discardStack(masm);
AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
// Load the callee in the scratch register.
masm.loadPtr(getterAddr, scratch);
masm.Push(receiver);
masm.Push(scratch);
using Fn =
bool (*)(JSContext*, HandleFunction, HandleValue, MutableHandleValue);
callVM<Fn, CallNativeGetter>(masm);
stubFrame.leave(masm);
return true;
}
bool BaselineCacheIRCompiler::emitCallDOMGetterResult(ObjOperandId objId,
uint32_t jitInfoOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Address jitInfoAddr(stubAddress(jitInfoOffset));
AutoScratchRegister scratch(allocator, masm);
allocator.discardStack(masm);
AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
// Load the JSJitInfo in the scratch register.
masm.loadPtr(jitInfoAddr, scratch);
masm.Push(obj);
masm.Push(scratch);
using Fn =
bool (*)(JSContext*, const JSJitInfo*, HandleObject, MutableHandleValue);
callVM<Fn, CallDOMGetter>(masm);
stubFrame.leave(masm);
return true;
}
bool BaselineCacheIRCompiler::emitProxyGetResult(ObjOperandId objId,
uint32_t idOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Register obj = allocator.useRegister(masm, objId);
Address idAddr(stubAddress(idOffset));
AutoScratchRegister scratch(allocator, masm);
allocator.discardStack(masm);
AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
// Load the jsid in the scratch register.
masm.loadPtr(idAddr, scratch);
masm.Push(scratch);
masm.Push(obj);
using Fn = bool (*)(JSContext*, HandleObject, HandleId, MutableHandleValue);
callVM<Fn, ProxyGetProperty>(masm);
stubFrame.leave(masm);
return true;
}
bool BaselineCacheIRCompiler::emitFrameIsConstructingResult() {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register outputScratch = output.valueReg().scratchReg();
// Load the CalleeToken.
Address tokenAddr(baselineFrameReg(), JitFrameLayout::offsetOfCalleeToken());
masm.loadPtr(tokenAddr, outputScratch);
// The low bit indicates whether this call is constructing; just clear the
// other bits.
static_assert(CalleeToken_Function == 0x0);
static_assert(CalleeToken_FunctionConstructing == 0x1);
masm.andPtr(Imm32(0x1), outputScratch);
masm.tagValue(JSVAL_TYPE_BOOLEAN, outputScratch, output.valueReg());
return true;
}
bool BaselineCacheIRCompiler::emitLoadConstantStringResult(uint32_t strOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
masm.loadPtr(stubAddress(strOffset), scratch);
masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
return true;
}
bool BaselineCacheIRCompiler::emitCompareStringResult(JSOp op,
StringOperandId lhsId,
StringOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register left = allocator.useRegister(masm, lhsId);
Register right = allocator.useRegister(masm, rhsId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
allocator.discardStack(masm);
Label slow, done;
masm.compareStrings(op, left, right, scratch, &slow);
masm.jump(&done);
masm.bind(&slow);
{
AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
// Push the operands in reverse order for JSOp::Le and JSOp::Gt:
// - |left <= right| is implemented as |right >= left|.
// - |left > right| is implemented as |right < left|.
if (op == JSOp::Le || op == JSOp::Gt) {
masm.Push(left);
masm.Push(right);
} else {
masm.Push(right);
masm.Push(left);
}
using Fn = bool (*)(JSContext*, HandleString, HandleString, bool*);
if (op == JSOp::Eq || op == JSOp::StrictEq) {
callVM<Fn, jit::StringsEqual<EqualityKind::Equal>>(masm);
} else if (op == JSOp::Ne || op == JSOp::StrictNe) {
callVM<Fn, jit::StringsEqual<EqualityKind::NotEqual>>(masm);
} else if (op == JSOp::Lt || op == JSOp::Gt) {
callVM<Fn, jit::StringsCompare<ComparisonKind::LessThan>>(masm);
} else {
MOZ_ASSERT(op == JSOp::Le || op == JSOp::Ge);
callVM<Fn, jit::StringsCompare<ComparisonKind::GreaterThanOrEqual>>(masm);
}
stubFrame.leave(masm);
masm.storeCallPointerResult(scratch);
}
masm.bind(&done);
masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
return true;
}
bool BaselineCacheIRCompiler::emitSameValueResult(ValOperandId lhsId,
ValOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegister scratch(allocator, masm);
ValueOperand lhs = allocator.useValueRegister(masm, lhsId);
#ifdef JS_CODEGEN_X86
// Use the output to avoid running out of registers.
allocator.copyToScratchValueRegister(masm, rhsId, output.valueReg());
ValueOperand rhs = output.valueReg();
#else
ValueOperand rhs = allocator.useValueRegister(masm, rhsId);
#endif
allocator.discardStack(masm);
Label done;
Label call;
// Check to see if the values have identical bits.
// This is correct for SameValue because SameValue(NaN,NaN) is true,
// and SameValue(0,-0) is false.
masm.branch64(Assembler::NotEqual, lhs.toRegister64(), rhs.toRegister64(),
&call);
masm.moveValue(BooleanValue(true), output.valueReg());
masm.jump(&done);
{
masm.bind(&call);
AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
masm.pushValue(lhs);
masm.pushValue(rhs);
using Fn = bool (*)(JSContext*, HandleValue, HandleValue, bool*);
callVM<Fn, SameValue>(masm);
stubFrame.leave(masm);
masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());
}
masm.bind(&done);
return true;
}
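// Shared code for StoreFixedSlot and StoreDynamicSlot: load the slot offset
// from stub data, emit a pre-barrier on the old slot value, store the new
// value, and finish with a post-barrier for the stored value.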
bool BaselineCacheIRCompiler::emitStoreSlotShared(bool isFixed,
ObjOperandId objId,
uint32_t offsetOffset,
ValOperandId rhsId) {
Register obj = allocator.useRegister(masm, objId);
ValueOperand val = allocator.useValueRegister(masm, rhsId);
AutoScratchRegister scratch1(allocator, masm);
Maybe<AutoScratchRegister> scratch2;
if (!isFixed) {
scratch2.emplace(allocator, masm);
}
Address offsetAddr = stubAddress(offsetOffset);
masm.load32(offsetAddr, scratch1);
if (isFixed) {
BaseIndex slot(obj, scratch1, TimesOne);
EmitPreBarrier(masm, slot, MIRType::Value);
masm.storeValue(val, slot);
} else {
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2.ref());
BaseIndex slot(scratch2.ref(), scratch1, TimesOne);
EmitPreBarrier(masm, slot, MIRType::Value);
masm.storeValue(val, slot);
}
emitPostBarrierSlot(obj, val, scratch1);
return true;
}
bool BaselineCacheIRCompiler::emitStoreFixedSlot(ObjOperandId objId,
uint32_t offsetOffset,
ValOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
return emitStoreSlotShared(true, objId, offsetOffset, rhsId);
}
bool BaselineCacheIRCompiler::emitStoreDynamicSlot(ObjOperandId objId,
uint32_t offsetOffset,
ValOperandId rhsId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
return emitStoreSlotShared(false, objId, offsetOffset, rhsId);
}
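// Shared code for the AddAndStore*Slot and AllocateAndStoreDynamicSlot ops:
// grow the dynamic slots first if needed (the only fallible step), update
// the object's shape with a pre-barrier on the old shape, then store the
// value into the new slot (no pre-barrier needed, as it is a fresh slot).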
bool BaselineCacheIRCompiler::emitAddAndStoreSlotShared(
CacheOp op, ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
uint32_t newShapeOffset, Maybe<uint32_t> numNewSlotsOffset) {
Register obj = allocator.useRegister(masm, objId);
ValueOperand val = allocator.useValueRegister(masm, rhsId);
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister scratch2(allocator, masm);
Address newShapeAddr = stubAddress(newShapeOffset);
Address offsetAddr = stubAddress(offsetOffset);
if (op == CacheOp::AllocateAndStoreDynamicSlot) {
// We have to (re)allocate dynamic slots. Do this first, as it's the
// only fallible operation here. Note that growSlotsPure is fallible but
// does not GC.
Address numNewSlotsAddr = stubAddress(*numNewSlotsOffset);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
LiveRegisterSet save(GeneralRegisterSet::Volatile(),
liveVolatileFloatRegs());
masm.PushRegsInMask(save);
using Fn = bool (*)(JSContext* cx, NativeObject* obj, uint32_t newCount);
masm.setupUnalignedABICall(scratch1);
masm.loadJSContext(scratch1);
masm.passABIArg(scratch1);
masm.passABIArg(obj);
masm.load32(numNewSlotsAddr, scratch2);
masm.passABIArg(scratch2);
masm.callWithABI<Fn, NativeObject::growSlotsPure>();
masm.storeCallPointerResult(scratch1);
LiveRegisterSet ignore;
ignore.add(scratch1);
masm.PopRegsInMaskIgnore(save, ignore);
masm.branchIfFalseBool(scratch1, failure->label());
}
// Update the object's shape.
masm.loadPtr(newShapeAddr, scratch1);
masm.storeObjShape(scratch1, obj,
[](MacroAssembler& masm, const Address& addr) {
EmitPreBarrier(masm, addr, MIRType::Shape);
});
// Perform the store. No pre-barrier required since this is a new
// initialization.
masm.load32(offsetAddr, scratch1);
if (op == CacheOp::AddAndStoreFixedSlot) {
BaseIndex slot(obj, scratch1, TimesOne);
masm.storeValue(val, slot);
} else {
MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
op == CacheOp::AllocateAndStoreDynamicSlot);
masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);
BaseIndex slot(scratch2, scratch1, TimesOne);
masm.storeValue(val, slot);
}
emitPostBarrierSlot(obj, val, scratch1);
return true;
}
bool BaselineCacheIRCompiler::emitAddAndStoreFixedSlot(
ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
uint32_t newShapeOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot, objId,
offsetOffset, rhsId, newShapeOffset,
numNewSlotsOffset);
}
bool BaselineCacheIRCompiler::emitAddAndStoreDynamicSlot(
ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
uint32_t newShapeOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
Maybe<uint32_t> numNewSlotsOffset = mozilla::Nothing();
return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot, objId,
offsetOffset, rhsId, newShapeOffset,
numNewSlotsOffset);
}
bool BaselineCacheIRCompiler::emitAllocateAndStoreDynamicSlot(
ObjOperandId objId, uint32_t offsetOffset, ValOperandId rhsId,
uint32_t newShapeOffset, uint32_t numNewSlotsOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot, objId,
offsetOffset, rhsId, newShapeOffset,
mozilla::Some(numNewSlotsOffset));
}
bool BaselineCacheIRCompiler::emitArrayJoinResult(ObjOperandId objId,
StringOperandId sepId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
Register sep = allocator.useRegister(masm, sepId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
allocator.discardStack(masm);
// Load obj->elements in scratch.
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
Address lengthAddr(scratch, ObjectElements::offsetOfLength());
// If array length is 0, return empty string.
Label finished;
{
Label arrayNotEmpty;
masm.branch32(Assembler::NotEqual, lengthAddr, Imm32(0), &arrayNotEmpty);
masm.movePtr(ImmGCPtr(cx_->names().empty_), scratch);
masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
masm.jump(&finished);
masm.bind(&arrayNotEmpty);
}
Label vmCall;
// Otherwise, handle array length 1 case.
masm.branch32(Assembler::NotEqual, lengthAddr, Imm32(1), &vmCall);
// But only if initializedLength is also 1.
Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
masm.branch32(Assembler::NotEqual, initLength, Imm32(1), &vmCall);
// And only if elem0 is a string.
Address elementAddr(scratch, 0);
masm.branchTestString(Assembler::NotEqual, elementAddr, &vmCall);
// Store the value.
masm.loadValue(elementAddr, output.valueReg());
masm.jump(&finished);
// Otherwise call into the VM.
{
masm.bind(&vmCall);
AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
masm.Push(sep);
masm.Push(obj);
using Fn = JSString* (*)(JSContext*, HandleObject, HandleString);
callVM<Fn, jit::ArrayJoin>(masm);
stubFrame.leave(masm);
masm.tagValue(JSVAL_TYPE_STRING, ReturnReg, output.valueReg());
}
masm.bind(&finished);
return true;
}
bool BaselineCacheIRCompiler::emitPackedArraySliceResult(
uint32_t templateObjectOffset, ObjOperandId arrayId, Int32OperandId beginId,
Int32OperandId endId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
Register array = allocator.useRegister(masm, arrayId);
Register begin = allocator.useRegister(masm, beginId);
Register end = allocator.useRegister(masm, endId);
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.branchArrayIsNotPacked(array, scratch1, scratch2, failure->label());
allocator.discardStack(masm);
AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch1);
// Don't attempt to pre-allocate the object; instead, always use the slow
// path.
ImmPtr result(nullptr);
masm.Push(result);
masm.Push(end);
masm.Push(begin);
masm.Push(array);
using Fn =
JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
callVM<Fn, ArraySliceDense>(masm);
stubFrame.leave(masm);
masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, output.valueReg());
return true;
}
bool BaselineCacheIRCompiler::emitArgumentsSliceResult(
uint32_t templateObjectOffset, ObjOperandId argsId, Int32OperandId beginId,
Int32OperandId endId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Register args = allocator.useRegister(masm, argsId);
Register begin = allocator.useRegister(masm, beginId);
Register end = allocator.useRegister(masm, endId);
allocator.discardStack(masm);
AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
// Don't attempt to pre-allocate the object; instead, always use the slow
// path.
ImmPtr result(nullptr);
masm.Push(result);
masm.Push(end);
masm.Push(begin);
masm.Push(args);
using Fn =
JSObject* (*)(JSContext*, HandleObject, int32_t, int32_t, HandleObject);
callVM<Fn, ArgumentsSliceDense>(masm);
stubFrame.leave(masm);
masm.tagValue(JSVAL_TYPE_OBJECT, ReturnReg, output.valueReg());
return true;
}
bool BaselineCacheIRCompiler::emitIsArrayResult(ValOperandId inputId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);
ValueOperand val = allocator.useValueRegister(masm, inputId);
allocator.discardStack(masm);
Label isNotArray;
// Primitives are never Arrays.
masm.fallibleUnboxObject(val, scratch1, &isNotArray);
Label isArray;
masm.branchTestObjClass(Assembler::Equal, scratch1, &ArrayObject::class_,
scratch2, scratch1, &isArray);
// isArray can also return true for Proxy-wrapped Arrays.
masm.branchTestObjectIsProxy(false, scratch1, scratch2, &isNotArray);
Label done;
{
AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch2);
masm.Push(scratch1);
using Fn = bool (*)(JSContext*, HandleObject, bool*);
callVM<Fn, js::IsArrayFromJit>(masm);
stubFrame.leave(masm);
masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());
masm.jump(&done);
}
masm.bind(&isNotArray);
masm.moveValue(BooleanValue(false), output.valueReg());
masm.jump(&done);
masm.bind(&isArray);
masm.moveValue(BooleanValue(true), output.valueReg());
masm.bind(&done);
return true;
}
bool BaselineCacheIRCompiler::emitIsTypedArrayResult(ObjOperandId objId,
bool isPossiblyWrapped) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Register obj = allocator.useRegister(masm, objId);
allocator.discardStack(masm);
Label notTypedArray, isProxy, done;
masm.loadObjClassUnsafe(obj, scratch);
masm.branchIfClassIsNotTypedArray(scratch, &notTypedArray);
masm.moveValue(BooleanValue(true), output.valueReg());
masm.jump(&done);
masm.bind(&notTypedArray);
if (isPossiblyWrapped) {
masm.branchTestClassIsProxy(true, scratch, &isProxy);
}
masm.moveValue(BooleanValue(false), output.valueReg());
if (isPossiblyWrapped) {
masm.jump(&done);
masm.bind(&isProxy);
AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
masm.Push(obj);
using Fn = bool (*)(JSContext*, JSObject*, bool*);
callVM<Fn, jit::IsPossiblyWrappedTypedArray>(masm);
stubFrame.leave(masm);
masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, output.valueReg());
}
masm.bind(&done);
return true;
}
bool BaselineCacheIRCompiler::emitLoadStringCharResult(StringOperandId strId,
Int32OperandId indexId,
bool handleOOB) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register str = allocator.useRegister(masm, strId);
Register index = allocator.useRegister(masm, indexId);
AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
AutoScratchRegisterMaybeOutputType scratch2(allocator, masm, output);
AutoScratchRegister scratch3(allocator, masm);
// Bounds check, load string char.
Label done;
Label loadFailed;
if (!handleOOB) {
FailurePath* failure;
if (!addFailurePath(&failure)) {
return false;
}
masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
scratch1, failure->label());
masm.loadStringChar(str, index, scratch1, scratch2, scratch3,
failure->label());
allocator.discardStack(masm);
} else {
// Discard the stack before jumping to |done|.
allocator.discardStack(masm);
// Return the empty string for out-of-bounds access.
masm.movePtr(ImmGCPtr(cx_->names().empty_), scratch2);
// This CacheIR op is always preceded by |LinearizeForCharAccess|, so we're
// guaranteed to see no nested ropes.
masm.spectreBoundsCheck32(index, Address(str, JSString::offsetOfLength()),
scratch1, &done);
masm.loadStringChar(str, index, scratch1, scratch2, scratch3, &loadFailed);
}
// Load StaticString for this char. For larger code units perform a VM call.
Label vmCall;
masm.boundsCheck32PowerOfTwo(scratch1, StaticStrings::UNIT_STATIC_LIMIT,
&vmCall);
masm.movePtr(ImmPtr(&cx_->staticStrings().unitStaticTable), scratch2);
masm.loadPtr(BaseIndex(scratch2, scratch1, ScalePointer), scratch2);
masm.jump(&done);
if (handleOOB) {
masm.bind(&loadFailed);
masm.assumeUnreachable("loadStringChar can't fail for linear strings");
}
{
masm.bind(&vmCall);
AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch2);
masm.Push(scratch1);
using Fn = JSLinearString* (*)(JSContext*, int32_t);
callVM<Fn, jit::StringFromCharCode>(masm);
stubFrame.leave(masm);
masm.storeCallPointerResult(scratch2);
}
masm.bind(&done);
masm.tagValue(JSVAL_TYPE_STRING, scratch2, output.valueReg());
return true;
}
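// Shared code for String.fromCharCode and String.fromCodePoint: code units
// below StaticStrings::UNIT_STATIC_LIMIT are loaded from the static strings
// table, everything else goes through a VM call.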
bool BaselineCacheIRCompiler::emitStringFromCodeResult(Int32OperandId codeId,
StringCode stringCode) {
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Register code = allocator.useRegister(masm, codeId);
FailurePath* failure = nullptr;
if (stringCode == StringCode::CodePoint) {
if (!addFailurePath(&failure)) {
return false;
}
}
if (stringCode == StringCode::CodePoint) {
// Note: This condition must match tryAttachStringFromCodePoint to prevent
// failure loops.
masm.branch32(Assembler::Above, code, Imm32(unicode::NonBMPMax),
failure->label());
}
allocator.discardStack(masm);
// We pre-allocate atoms for the first UNIT_STATIC_LIMIT characters.
// For code units larger than that, we must do a VM call.
Label vmCall;
masm.boundsCheck32PowerOfTwo(code, StaticStrings::UNIT_STATIC_LIMIT, &vmCall);
masm.movePtr(ImmPtr(cx_->runtime()->staticStrings->unitStaticTable), scratch);
masm.loadPtr(BaseIndex(scratch, code, ScalePointer), scratch);
Label done;
masm.jump(&done);
{
masm.bind(&vmCall);
AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
masm.Push(code);
if (stringCode == StringCode::CodeUnit) {
using Fn = JSLinearString* (*)(JSContext*, int32_t);
callVM<Fn, jit::StringFromCharCode>(masm);
} else {
using Fn = JSString* (*)(JSContext*, int32_t);
callVM<Fn, jit::StringFromCodePoint>(masm);
}
stubFrame.leave(masm);
masm.storeCallPointerResult(scratch);
}
masm.bind(&done);
masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
return true;
}
bool BaselineCacheIRCompiler::emitStringFromCharCodeResult(
Int32OperandId codeId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
return emitStringFromCodeResult(codeId, StringCode::CodeUnit);
}
bool BaselineCacheIRCompiler::emitStringFromCodePointResult(
Int32OperandId codeId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
return emitStringFromCodeResult(codeId, StringCode::CodePoint);
}
bool BaselineCacheIRCompiler::emitMathRandomResult(uint32_t rngOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegister scratch1(allocator, masm);
AutoScratchRegister64 scratch2(allocator, masm);
AutoAvailableFloatRegister scratchFloat(*this, FloatReg0);
Address rngAddr(stubAddress(rngOffset));
masm.loadPtr(rngAddr, scratch1);
masm.randomDouble(scratch1, scratchFloat, scratch2,
output.valueReg().toRegister64());
if (js::SupportDifferentialTesting()) {
masm.loadConstantDouble(0.0, scratchFloat);
}
masm.boxDouble(scratchFloat, output.valueReg(), scratchFloat);
return true;
}
bool BaselineCacheIRCompiler::emitReflectGetPrototypeOfResult(
ObjOperandId objId) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Register obj = allocator.useRegister(masm, objId);
allocator.discardStack(masm);
MOZ_ASSERT(uintptr_t(TaggedProto::LazyProto) == 1);
masm.loadObjProto(obj, scratch);
Label hasProto;
masm.branchPtr(Assembler::Above, scratch, ImmWord(1), &hasProto);
// Call into the VM for lazy prototypes.
Label slow, done;
masm.branchPtr(Assembler::Equal, scratch, ImmWord(1), &slow);
masm.moveValue(NullValue(), output.valueReg());
masm.jump(&done);
masm.bind(&hasProto);
masm.tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
masm.jump(&done);
{
masm.bind(&slow);
AutoStubFrame stubFrame(*this);
stubFrame.enter(masm, scratch);
masm.Push(obj);
using Fn = bool (*)(JSContext*, HandleObject, MutableHandleValue);
callVM<Fn, jit::GetPrototypeOf>(masm);
stubFrame.leave(masm);
}
masm.bind(&done);
return true;
}
bool BaselineCacheIRCompiler::emitHasClassResult(ObjOperandId objId,
uint32_t claspOffset) {
JitSpew(JitSpew_Codegen, "%s", __FUNCTION__);
AutoOutputRegister output(*this);
Register obj = allocator.useRegister(masm, objId);
AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
Address claspAddr(stubAddress(claspOffset));
masm.loadObjClassUnsafe(obj, scratch);
masm.cmpPtrSet(Assembler::Equal, claspAddr, scratch.get(), scratch);
masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
return true;
}
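// Atomize the string in |str| in place: if it is already an atom there is
// nothing to do; otherwise save the volatile registers and call out to a
// C++ helper to atomize it.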
void BaselineCacheIRCompiler::emitAtomizeString(Register str, Register temp,
Label* failure) {
Label isAtom;
masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT), &isAtom);
{
LiveRegisterSet save(GeneralRegisterSet::Volatile(),
liveVolatileFloatRegs());