/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* JavaScript "portable baseline interpreter": an interpreter that is
* capable of running ICs, but without any native code.
*
* See the [SMDOC] in vm/PortableBaselineInterpret.h for a high-level
* overview.
*/
#include "vm/PortableBaselineInterpret.h"
#include "mozilla/Maybe.h"
#include <algorithm>
#include "fdlibm.h"
#include "jsapi.h"
#include "builtin/DataViewObject.h"
#include "builtin/MapObject.h"
#include "builtin/Object.h"
#include "builtin/RegExp.h"
#include "builtin/String.h"
#include "debugger/DebugAPI.h"
#include "jit/BaselineFrame.h"
#include "jit/BaselineIC.h"
#include "jit/BaselineJIT.h"
#include "jit/CacheIR.h"
#include "jit/CacheIRCompiler.h"
#include "jit/CacheIRReader.h"
#include "jit/JitFrames.h"
#include "jit/JitScript.h"
#include "jit/JSJitFrameIter.h"
#include "jit/VMFunctions.h"
#include "proxy/DeadObjectProxy.h"
#include "proxy/DOMProxy.h"
#include "util/Unicode.h"
#include "vm/AsyncFunction.h"
#include "vm/AsyncIteration.h"
#include "vm/DateObject.h"
#include "vm/EnvironmentObject.h"
#include "vm/EqualityOperations.h"
#include "vm/GeneratorObject.h"
#include "vm/Interpreter.h"
#include "vm/Iteration.h"
#include "vm/JitActivation.h"
#include "vm/JSObject.h"
#include "vm/JSScript.h"
#include "vm/Opcodes.h"
#include "vm/PlainObject.h"
#include "vm/Shape.h"
#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#include "vm/WrapperObject.h"
#include "debugger/DebugAPI-inl.h"
#include "jit/BaselineFrame-inl.h"
#include "jit/JitScript-inl.h"
#include "vm/EnvironmentObject-inl.h"
#include "vm/Interpreter-inl.h"
#include "vm/JSScript-inl.h"
#include "vm/PlainObject-inl.h"
namespace js {
namespace pbl {
using namespace js::jit;
/*
 * Debugging: enable `TRACE_INTERP` for an extremely detailed dump of
 * what PBL is doing at every opcode step.
 */
// #define TRACE_INTERP
#ifdef TRACE_INTERP
// Tracing variant: print eagerly and flush stdout so the trace is
// complete and ordered even if the process crashes mid-opcode.
# define TRACE_PRINTF(...) \
do { \
printf(__VA_ARGS__); \
fflush(stdout); \
} while (0)
#else
// Non-tracing variant: a do/while(0) no-op that still consumes a
// trailing semicolon like a real statement.
# define TRACE_PRINTF(...) \
do { \
} while (0)
#endif
// Default value for kHybridICsInterp below.
#define PBL_HYBRID_ICS_DEFAULT true
// Whether we are using the "hybrid" strategy for ICs (see the [SMDOC]
// in PortableBaselineInterpret.h for more). This is currently a
// constant, but may become configurable in the future.
static const bool kHybridICsInterp = PBL_HYBRID_ICS_DEFAULT;
// Whether to compile interpreter dispatch loops using computed gotos
// or direct switches. (Disabled on WASI and when TRACE_INTERP is
// defined.)
#if !defined(__wasi__) && !defined(TRACE_INTERP)
# define ENABLE_COMPUTED_GOTO_DISPATCH
#endif
// Whether to compile in interrupt checks in the main interpreter loop.
#ifndef __wasi__
// On WASI, with a single thread, there is no possibility for an
// interrupt to come asynchronously.
# define ENABLE_INTERRUPT_CHECKS
#endif
// Whether to compile in coverage counting in the main interpreter loop.
#ifndef __wasi__
# define ENABLE_COVERAGE
#endif
/*
 * -----------------------------------------------
 * Stack handling
 * -----------------------------------------------
 */
// Safety margin (in bytes) required above `base` by most stack checks.
// Large enough for an exit frame.
static const size_t kStackMargin = 1024;
/*
 * A 64-bit value on the auxiliary stack. May either be a raw uint64_t
 * or a `Value` (JS NaN-boxed value).
 */
struct StackVal {
// Raw 64-bit payload; interpretation depends on how the slot was pushed.
uint64_t value;
explicit StackVal(uint64_t v) : value(v) {}
explicit StackVal(const Value& v) : value(v.asRawBits()) {}
// Read the slot back as raw bits.
uint64_t asUInt64() const { return value; }
// Read the slot back as a NaN-boxed Value.
Value asValue() const { return Value::fromRawBits(value); }
};
/*
 * A native-pointer-sized value on the auxiliary stack. This is
 * separate from the above because we support running on 32-bit
 * systems as well! May either be a `void*` (or cast to a
 * `CalleeToken`, which is a typedef for a `void*`), or a `uint32_t`,
 * which always fits in a native pointer width on our supported
 * platforms. (See static_assert below.)
 */
struct StackValNative {
static_assert(sizeof(uintptr_t) >= sizeof(uint32_t),
"Must be at least a 32-bit system to use PBL.");
// Raw pointer-sized payload.
uintptr_t value;
explicit StackValNative(void* v) : value(reinterpret_cast<uintptr_t>(v)) {}
explicit StackValNative(uint32_t v) : value(v) {}
// Read the slot back as an untyped pointer.
void* asVoidPtr() const { return reinterpret_cast<void*>(value); }
// Read the slot back as a CalleeToken (itself a typedef for `void*`).
CalleeToken asCalleeToken() const {
return reinterpret_cast<CalleeToken>(value);
}
};
// Assert that the stack alignment is no more than the size of a
// StackValNative -- we rely on this when setting up call frames.
static_assert(JitStackAlignment <= sizeof(StackValNative));
// The auxiliary stack grows downward: PUSH pre-decrements `sp`, POP
// post-increments it. These operate on 64-bit StackVal slots and
// expect a local `sp` of type `StackVal*` in scope.
#define PUSH(val) *--sp = (val)
#define POP() (*sp++)
// Discard `n` StackVal slots without reading them.
#define POPN(n) sp += (n)
// Push/pop pointer-sized StackValNative slots: reinterpret the current
// `sp` as a StackValNative cursor, adjust it, and store it back as a
// StackVal pointer.
#define PUSHNATIVE(val) \
do { \
StackValNative* nativeSP = reinterpret_cast<StackValNative*>(sp); \
*--nativeSP = (val); \
sp = reinterpret_cast<StackVal*>(nativeSP); \
} while (0)
#define POPNNATIVE(n) \
sp = reinterpret_cast<StackVal*>(reinterpret_cast<StackValNative*>(sp) + (n))
/*
 * Helper class to manage the auxiliary stack and push/pop frames.
 *
 * The stack occupies [base, top) and grows downward. The current
 * stack pointer (`sp`) is threaded explicitly through the methods
 * rather than stored here; only the frame pointer `fp` is cached.
 */
struct Stack {
// Current frame pointer (initially equal to `top`).
StackVal* fp;
// Lowest valid address of the stack region.
StackVal* base;
// Highest address of the stack region.
StackVal* top;
// SP/FP stashed during unwinding.
// NOTE(review): only initialized to nullptr here; the unwind path that
// reads/writes these is outside this chunk -- confirm there.
StackVal* unwindingSP;
StackVal* unwindingFP;
explicit Stack(PortableBaselineStack& pbs)
: fp(reinterpret_cast<StackVal*>(pbs.top)),
base(reinterpret_cast<StackVal*>(pbs.base)),
top(reinterpret_cast<StackVal*>(pbs.top)),
unwindingSP(nullptr),
unwindingFP(nullptr) {}
// Return whether `size` more bytes fit below `sp` without running into
// `base` (plus, by default, the reserved kStackMargin).
MOZ_ALWAYS_INLINE bool check(StackVal* sp, size_t size, bool margin = true) {
return reinterpret_cast<uintptr_t>(base) + size +
(margin ? kStackMargin : 0) <=
reinterpret_cast<uintptr_t>(sp);
}
// Bump-allocate `size` bytes downward from `sp`; returns the new
// (lower) stack pointer, or nullptr on overflow. No margin is
// reserved here.
[[nodiscard]] MOZ_ALWAYS_INLINE StackVal* allocate(StackVal* sp,
size_t size) {
if (!check(sp, size, false)) {
return nullptr;
}
sp = reinterpret_cast<StackVal*>(reinterpret_cast<uintptr_t>(sp) - size);
return sp;
}
// Size in bytes of the span from `sp` up to the current `fp`.
// NOTE(review): `curFrame` is unused by this body -- confirm whether it
// is kept for signature compatibility.
uint32_t frameSize(StackVal* sp, BaselineFrame* curFrame) const {
return sizeof(StackVal) * (reinterpret_cast<StackVal*>(fp) - sp);
}
// Push a new BaselineFrame: save the old `fp` in a native-sized slot,
// make `sp` the new `fp`, then carve out BaselineFrame::Size() bytes
// for the frame itself and initialize its prologue state. Returns
// nullptr on stack overflow.
[[nodiscard]] MOZ_ALWAYS_INLINE BaselineFrame* pushFrame(StackVal* sp,
JSContext* cx,
JSObject* envChain) {
TRACE_PRINTF("pushFrame: sp = %p fp = %p\n", sp, fp);
if (sp == base) {
return nullptr;
}
PUSHNATIVE(StackValNative(fp));
fp = sp;
TRACE_PRINTF("pushFrame: new fp = %p\n", fp);
BaselineFrame* frame =
reinterpret_cast<BaselineFrame*>(allocate(sp, BaselineFrame::Size()));
if (!frame) {
return nullptr;
}
frame->setFlags(BaselineFrame::Flags::RUNNING_IN_INTERPRETER);
frame->setEnvironmentChain(envChain);
JSScript* script = frame->script();
frame->setICScript(script->jitScript()->icScript());
frame->setInterpreterFieldsForPrologue(script);
#ifdef DEBUG
frame->setDebugFrameSize(0);
#endif
return frame;
}
// Pop the current frame: restore `fp` from the saved slot that `fp`
// points at, and return the new top-of-stack just above that slot.
StackVal* popFrame() {
StackVal* newTOS =
reinterpret_cast<StackVal*>(reinterpret_cast<StackValNative*>(fp) + 1);
fp = reinterpret_cast<StackVal*>(
reinterpret_cast<StackValNative*>(fp)->asVoidPtr());
MOZ_ASSERT(fp);
TRACE_PRINTF("popFrame: fp = %p\n", fp);
return newTOS;
}
// Debug-only: record the current frame size (fp - sp, in bytes) on
// `prevFrame`. Compiles to nothing in release builds.
void setFrameSize(StackVal* sp, BaselineFrame* prevFrame) {
#ifdef DEBUG
MOZ_ASSERT(fp != nullptr);
uintptr_t frameSize =
reinterpret_cast<uintptr_t>(fp) - reinterpret_cast<uintptr_t>(sp);
MOZ_ASSERT(reinterpret_cast<uintptr_t>(fp) >=
reinterpret_cast<uintptr_t>(sp));
TRACE_PRINTF("pushExitFrame: fp = %p cur() = %p -> frameSize = %d\n", fp,
sp, int(frameSize));
MOZ_ASSERT(frameSize >= BaselineFrame::Size());
prevFrame->setDebugFrameSize(frameSize);
#endif
}
// Push an exit frame on top of `prevFrame`'s frame: a jit-call frame
// descriptor, a fake (null) return address, and the previous frame
// pointer; `fp` is left pointing at the saved-prevFP slot, then a
// Bare exit-frame-type marker is pushed below it. Returns the new
// `fp`, or nullptr on stack overflow.
[[nodiscard]] MOZ_ALWAYS_INLINE StackVal* pushExitFrame(
StackVal* sp, BaselineFrame* prevFrame) {
uint8_t* prevFP =
reinterpret_cast<uint8_t*>(prevFrame) + BaselineFrame::Size();
TRACE_PRINTF(
"pushExitFrame: prevFrame = %p sp = %p BaselineFrame::Size() = %d -> "
"computed prevFP = %p actual fp = %p\n",
prevFrame, sp, int(BaselineFrame::Size()), prevFP, fp);
MOZ_ASSERT(reinterpret_cast<StackVal*>(prevFP) == fp);
setFrameSize(sp, prevFrame);
if (!check(sp, sizeof(StackVal) * 4, false)) {
return nullptr;
}
PUSHNATIVE(StackValNative(
MakeFrameDescriptorForJitCall(FrameType::BaselineJS, 0)));
PUSHNATIVE(StackValNative(nullptr)); // fake return address.
PUSHNATIVE(StackValNative(prevFP));
StackVal* exitFP = sp;
fp = exitFP;
TRACE_PRINTF(" -> fp = %p\n", fp);
PUSHNATIVE(StackValNative(uint32_t(ExitFrameType::Bare)));
return exitFP;
}
// Pop an exit frame previously produced by pushExitFrame: restore
// `this->fp` from the saved-prevFP slot. (The parameter deliberately
// shadows the member.)
void popExitFrame(StackVal* fp) {
StackVal* prevFP = reinterpret_cast<StackVal*>(
reinterpret_cast<StackValNative*>(fp)->asVoidPtr());
MOZ_ASSERT(prevFP);
this->fp = prevFP;
TRACE_PRINTF("popExitFrame: fp -> %p\n", fp);
}
// The BaselineFrame allocated just below the current `fp`.
BaselineFrame* frameFromFP() {
return reinterpret_cast<BaselineFrame*>(reinterpret_cast<uintptr_t>(fp) -
BaselineFrame::Size());
}
// View a stack slot in place as a (Mutable)HandleValue, without
// creating a new root.
static HandleValue handle(StackVal* sp) {
return HandleValue::fromMarkedLocation(reinterpret_cast<Value*>(sp));
}
static MutableHandleValue handleMut(StackVal* sp) {
return MutableHandleValue::fromMarkedLocation(reinterpret_cast<Value*>(sp));
}
};
/*
 * -----------------------------------------------
 * Interpreter state
 * -----------------------------------------------
 */
// Virtual register file for the CacheIR (IC) interpreter.
struct ICRegs {
// Fixed capacity; ICs that would define more operands than this must
// bail out (see BOUNDSCHECK in the IC interpreter).
static const int kMaxICVals = 16;
// Values can be split across two OR'd halves: unboxed bits and
// tags. We mostly rely on the CacheIRWriter/Reader typed OperandId
// system to ensure "type safety" in CacheIR w.r.t. unboxing: the
// existence of an ObjOperandId implies that the value is unboxed,
// so `icVals` contains a pointer (reinterpret-casted to a
// `uint64_t`) and `icTags` contains the tag bits. An operator that
// requires a tagged Value can OR the two together (this corresponds
// to `useValueRegister` rather than `useRegister` in the native
// baseline compiler).
uint64_t icVals[kMaxICVals];
uint64_t icTags[kMaxICVals]; // Shifted tags.
// NOTE(review): appears to count out-of-band extra arguments for call
// ICs; the consuming code is outside this chunk -- confirm there.
int extraArgs;
};
// A bundle of pre-constructed Rooted slots of assorted types, created
// once and reused so hot paths don't repeatedly construct Rooted<>
// wrappers.
// NOTE(review): the consumers are elsewhere in this file -- confirm
// which slots each opcode/IC uses.
struct State {
RootedValue value0;
RootedValue value1;
RootedValue value2;
RootedValue value3;
RootedValue res;
RootedObject obj0;
RootedObject obj1;
RootedObject obj2;
RootedString str0;
RootedString str1;
RootedString str2;
RootedScript script0;
Rooted<PropertyName*> name0;
Rooted<jsid> id0;
Rooted<JSAtom*> atom0;
RootedFunction fun0;
Rooted<Scope*> scope0;
explicit State(JSContext* cx)
: value0(cx),
value1(cx),
value2(cx),
value3(cx),
res(cx),
obj0(cx),
obj1(cx),
obj2(cx),
str0(cx),
str1(cx),
str2(cx),
script0(cx),
name0(cx),
id0(cx),
atom0(cx),
fun0(cx),
scope0(cx) {}
};
/*
 * -----------------------------------------------
 * RAII helpers for pushing exit frames.
 *
 * (See [SMDOC] in PortableBaselineInterpret.h for more.)
 * -----------------------------------------------
 */
// Holds the JSContext and the current BaselineFrame on behalf of
// VMFrame. Constructing one nulls out the caller's own `cx` variable
// (note the reference parameter), so code that wants a context back
// must either push an exit frame (VMFrame) or explicitly use
// cxForLocalUseOnly().
class VMFrameManager {
JSContext* cx;
BaselineFrame* frame;
friend class VMFrame;
public:
VMFrameManager(JSContext*& cx_, BaselineFrame* frame_)
: cx(cx_), frame(frame_) {
// Once the manager exists, we need to create an exit frame to
// have access to the cx (unless the caller promises it is not
// calling into the rest of the runtime).
cx_ = nullptr;
}
// Point the manager at a different current frame.
void switchToFrame(BaselineFrame* frame) { this->frame = frame; }
// Provides the JSContext, but *only* if no calls into the rest of
// the runtime (that may invoke a GC or stack walk) occur. Avoids
// the overhead of pushing an exit frame.
JSContext* cxForLocalUseOnly() const { return cx; }
};
/*
 * RAII helper that pushes an exit frame on construction and pops it
 * (restoring the previously saved portable-baseline stack top) on
 * destruction. Implicitly convertible to `JSContext*`, so within a
 * PUSH_EXIT_FRAME_OR_RET region the name `cx` works as a context
 * again. Callers must check success() before using the frame.
 */
class VMFrame {
  JSContext* cx;
  Stack& stack;
  // The pushed exit frame, or nullptr if pushing failed (overflow).
  StackVal* exitFP;
  // Saved cx->portableBaselineStack().top; restored in the destructor.
  // Only initialized when exitFP is non-null.
  void* prevSavedStack;

 public:
  VMFrame(VMFrameManager& mgr, Stack& stack_, StackVal* sp)
      : cx(mgr.cx), stack(stack_) {
    exitFP = stack.pushExitFrame(sp, mgr.frame);
    if (!exitFP) {
      // Construction failed: leave the activation and saved stack top
      // untouched. The destructor checks exitFP and does nothing.
      return;
    }
    cx->activation()->asJit()->setJSExitFP(reinterpret_cast<uint8_t*>(exitFP));
    prevSavedStack = cx->portableBaselineStack().top;
    cx->portableBaselineStack().top = reinterpret_cast<void*>(spBelowFrame());
  }
  // Stack pointer just below the exit frame's saved-fp slot.
  StackVal* spBelowFrame() {
    return reinterpret_cast<StackVal*>(reinterpret_cast<uintptr_t>(exitFP) -
                                       sizeof(StackValNative));
  }
  ~VMFrame() {
    // Bug fix: if pushExitFrame failed, there is no frame to pop and
    // `prevSavedStack` was never written. The old unconditional pop
    // dereferenced a null frame pointer and clobbered the saved stack
    // top with garbage on the (caller-bails-out) failure path.
    if (!exitFP) {
      return;
    }
    stack.popExitFrame(exitFP);
    cx->portableBaselineStack().top = prevSavedStack;
  }
  JSContext* getCx() const { return cx; }
  operator JSContext*() const { return cx; }
  // Whether the exit frame was pushed; when false, bail out without
  // using this frame.
  bool success() const { return exitFP != nullptr; }
};
// Push an exit frame via an RAII VMFrame named `cx` -- deliberately
// shadowing any enclosing `cx`, so code below uses the VMFrame (which
// converts implicitly to JSContext*) as its context. Returns `value`
// from the enclosing function if the push fails, and shadows `sp` with
// the stack pointer just below the new frame.
#define PUSH_EXIT_FRAME_OR_RET(value, init_sp) \
VMFrame cx(ctx.frameMgr, ctx.stack, init_sp); \
if (!cx.success()) { \
return value; \
} \
StackVal* sp = cx.spBelowFrame(); /* shadow the definition */ \
(void)sp; /* avoid unused-variable warnings */
// IC variant: pre-set ctx.error so that, if the push fails and the IC
// error sentinel is returned, the error code is already in place.
// Starts from ctx.sp().
#define PUSH_IC_FRAME() \
ctx.error = PBIResult::Error; \
PUSH_EXIT_FRAME_OR_RET(IC_ERROR_SENTINEL(), ctx.sp())
// Fallback-IC variant: same, but starts from the local `sp`.
#define PUSH_FALLBACK_IC_FRAME() \
ctx.error = PBIResult::Error; \
PUSH_EXIT_FRAME_OR_RET(IC_ERROR_SENTINEL(), sp)
// Interpreter variant: sync the frame's interpreter PC and SP first;
// returns PBIResult::Error on failure.
#define PUSH_EXIT_FRAME() \
frame->interpreterPC() = pc; \
SYNCSP(); \
PUSH_EXIT_FRAME_OR_RET(PBIResult::Error, sp)
/*
 * -----------------------------------------------
 * IC Interpreter
 * -----------------------------------------------
 */
// Bundled state for passing to ICs, in order to reduce the number of
// arguments and hence make the call more ABI-efficient. (On some
// platforms, e.g. Wasm on Wasmtime on x86-64, we have as few as four
// register arguments available before args go through the stack.)
struct ICCtx {
// Current baseline frame.
BaselineFrame* frame;
// Exit-frame manager; owns the (hidden) JSContext.
VMFrameManager frameMgr;
// Shared rooted scratch slots.
State& state;
// IC virtual register file.
ICRegs icregs;
Stack& stack;
// Stack pointer for IC execution; starts null.
// NOTE(review): set by callers outside this chunk -- confirm.
StackVal* sp_;
// Error code reported when a stub returns IC_ERROR_SENTINEL().
PBIResult error;
// Third IC argument, passed out-of-band because ICStubFunc only
// carries two 64-bit value arguments directly (see PBL_CALL_IC).
uint64_t arg2;
ICCtx(JSContext* cx, BaselineFrame* frame_, State& state_, Stack& stack_)
: frame(frame_),
frameMgr(cx, frame_),
state(state_),
icregs(),
stack(stack_),
sp_(nullptr),
error(PBIResult::Ok),
arg2(0) {}
StackVal* sp() { return sp_; }
};
// Sentinel raw-Value bits returned by a stub to signal failure; the
// real error code is then in ICCtx::error.
#define IC_ERROR_SENTINEL() (JS::MagicValue(JS_GENERIC_MAGIC).asRawBits())
// Universal signature for an IC stub function.
typedef uint64_t (*ICStubFunc)(uint64_t arg0, uint64_t arg1, ICStub* stub,
ICCtx& ctx);
// Invoke an IC's jitcode as an ICStubFunc. `arg2value` travels via
// ctx.arg2. Note that `hasarg2` is not referenced by this expansion;
// ctx.arg2 is stored unconditionally.
#define PBL_CALL_IC(jitcode, ctx, stubvalue, result, arg0, arg1, arg2value, \
hasarg2) \
do { \
ctx.arg2 = arg2value; \
ICStubFunc func = reinterpret_cast<ICStubFunc>(jitcode); \
result = func(arg0, arg1, stubvalue, ctx); \
} while (0)
// Signature of a portable-baseline-interpreter body function,
// including the restart-state parameters.
// NOTE(review): the implementation is outside this chunk -- see it for
// the precise restart semantics.
typedef PBIResult (*PBIFunc)(JSContext* cx_, State& state, Stack& stack,
StackVal* sp, JSObject* envChain, Value* ret,
jsbytecode* pc, ImmutableScriptData* isd,
jsbytecode* restartEntryPC,
BaselineFrame* restartFrame,
StackVal* restartEntryFrame,
PBIResult restartCode);
// Fallback: advance to and invoke the next IC stub in the chain
// (defined later in the file).
static uint64_t CallNextIC(uint64_t arg0, uint64_t arg1, ICStub* stub,
ICCtx& ctx);
// Compute min (isMax == false) or max (isMax == true) of two doubles
// with JS semantics: any NaN operand yields NaN, and -0 and +0 are
// distinguished even though they compare equal (min(-0, 0) == -0,
// max(-0, 0) == +0).
static double DoubleMinMax(bool isMax, double first, double second) {
  // NaN is contagious.
  if (std::isnan(first) || std::isnan(second)) {
    return JS::GenericNaN();
  }
  // Both operands are zero: the comparison below can't tell -0 from
  // +0, so pick the result's sign from the operands' sign bits. Max
  // prefers +0 (either operand positive); min prefers -0 (both must
  // be positive to stay positive).
  if (first == 0 && second == 0) {
    bool firstPos = !std::signbit(first);
    bool secondPos = !std::signbit(second);
    bool resultPos = isMax ? (firstPos || secondPos) : (firstPos && secondPos);
    return resultPos ? 0.0 : -0.0;
  }
  // Ordinary case: a plain ordered comparison suffices.
  return isMax ? std::max(first, second) : std::min(first, second);
}
// Interpreter for CacheIR.
uint64_t ICInterpretOps(uint64_t arg0, uint64_t arg1, ICStub* stub,
ICCtx& ctx) {
{
#define DECLARE_CACHEOP_CASE(name) __label__ cacheop_##name
#ifdef ENABLE_COMPUTED_GOTO_DISPATCH
# define CACHEOP_CASE(name) cacheop_##name : CACHEOP_TRACE(name)
# define CACHEOP_CASE_FALLTHROUGH(name) CACHEOP_CASE(name)
# define DISPATCH_CACHEOP() \
cacheop = cacheIRReader.readOp(); \
goto* addresses[long(cacheop)];
#else // ENABLE_COMPUTED_GOTO_DISPATCH
# define CACHEOP_CASE(name) \
case CacheOp::name: \
cacheop_##name : CACHEOP_TRACE(name)
# define CACHEOP_CASE_FALLTHROUGH(name) \
[[fallthrough]]; \
CACHEOP_CASE(name)
# define DISPATCH_CACHEOP() \
cacheop = cacheIRReader.readOp(); \
goto dispatch;
#endif // !ENABLE_COMPUTED_GOTO_DISPATCH
#define READ_REG(index) ctx.icregs.icVals[(index)]
#define READ_VALUE_REG(index) \
Value::fromRawBits(ctx.icregs.icVals[(index)] | ctx.icregs.icTags[(index)])
#define WRITE_REG(index, value, tag) \
do { \
ctx.icregs.icVals[(index)] = (value); \
ctx.icregs.icTags[(index)] = uint64_t(JSVAL_TAG_##tag) << JSVAL_TAG_SHIFT; \
} while (0)
#define WRITE_VALUE_REG(index, value) \
do { \
ctx.icregs.icVals[(index)] = (value).asRawBits(); \
ctx.icregs.icTags[(index)] = 0; \
} while (0)
DECLARE_CACHEOP_CASE(ReturnFromIC);
DECLARE_CACHEOP_CASE(GuardToObject);
DECLARE_CACHEOP_CASE(GuardIsNullOrUndefined);
DECLARE_CACHEOP_CASE(GuardIsNull);
DECLARE_CACHEOP_CASE(GuardIsUndefined);
DECLARE_CACHEOP_CASE(GuardIsNotUninitializedLexical);
DECLARE_CACHEOP_CASE(GuardToBoolean);
DECLARE_CACHEOP_CASE(GuardToString);
DECLARE_CACHEOP_CASE(GuardToSymbol);
DECLARE_CACHEOP_CASE(GuardToBigInt);
DECLARE_CACHEOP_CASE(GuardIsNumber);
DECLARE_CACHEOP_CASE(GuardToInt32);
DECLARE_CACHEOP_CASE(GuardToNonGCThing);
DECLARE_CACHEOP_CASE(GuardBooleanToInt32);
DECLARE_CACHEOP_CASE(GuardToInt32Index);
DECLARE_CACHEOP_CASE(Int32ToIntPtr);
DECLARE_CACHEOP_CASE(GuardToInt32ModUint32);
DECLARE_CACHEOP_CASE(GuardNonDoubleType);
DECLARE_CACHEOP_CASE(GuardShape);
DECLARE_CACHEOP_CASE(GuardFuse);
DECLARE_CACHEOP_CASE(GuardProto);
DECLARE_CACHEOP_CASE(GuardNullProto);
DECLARE_CACHEOP_CASE(GuardClass);
DECLARE_CACHEOP_CASE(GuardAnyClass);
DECLARE_CACHEOP_CASE(GuardGlobalGeneration);
DECLARE_CACHEOP_CASE(HasClassResult);
DECLARE_CACHEOP_CASE(GuardCompartment);
DECLARE_CACHEOP_CASE(GuardIsExtensible);
DECLARE_CACHEOP_CASE(GuardIsNativeObject);
DECLARE_CACHEOP_CASE(GuardIsProxy);
DECLARE_CACHEOP_CASE(GuardIsNotProxy);
DECLARE_CACHEOP_CASE(GuardIsNotArrayBufferMaybeShared);
DECLARE_CACHEOP_CASE(GuardIsTypedArray);
DECLARE_CACHEOP_CASE(GuardHasProxyHandler);
DECLARE_CACHEOP_CASE(GuardIsNotDOMProxy);
DECLARE_CACHEOP_CASE(GuardSpecificObject);
DECLARE_CACHEOP_CASE(GuardObjectIdentity);
DECLARE_CACHEOP_CASE(GuardSpecificFunction);
DECLARE_CACHEOP_CASE(GuardFunctionScript);
DECLARE_CACHEOP_CASE(GuardSpecificAtom);
DECLARE_CACHEOP_CASE(GuardSpecificSymbol);
DECLARE_CACHEOP_CASE(GuardSpecificInt32);
DECLARE_CACHEOP_CASE(GuardNoDenseElements);
DECLARE_CACHEOP_CASE(GuardStringToIndex);
DECLARE_CACHEOP_CASE(GuardStringToInt32);
DECLARE_CACHEOP_CASE(GuardStringToNumber);
DECLARE_CACHEOP_CASE(BooleanToNumber);
DECLARE_CACHEOP_CASE(GuardHasGetterSetter);
DECLARE_CACHEOP_CASE(GuardInt32IsNonNegative);
DECLARE_CACHEOP_CASE(GuardDynamicSlotIsSpecificObject);
DECLARE_CACHEOP_CASE(GuardDynamicSlotIsNotObject);
DECLARE_CACHEOP_CASE(GuardFixedSlotValue);
DECLARE_CACHEOP_CASE(GuardDynamicSlotValue);
DECLARE_CACHEOP_CASE(LoadFixedSlot);
DECLARE_CACHEOP_CASE(LoadDynamicSlot);
DECLARE_CACHEOP_CASE(GuardNoAllocationMetadataBuilder);
DECLARE_CACHEOP_CASE(GuardFunctionHasJitEntry);
DECLARE_CACHEOP_CASE(GuardFunctionHasNoJitEntry);
DECLARE_CACHEOP_CASE(GuardFunctionIsNonBuiltinCtor);
DECLARE_CACHEOP_CASE(GuardFunctionIsConstructor);
DECLARE_CACHEOP_CASE(GuardNotClassConstructor);
DECLARE_CACHEOP_CASE(GuardArrayIsPacked);
DECLARE_CACHEOP_CASE(GuardArgumentsObjectFlags);
DECLARE_CACHEOP_CASE(LoadObject);
DECLARE_CACHEOP_CASE(LoadProtoObject);
DECLARE_CACHEOP_CASE(LoadProto);
DECLARE_CACHEOP_CASE(LoadEnclosingEnvironment);
DECLARE_CACHEOP_CASE(LoadWrapperTarget);
DECLARE_CACHEOP_CASE(LoadValueTag);
DECLARE_CACHEOP_CASE(LoadArgumentFixedSlot);
DECLARE_CACHEOP_CASE(LoadArgumentDynamicSlot);
DECLARE_CACHEOP_CASE(TruncateDoubleToUInt32);
DECLARE_CACHEOP_CASE(MegamorphicLoadSlotResult);
DECLARE_CACHEOP_CASE(MegamorphicLoadSlotByValueResult);
DECLARE_CACHEOP_CASE(MegamorphicSetElement);
DECLARE_CACHEOP_CASE(StoreFixedSlot);
DECLARE_CACHEOP_CASE(StoreDynamicSlot);
DECLARE_CACHEOP_CASE(AddAndStoreFixedSlot);
DECLARE_CACHEOP_CASE(AddAndStoreDynamicSlot);
DECLARE_CACHEOP_CASE(AllocateAndStoreDynamicSlot);
DECLARE_CACHEOP_CASE(StoreDenseElement);
DECLARE_CACHEOP_CASE(StoreDenseElementHole);
DECLARE_CACHEOP_CASE(ArrayPush);
DECLARE_CACHEOP_CASE(IsObjectResult);
DECLARE_CACHEOP_CASE(Int32MinMax);
DECLARE_CACHEOP_CASE(StoreTypedArrayElement);
DECLARE_CACHEOP_CASE(CallInt32ToString);
DECLARE_CACHEOP_CASE(CallScriptedFunction);
DECLARE_CACHEOP_CASE(CallNativeFunction);
DECLARE_CACHEOP_CASE(MetaScriptedThisShape);
DECLARE_CACHEOP_CASE(LoadFixedSlotResult);
DECLARE_CACHEOP_CASE(LoadDynamicSlotResult);
DECLARE_CACHEOP_CASE(LoadDenseElementResult);
DECLARE_CACHEOP_CASE(LoadInt32ArrayLengthResult);
DECLARE_CACHEOP_CASE(LoadInt32ArrayLength);
DECLARE_CACHEOP_CASE(LoadArgumentsObjectArgResult);
DECLARE_CACHEOP_CASE(LinearizeForCharAccess);
DECLARE_CACHEOP_CASE(LoadStringCharResult);
DECLARE_CACHEOP_CASE(LoadStringCharCodeResult);
DECLARE_CACHEOP_CASE(LoadStringLengthResult);
DECLARE_CACHEOP_CASE(LoadObjectResult);
DECLARE_CACHEOP_CASE(LoadStringResult);
DECLARE_CACHEOP_CASE(LoadSymbolResult);
DECLARE_CACHEOP_CASE(LoadInt32Result);
DECLARE_CACHEOP_CASE(LoadDoubleResult);
DECLARE_CACHEOP_CASE(LoadBigIntResult);
DECLARE_CACHEOP_CASE(LoadBooleanResult);
DECLARE_CACHEOP_CASE(LoadInt32Constant);
DECLARE_CACHEOP_CASE(LoadConstantStringResult);
DECLARE_CACHEOP_CASE(Int32AddResult);
DECLARE_CACHEOP_CASE(Int32SubResult);
DECLARE_CACHEOP_CASE(Int32MulResult);
DECLARE_CACHEOP_CASE(Int32DivResult);
DECLARE_CACHEOP_CASE(Int32ModResult);
DECLARE_CACHEOP_CASE(Int32BitOrResult);
DECLARE_CACHEOP_CASE(Int32BitXorResult);
DECLARE_CACHEOP_CASE(Int32BitAndResult);
DECLARE_CACHEOP_CASE(Int32PowResult);
DECLARE_CACHEOP_CASE(Int32IncResult);
DECLARE_CACHEOP_CASE(LoadInt32TruthyResult);
DECLARE_CACHEOP_CASE(LoadStringTruthyResult);
DECLARE_CACHEOP_CASE(LoadObjectTruthyResult);
DECLARE_CACHEOP_CASE(LoadValueResult);
DECLARE_CACHEOP_CASE(LoadOperandResult);
DECLARE_CACHEOP_CASE(CallStringConcatResult);
DECLARE_CACHEOP_CASE(CompareStringResult);
DECLARE_CACHEOP_CASE(CompareInt32Result);
DECLARE_CACHEOP_CASE(CompareNullUndefinedResult);
DECLARE_CACHEOP_CASE(AssertPropertyLookup);
DECLARE_CACHEOP_CASE(GuardIsFixedLengthTypedArray);
DECLARE_CACHEOP_CASE(GuardIndexIsNotDenseElement);
DECLARE_CACHEOP_CASE(LoadFixedSlotTypedResult);
DECLARE_CACHEOP_CASE(LoadDenseElementHoleResult);
DECLARE_CACHEOP_CASE(LoadDenseElementExistsResult);
DECLARE_CACHEOP_CASE(LoadTypedArrayElementExistsResult);
DECLARE_CACHEOP_CASE(LoadDenseElementHoleExistsResult);
DECLARE_CACHEOP_CASE(LoadTypedArrayElementResult);
DECLARE_CACHEOP_CASE(RegExpFlagResult);
DECLARE_CACHEOP_CASE(GuardNumberToIntPtrIndex);
DECLARE_CACHEOP_CASE(CallRegExpMatcherResult);
DECLARE_CACHEOP_CASE(CallRegExpSearcherResult);
DECLARE_CACHEOP_CASE(RegExpSearcherLastLimitResult);
DECLARE_CACHEOP_CASE(RegExpHasCaptureGroupsResult);
DECLARE_CACHEOP_CASE(RegExpBuiltinExecMatchResult);
DECLARE_CACHEOP_CASE(RegExpBuiltinExecTestResult);
DECLARE_CACHEOP_CASE(CallSubstringKernelResult);
DECLARE_CACHEOP_CASE(StringReplaceStringResult);
DECLARE_CACHEOP_CASE(StringSplitStringResult);
DECLARE_CACHEOP_CASE(RegExpPrototypeOptimizableResult);
DECLARE_CACHEOP_CASE(RegExpInstanceOptimizableResult);
DECLARE_CACHEOP_CASE(GetFirstDollarIndexResult);
DECLARE_CACHEOP_CASE(StringToAtom);
DECLARE_CACHEOP_CASE(GuardTagNotEqual);
DECLARE_CACHEOP_CASE(IdToStringOrSymbol);
DECLARE_CACHEOP_CASE(MegamorphicStoreSlot);
DECLARE_CACHEOP_CASE(MegamorphicHasPropResult);
DECLARE_CACHEOP_CASE(ObjectToIteratorResult);
DECLARE_CACHEOP_CASE(ArrayJoinResult);
DECLARE_CACHEOP_CASE(ObjectKeysResult);
DECLARE_CACHEOP_CASE(PackedArrayPopResult);
DECLARE_CACHEOP_CASE(PackedArrayShiftResult);
DECLARE_CACHEOP_CASE(PackedArraySliceResult);
DECLARE_CACHEOP_CASE(IsArrayResult);
DECLARE_CACHEOP_CASE(IsPackedArrayResult);
DECLARE_CACHEOP_CASE(IsCallableResult);
DECLARE_CACHEOP_CASE(IsConstructorResult);
DECLARE_CACHEOP_CASE(IsCrossRealmArrayConstructorResult);
DECLARE_CACHEOP_CASE(IsTypedArrayResult);
DECLARE_CACHEOP_CASE(IsTypedArrayConstructorResult);
DECLARE_CACHEOP_CASE(ArrayBufferViewByteOffsetInt32Result);
DECLARE_CACHEOP_CASE(ArrayBufferViewByteOffsetDoubleResult);
DECLARE_CACHEOP_CASE(TypedArrayByteLengthInt32Result);
DECLARE_CACHEOP_CASE(TypedArrayByteLengthDoubleResult);
DECLARE_CACHEOP_CASE(TypedArrayElementSizeResult);
DECLARE_CACHEOP_CASE(NewStringIteratorResult);
DECLARE_CACHEOP_CASE(NewRegExpStringIteratorResult);
DECLARE_CACHEOP_CASE(ObjectCreateResult);
DECLARE_CACHEOP_CASE(NewArrayFromLengthResult);
DECLARE_CACHEOP_CASE(NewTypedArrayFromArrayBufferResult);
DECLARE_CACHEOP_CASE(NewTypedArrayFromArrayResult);
DECLARE_CACHEOP_CASE(NewTypedArrayFromLengthResult);
DECLARE_CACHEOP_CASE(StringFromCharCodeResult);
DECLARE_CACHEOP_CASE(StringFromCodePointResult);
DECLARE_CACHEOP_CASE(StringIncludesResult);
DECLARE_CACHEOP_CASE(StringIndexOfResult);
DECLARE_CACHEOP_CASE(StringLastIndexOfResult);
DECLARE_CACHEOP_CASE(StringStartsWithResult);
DECLARE_CACHEOP_CASE(StringEndsWithResult);
DECLARE_CACHEOP_CASE(StringToLowerCaseResult);
DECLARE_CACHEOP_CASE(StringToUpperCaseResult);
DECLARE_CACHEOP_CASE(StringTrimResult);
DECLARE_CACHEOP_CASE(StringTrimStartResult);
DECLARE_CACHEOP_CASE(StringTrimEndResult);
DECLARE_CACHEOP_CASE(MathAbsInt32Result);
DECLARE_CACHEOP_CASE(MathAbsNumberResult);
DECLARE_CACHEOP_CASE(MathClz32Result);
DECLARE_CACHEOP_CASE(MathSignInt32Result);
DECLARE_CACHEOP_CASE(MathSignNumberResult);
DECLARE_CACHEOP_CASE(MathSignNumberToInt32Result);
DECLARE_CACHEOP_CASE(MathImulResult);
DECLARE_CACHEOP_CASE(MathSqrtNumberResult);
DECLARE_CACHEOP_CASE(MathFRoundNumberResult);
DECLARE_CACHEOP_CASE(MathRandomResult);
DECLARE_CACHEOP_CASE(MathHypot2NumberResult);
DECLARE_CACHEOP_CASE(MathHypot3NumberResult);
DECLARE_CACHEOP_CASE(MathHypot4NumberResult);
DECLARE_CACHEOP_CASE(MathAtan2NumberResult);
DECLARE_CACHEOP_CASE(MathFloorNumberResult);
DECLARE_CACHEOP_CASE(MathCeilNumberResult);
DECLARE_CACHEOP_CASE(MathTruncNumberResult);
DECLARE_CACHEOP_CASE(MathCeilToInt32Result);
DECLARE_CACHEOP_CASE(MathFloorToInt32Result);
DECLARE_CACHEOP_CASE(MathTruncToInt32Result);
DECLARE_CACHEOP_CASE(MathRoundToInt32Result);
DECLARE_CACHEOP_CASE(NumberMinMax);
DECLARE_CACHEOP_CASE(Int32MinMaxArrayResult);
DECLARE_CACHEOP_CASE(NumberMinMaxArrayResult);
DECLARE_CACHEOP_CASE(MathFunctionNumberResult);
DECLARE_CACHEOP_CASE(NumberParseIntResult);
DECLARE_CACHEOP_CASE(DoubleParseIntResult);
DECLARE_CACHEOP_CASE(ObjectToStringResult);
DECLARE_CACHEOP_CASE(CallNativeSetter);
DECLARE_CACHEOP_CASE(CallSetArrayLength);
DECLARE_CACHEOP_CASE(CallNumberToString);
DECLARE_CACHEOP_CASE(Int32ToStringWithBaseResult);
DECLARE_CACHEOP_CASE(BooleanToString);
DECLARE_CACHEOP_CASE(BindFunctionResult);
DECLARE_CACHEOP_CASE(SpecializedBindFunctionResult);
DECLARE_CACHEOP_CASE(CallGetSparseElementResult);
DECLARE_CACHEOP_CASE(LoadArgumentsObjectLengthResult);
DECLARE_CACHEOP_CASE(LoadArgumentsObjectLength);
DECLARE_CACHEOP_CASE(LoadBoundFunctionNumArgs);
DECLARE_CACHEOP_CASE(LoadBoundFunctionTarget);
DECLARE_CACHEOP_CASE(LoadArrayBufferByteLengthInt32Result);
DECLARE_CACHEOP_CASE(LoadArrayBufferByteLengthDoubleResult);
DECLARE_CACHEOP_CASE(LinearizeForCodePointAccess);
DECLARE_CACHEOP_CASE(LoadArrayBufferViewLengthInt32Result);
DECLARE_CACHEOP_CASE(LoadArrayBufferViewLengthDoubleResult);
DECLARE_CACHEOP_CASE(LoadStringAtResult);
DECLARE_CACHEOP_CASE(LoadStringCodePointResult);
DECLARE_CACHEOP_CASE(CallNativeGetterResult);
DECLARE_CACHEOP_CASE(LoadUndefinedResult);
DECLARE_CACHEOP_CASE(LoadDoubleConstant);
DECLARE_CACHEOP_CASE(LoadBooleanConstant);
DECLARE_CACHEOP_CASE(LoadUndefined);
DECLARE_CACHEOP_CASE(LoadConstantString);
DECLARE_CACHEOP_CASE(LoadInstanceOfObjectResult);
DECLARE_CACHEOP_CASE(LoadTypeOfObjectResult);
DECLARE_CACHEOP_CASE(DoubleAddResult);
DECLARE_CACHEOP_CASE(DoubleSubResult);
DECLARE_CACHEOP_CASE(DoubleMulResult);
DECLARE_CACHEOP_CASE(DoubleDivResult);
DECLARE_CACHEOP_CASE(DoubleModResult);
DECLARE_CACHEOP_CASE(DoublePowResult);
DECLARE_CACHEOP_CASE(Int32LeftShiftResult);
DECLARE_CACHEOP_CASE(Int32RightShiftResult);
DECLARE_CACHEOP_CASE(Int32URightShiftResult);
DECLARE_CACHEOP_CASE(Int32NotResult);
DECLARE_CACHEOP_CASE(LoadDoubleTruthyResult);
DECLARE_CACHEOP_CASE(NewPlainObjectResult);
DECLARE_CACHEOP_CASE(NewArrayObjectResult);
DECLARE_CACHEOP_CASE(CompareObjectResult);
DECLARE_CACHEOP_CASE(CompareSymbolResult);
DECLARE_CACHEOP_CASE(CompareDoubleResult);
DECLARE_CACHEOP_CASE(IndirectTruncateInt32Result);
DECLARE_CACHEOP_CASE(CallScriptedSetter);
DECLARE_CACHEOP_CASE(CallBoundScriptedFunction);
DECLARE_CACHEOP_CASE(CallScriptedGetterResult);
// Define the computed-goto table regardless of dispatch strategy so
// we don't get unused-label errors. (We need some of the labels
// even without this for the predict-next mechanism, so we can't
// conditionally elide labels either.)
static const void* const addresses[long(CacheOp::NumOpcodes)] = {
#define OP(name, ...) &&cacheop_##name,
CACHE_IR_OPS(OP)
#undef OP
};
(void)addresses;
#define CACHEOP_TRACE(name) \
TRACE_PRINTF("cacheop (frame %p stub %p): " #name "\n", ctx.frame, cstub);
#define FAIL_IC() goto next_ic;
// We set a fixed bound on the number of icVals which is smaller than what IC
// generators may use. As a result we can't evaluate an IC if it defines too
// many values. Note that we don't need to check this when reading from icVals
// because we should have bailed out before the earlier write which defined the
// same value. Similarly, we don't need to check writes to locations which we've
// just read from.
#define BOUNDSCHECK(resultId) \
if (resultId.id() >= ICRegs::kMaxICVals) FAIL_IC();
#define PREDICT_NEXT(name) \
if (cacheIRReader.peekOp() == CacheOp::name) { \
cacheIRReader.readOp(); \
cacheop = CacheOp::name; \
goto cacheop_##name; \
}
#define PREDICT_RETURN() \
if (cacheIRReader.peekOp() == CacheOp::ReturnFromIC) { \
TRACE_PRINTF("stub successful, predicted return\n"); \
return retValue; \
}
ICCacheIRStub* cstub = stub->toCacheIRStub();
const CacheIRStubInfo* stubInfo = cstub->stubInfo();
CacheIRReader cacheIRReader(stubInfo);
uint64_t retValue = 0;
CacheOp cacheop;
WRITE_VALUE_REG(0, Value::fromRawBits(arg0));
WRITE_VALUE_REG(1, Value::fromRawBits(arg1));
WRITE_VALUE_REG(2, Value::fromRawBits(ctx.arg2));
DISPATCH_CACHEOP();
#ifndef ENABLE_COMPUTED_GOTO_DISPATCH
dispatch:
switch (cacheop)
#endif
{
CACHEOP_CASE(ReturnFromIC) {
TRACE_PRINTF("stub successful!\n");
return retValue;
}
CACHEOP_CASE(GuardToObject) {
ValOperandId inputId = cacheIRReader.valOperandId();
Value v = READ_VALUE_REG(inputId.id());
TRACE_PRINTF("GuardToObject: icVal %" PRIx64 "\n",
READ_REG(inputId.id()));
if (!v.isObject()) {
FAIL_IC();
}
WRITE_REG(inputId.id(), reinterpret_cast<uint64_t>(&v.toObject()),
OBJECT);
PREDICT_NEXT(GuardShape);
PREDICT_NEXT(GuardSpecificFunction);
DISPATCH_CACHEOP();
}
CACHEOP_CASE(GuardIsNullOrUndefined) {
ValOperandId inputId = cacheIRReader.valOperandId();
Value v = READ_VALUE_REG(inputId.id());
if (!v.isNullOrUndefined()) {
FAIL_IC();
}
DISPATCH_CACHEOP();
}
CACHEOP_CASE(GuardIsNull) {
ValOperandId inputId = cacheIRReader.valOperandId();
Value v = READ_VALUE_REG(inputId.id());
if (!v.isNull()) {
FAIL_IC();
}
DISPATCH_CACHEOP();
}
// Guard that the boxed operand is exactly `undefined`.
CACHEOP_CASE(GuardIsUndefined) {
  ValOperandId inputId = cacheIRReader.valOperandId();
  Value v = READ_VALUE_REG(inputId.id());
  if (!v.isUndefined()) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard that the operand is not the uninitialized-lexical (TDZ) magic value.
CACHEOP_CASE(GuardIsNotUninitializedLexical) {
  ValOperandId valId = cacheIRReader.valOperandId();
  Value val = READ_VALUE_REG(valId.id());
  if (val == MagicValue(JS_UNINITIALIZED_LEXICAL)) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the operand is a boolean, unboxing it in place (0/1) with a
// BOOLEAN tag.
CACHEOP_CASE(GuardToBoolean) {
  ValOperandId inputId = cacheIRReader.valOperandId();
  Value v = READ_VALUE_REG(inputId.id());
  if (!v.isBoolean()) {
    FAIL_IC();
  }
  WRITE_REG(inputId.id(), v.toBoolean() ? 1 : 0, BOOLEAN);
  DISPATCH_CACHEOP();
}
// Guard the operand is a string, unboxing the JSString* in place.
CACHEOP_CASE(GuardToString) {
  ValOperandId inputId = cacheIRReader.valOperandId();
  Value v = READ_VALUE_REG(inputId.id());
  if (!v.isString()) {
    FAIL_IC();
  }
  WRITE_REG(inputId.id(), reinterpret_cast<uint64_t>(v.toString()),
  STRING);
  PREDICT_NEXT(GuardToString);
  DISPATCH_CACHEOP();
}
// Guard the operand is a symbol, unboxing the JS::Symbol* in place.
CACHEOP_CASE(GuardToSymbol) {
  ValOperandId inputId = cacheIRReader.valOperandId();
  Value v = READ_VALUE_REG(inputId.id());
  if (!v.isSymbol()) {
    FAIL_IC();
  }
  WRITE_REG(inputId.id(), reinterpret_cast<uint64_t>(v.toSymbol()),
  SYMBOL);
  PREDICT_NEXT(GuardSpecificSymbol);
  DISPATCH_CACHEOP();
}
// Guard the operand is a BigInt, unboxing the JS::BigInt* in place.
CACHEOP_CASE(GuardToBigInt) {
  ValOperandId inputId = cacheIRReader.valOperandId();
  Value v = READ_VALUE_REG(inputId.id());
  if (!v.isBigInt()) {
    FAIL_IC();
  }
  WRITE_REG(inputId.id(), reinterpret_cast<uint64_t>(v.toBigInt()),
  BIGINT);
  DISPATCH_CACHEOP();
}
// Guard the operand is a number (int32 or double); value left boxed.
CACHEOP_CASE(GuardIsNumber) {
  ValOperandId inputId = cacheIRReader.valOperandId();
  Value v = READ_VALUE_REG(inputId.id());
  if (!v.isNumber()) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the operand is an int32. No register write is needed: see the
// comment below about the low 32 bits of the boxed value.
CACHEOP_CASE(GuardToInt32) {
  ValOperandId inputId = cacheIRReader.valOperandId();
  Value v = READ_VALUE_REG(inputId.id());
  TRACE_PRINTF("GuardToInt32 (%d): icVal %" PRIx64 "\n", inputId.id(),
  READ_REG(inputId.id()));
  if (!v.isInt32()) {
    FAIL_IC();
  }
  // N.B.: we don't need to unbox because the low 32 bits are
  // already the int32 itself, and we are careful when using
  // `Int32Operand`s to only use those bits.
  PREDICT_NEXT(GuardToInt32);
  DISPATCH_CACHEOP();
}
// Guard the operand is not a GC thing (not string/symbol/bigint/object).
CACHEOP_CASE(GuardToNonGCThing) {
  ValOperandId inputId = cacheIRReader.valOperandId();
  Value input = READ_VALUE_REG(inputId.id());
  if (input.isGCThing()) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the operand is a boolean and write it to a separate result
// register as an int32 (0/1).
CACHEOP_CASE(GuardBooleanToInt32) {
  ValOperandId inputId = cacheIRReader.valOperandId();
  Int32OperandId resultId = cacheIRReader.int32OperandId();
  BOUNDSCHECK(resultId);
  Value v = READ_VALUE_REG(inputId.id());
  if (!v.isBoolean()) {
    FAIL_IC();
  }
  WRITE_REG(resultId.id(), v.toBoolean() ? 1 : 0, INT32);
  DISPATCH_CACHEOP();
}
// Convert an int32, or a double that is exactly representable as int32,
// to an int32 index; fail otherwise.
CACHEOP_CASE(GuardToInt32Index) {
  ValOperandId inputId = cacheIRReader.valOperandId();
  Int32OperandId resultId = cacheIRReader.int32OperandId();
  BOUNDSCHECK(resultId);
  Value val = READ_VALUE_REG(inputId.id());
  if (val.isInt32()) {
    WRITE_REG(resultId.id(), val.toInt32(), INT32);
    DISPATCH_CACHEOP();
  } else if (val.isDouble()) {
    double doubleVal = val.toDouble();
    // Only accept doubles with no fractional part that round-trip
    // through int32.
    if (int32_t(doubleVal) == doubleVal) {
      WRITE_REG(resultId.id(), int32_t(doubleVal), INT32);
      DISPATCH_CACHEOP();
    }
  }
  FAIL_IC();
}
// Widen an int32 register to pointer width (infallible).
CACHEOP_CASE(Int32ToIntPtr) {
  Int32OperandId inputId = cacheIRReader.int32OperandId();
  IntPtrOperandId resultId = cacheIRReader.intPtrOperandId();
  BOUNDSCHECK(resultId);
  int32_t input = int32_t(READ_REG(inputId.id()));
  // Note that this must sign-extend to pointer width:
  WRITE_REG(resultId.id(), intptr_t(input), OBJECT);
  DISPATCH_CACHEOP();
}
// Convert an int32 or in-int64-range double to an int32, truncating
// modulo 2^32 (double path keeps only the low 32 bits of the int64).
CACHEOP_CASE(GuardToInt32ModUint32) {
  ValOperandId inputId = cacheIRReader.valOperandId();
  Int32OperandId resultId = cacheIRReader.int32OperandId();
  BOUNDSCHECK(resultId);
  Value input = READ_VALUE_REG(inputId.id());
  if (input.isInt32()) {
    WRITE_REG(resultId.id(), input.toInt32(), INT32);
    DISPATCH_CACHEOP();
  } else if (input.isDouble()) {
    double doubleVal = input.toDouble();
    // Accept any double that fits in an int64_t but truncate the top 32
    // bits.
    if (doubleVal >= double(INT64_MIN) &&
    doubleVal <= double(INT64_MAX)) {
      WRITE_REG(resultId.id(), int64_t(doubleVal), INT32);
      DISPATCH_CACHEOP();
    }
  }
  FAIL_IC();
}
// Guard the boxed operand has the given (non-double) ValueType tag.
CACHEOP_CASE(GuardNonDoubleType) {
  ValOperandId inputId = cacheIRReader.valOperandId();
  ValueType type = cacheIRReader.valueType();
  Value val = READ_VALUE_REG(inputId.id());
  switch (type) {
    case ValueType::String:
    if (!val.isString()) {
      FAIL_IC();
    }
    break;
    case ValueType::Symbol:
    if (!val.isSymbol()) {
      FAIL_IC();
    }
    break;
    case ValueType::BigInt:
    if (!val.isBigInt()) {
      FAIL_IC();
    }
    break;
    case ValueType::Int32:
    if (!val.isInt32()) {
      FAIL_IC();
    }
    break;
    case ValueType::Boolean:
    if (!val.isBoolean()) {
      FAIL_IC();
    }
    break;
    case ValueType::Undefined:
    if (!val.isUndefined()) {
      FAIL_IC();
    }
    break;
    case ValueType::Null:
    if (!val.isNull()) {
      FAIL_IC();
    }
    break;
    default:
    MOZ_CRASH("Unexpected type");
  }
  DISPATCH_CACHEOP();
}
// Guard the object's shape pointer equals the shape baked into stub data.
CACHEOP_CASE(GuardShape) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t shapeOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  uintptr_t expectedShape = stubInfo->getStubRawWord(cstub, shapeOffset);
  if (reinterpret_cast<uintptr_t>(obj->shape()) != expectedShape) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard that the given realm fuse is still intact (not popped).
CACHEOP_CASE(GuardFuse) {
  RealmFuses::FuseIndex fuseIndex = cacheIRReader.realmFuseIndex();
  if (!ctx.frameMgr.cxForLocalUseOnly()
  ->realm()
  ->realmFuses.getFuseByIndex(fuseIndex)
  ->intact()) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the object's static prototype equals the one baked into stub data.
CACHEOP_CASE(GuardProto) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t protoOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  JSObject* proto = reinterpret_cast<JSObject*>(
  stubInfo->getStubRawWord(cstub, protoOffset));
  if (obj->staticPrototype() != proto) {
    FAIL_IC();
  }
  PREDICT_NEXT(LoadProto);
  DISPATCH_CACHEOP();
}
// Guard the object's tagged prototype is null (raw bits zero).
CACHEOP_CASE(GuardNullProto) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  if (obj->taggedProto().raw()) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard that the object's class matches the class kind encoded in the IC.
// Each kind compares against the corresponding JSClass singleton, except
// WindowProxy (runtime-registered class) and JSFunction (is<> check,
// since functions can be JSFunction or extended-function classes).
CACHEOP_CASE(GuardClass) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  GuardClassKind kind = cacheIRReader.guardClassKind();
  JSObject* object = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  switch (kind) {
    case GuardClassKind::Array:
    if (object->getClass() != &ArrayObject::class_) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::PlainObject:
    if (object->getClass() != &PlainObject::class_) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::FixedLengthArrayBuffer:
    if (object->getClass() != &FixedLengthArrayBufferObject::class_) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::ResizableArrayBuffer:
    if (object->getClass() != &ResizableArrayBufferObject::class_) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::FixedLengthSharedArrayBuffer:
    if (object->getClass() !=
    &FixedLengthSharedArrayBufferObject::class_) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::GrowableSharedArrayBuffer:
    if (object->getClass() !=
    &GrowableSharedArrayBufferObject::class_) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::FixedLengthDataView:
    if (object->getClass() != &FixedLengthDataViewObject::class_) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::ResizableDataView:
    if (object->getClass() != &ResizableDataViewObject::class_) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::MappedArguments:
    if (object->getClass() != &MappedArgumentsObject::class_) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::UnmappedArguments:
    if (object->getClass() != &UnmappedArgumentsObject::class_) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::WindowProxy:
    // WindowProxy's class is embedding-provided; fetch it from the
    // runtime rather than a static singleton.
    if (object->getClass() != ctx.frameMgr.cxForLocalUseOnly()
    ->runtime()
    ->maybeWindowProxyClass()) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::JSFunction:
    if (!object->is<JSFunction>()) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::Set:
    if (object->getClass() != &SetObject::class_) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::Map:
    if (object->getClass() != &MapObject::class_) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::BoundFunction:
    if (object->getClass() != &BoundFunctionObject::class_) {
      FAIL_IC();
    }
    break;
    case GuardClassKind::Date:
    if (object->getClass() != &DateObject::class_) {
      FAIL_IC();
    }
    break;
  }
  DISPATCH_CACHEOP();
}
// Guard the object's class equals an arbitrary JSClass* from stub data.
CACHEOP_CASE(GuardAnyClass) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t claspOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  JSClass* clasp = reinterpret_cast<JSClass*>(
  stubInfo->getStubRawWord(cstub, claspOffset));
  if (obj->getClass() != clasp) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard a generation counter (read through a pointer in stub data) still
// has the expected value.
CACHEOP_CASE(GuardGlobalGeneration) {
  uint32_t expectedOffset = cacheIRReader.stubOffset();
  uint32_t generationAddrOffset = cacheIRReader.stubOffset();
  uint32_t expected = stubInfo->getStubRawInt32(cstub, expectedOffset);
  uint32_t* generationAddr = reinterpret_cast<uint32_t*>(
  stubInfo->getStubRawWord(cstub, generationAddrOffset));
  if (*generationAddr != expected) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Produce a boolean result: does the object's class equal the stub's
// JSClass*? (Result, not guard: never fails the IC.)
CACHEOP_CASE(HasClassResult) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t claspOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  JSClass* clasp = reinterpret_cast<JSClass*>(
  stubInfo->getStubRawWord(cstub, claspOffset));
  retValue = BooleanValue(obj->getClass() == clasp).asRawBits();
  PREDICT_RETURN();
  DISPATCH_CACHEOP();
}
// Guard the object is in the expected compartment; also fail if the
// stub's global has been nuked into a dead proxy.
CACHEOP_CASE(GuardCompartment) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t globalOffset = cacheIRReader.stubOffset();
  uint32_t compartmentOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  JSObject* global = reinterpret_cast<JSObject*>(
  stubInfo->getStubRawWord(cstub, globalOffset));
  JS::Compartment* compartment = reinterpret_cast<JS::Compartment*>(
  stubInfo->getStubRawWord(cstub, compartmentOffset));
  if (IsDeadProxyObject(global)) {
    FAIL_IC();
  }
  if (obj->compartment() != compartment) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the (non-proxy) object is extensible, i.e. new properties may be
// added. The guard must *succeed* for extensible objects and fail the IC
// otherwise — matching the baseline compiler, which branches to failure
// via branchIfObjectNotExtensible. The previous condition here was
// inverted: it failed precisely when the object *was* extensible.
CACHEOP_CASE(GuardIsExtensible) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  if (!obj->nonProxyIsExtensible()) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the object is a NativeObject (has native shape/slots layout).
CACHEOP_CASE(GuardIsNativeObject) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  if (!obj->is<NativeObject>()) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the object is a proxy.
CACHEOP_CASE(GuardIsProxy) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  if (!obj->is<ProxyObject>()) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the object is not a proxy.
CACHEOP_CASE(GuardIsNotProxy) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  if (obj->is<ProxyObject>()) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the object is none of the four ArrayBuffer(Shared) classes.
CACHEOP_CASE(GuardIsNotArrayBufferMaybeShared) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  const JSClass* clasp = obj->getClass();
  if (clasp == &FixedLengthArrayBufferObject::class_ ||
  clasp == &FixedLengthSharedArrayBufferObject::class_ ||
  clasp == &ResizableArrayBufferObject::class_ ||
  clasp == &GrowableSharedArrayBufferObject::class_) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the object's class is any typed-array class.
CACHEOP_CASE(GuardIsTypedArray) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  if (!IsTypedArrayClass(obj->getClass())) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the object's class is a fixed-length (non-resizable) typed array.
CACHEOP_CASE(GuardIsFixedLengthTypedArray) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  if (!IsFixedLengthTypedArrayClass(obj->getClass())) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the proxy's handler pointer equals the one in stub data.
// (The operand is assumed to already be known to be a ProxyObject.)
CACHEOP_CASE(GuardHasProxyHandler) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t handlerOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  BaseProxyHandler* handler = reinterpret_cast<BaseProxyHandler*>(
  stubInfo->getStubRawWord(cstub, handlerOffset));
  if (obj->as<ProxyObject>().handler() != handler) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the proxy is not a DOM proxy (handler family comparison).
CACHEOP_CASE(GuardIsNotDOMProxy) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  if (obj->as<ProxyObject>().handler()->family() ==
  GetDOMProxyHandlerFamily()) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the operand is the exact object baked into stub data (identity).
CACHEOP_CASE(GuardSpecificObject) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t expectedOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  JSObject* expected = reinterpret_cast<JSObject*>(
  stubInfo->getStubRawWord(cstub, expectedOffset));
  if (obj != expected) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard two object operands are the same object (pointer identity).
CACHEOP_CASE(GuardObjectIdentity) {
  ObjOperandId obj1Id = cacheIRReader.objOperandId();
  ObjOperandId obj2Id = cacheIRReader.objOperandId();
  JSObject* obj1 = reinterpret_cast<JSObject*>(READ_REG(obj1Id.id()));
  JSObject* obj2 = reinterpret_cast<JSObject*>(READ_REG(obj2Id.id()));
  if (obj1 != obj2) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the function operand is the exact function in stub data.
// nargsAndFlags is carried for the compiled stub's benefit and unused here.
CACHEOP_CASE(GuardSpecificFunction) {
  ObjOperandId funId = cacheIRReader.objOperandId();
  uint32_t expectedOffset = cacheIRReader.stubOffset();
  uint32_t nargsAndFlagsOffset = cacheIRReader.stubOffset();
  (void)nargsAndFlagsOffset; // Unused.
  uintptr_t expected = stubInfo->getStubRawWord(cstub, expectedOffset);
  if (expected != READ_REG(funId.id())) {
    FAIL_IC();
  }
  PREDICT_NEXT(LoadArgumentFixedSlot);
  DISPATCH_CACHEOP();
}
// Guard the function's BaseScript equals the one in stub data (allows
// different function clones sharing a script to hit the same stub).
CACHEOP_CASE(GuardFunctionScript) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t expectedOffset = cacheIRReader.stubOffset();
  uint32_t nargsAndFlagsOffset = cacheIRReader.stubOffset();
  JSFunction* fun = reinterpret_cast<JSFunction*>(READ_REG(objId.id()));
  BaseScript* expected = reinterpret_cast<BaseScript*>(
  stubInfo->getStubRawWord(cstub, expectedOffset));
  (void)nargsAndFlagsOffset;
  if (!fun->hasBaseScript() || fun->baseScript() != expected) {
    FAIL_IC();
  }
  PREDICT_NEXT(CallScriptedFunction);
  DISPATCH_CACHEOP();
}
// Guard the string operand is the exact atom in stub data, by pointer.
CACHEOP_CASE(GuardSpecificAtom) {
  StringOperandId strId = cacheIRReader.stringOperandId();
  uint32_t expectedOffset = cacheIRReader.stubOffset();
  uintptr_t expected = stubInfo->getStubRawWord(cstub, expectedOffset);
  if (expected != READ_REG(strId.id())) {
    // TODO: BaselineCacheIRCompiler also checks for equal strings
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the symbol operand is the exact symbol in stub data.
CACHEOP_CASE(GuardSpecificSymbol) {
  SymbolOperandId symId = cacheIRReader.symbolOperandId();
  uint32_t expectedOffset = cacheIRReader.stubOffset();
  uintptr_t expected = stubInfo->getStubRawWord(cstub, expectedOffset);
  if (expected != READ_REG(symId.id())) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the int32 operand equals an immediate encoded in the IC stream.
CACHEOP_CASE(GuardSpecificInt32) {
  Int32OperandId numId = cacheIRReader.int32OperandId();
  int32_t expected = cacheIRReader.int32Immediate();
  if (expected != int32_t(READ_REG(numId.id()))) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the native object has no initialized dense elements.
CACHEOP_CASE(GuardNoDenseElements) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  if (obj->as<NativeObject>().getDenseInitializedLength() != 0) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Convert a string to a non-negative int32 index, using the string's
// cached index value when present; fail on non-index strings.
CACHEOP_CASE(GuardStringToIndex) {
  StringOperandId strId = cacheIRReader.stringOperandId();
  Int32OperandId resultId = cacheIRReader.int32OperandId();
  BOUNDSCHECK(resultId);
  JSString* str = reinterpret_cast<JSString*>(READ_REG(strId.id()));
  int32_t result;
  if (str->hasIndexValue()) {
    uint32_t index = str->getIndexValue();
    MOZ_ASSERT(index <= INT32_MAX);
    result = index;
  } else {
    // GetIndexFromString returns a negative value on failure.
    result = GetIndexFromString(str);
    if (result < 0) {
      FAIL_IC();
    }
  }
  WRITE_REG(resultId.id(), result, INT32);
  DISPATCH_CACHEOP();
}
// Convert a string to an int32, falling back to a pure (non-GC'ing)
// string-to-int32 conversion when no cached index value exists.
CACHEOP_CASE(GuardStringToInt32) {
  StringOperandId strId = cacheIRReader.stringOperandId();
  Int32OperandId resultId = cacheIRReader.int32OperandId();
  BOUNDSCHECK(resultId);
  JSString* str = reinterpret_cast<JSString*>(READ_REG(strId.id()));
  int32_t result;
  // Use indexed value as fast path if possible.
  if (str->hasIndexValue()) {
    uint32_t index = str->getIndexValue();
    MOZ_ASSERT(index <= INT32_MAX);
    result = index;
  } else {
    if (!GetInt32FromStringPure(ctx.frameMgr.cxForLocalUseOnly(), str,
    &result)) {
      FAIL_IC();
    }
  }
  WRITE_REG(resultId.id(), result, INT32);
  DISPATCH_CACHEOP();
}
// Convert a string to a boxed number (Int32Value fast path, DoubleValue
// otherwise) via pure string-to-number conversion.
CACHEOP_CASE(GuardStringToNumber) {
  StringOperandId strId = cacheIRReader.stringOperandId();
  NumberOperandId resultId = cacheIRReader.numberOperandId();
  BOUNDSCHECK(resultId);
  JSString* str = reinterpret_cast<JSString*>(READ_REG(strId.id()));
  Value result;
  // Use indexed value as fast path if possible.
  if (str->hasIndexValue()) {
    uint32_t index = str->getIndexValue();
    MOZ_ASSERT(index <= INT32_MAX);
    result = Int32Value(index);
  } else {
    double value;
    if (!StringToNumberPure(ctx.frameMgr.cxForLocalUseOnly(), str,
    &value)) {
      FAIL_IC();
    }
    result = DoubleValue(value);
  }
  WRITE_VALUE_REG(resultId.id(), result);
  DISPATCH_CACHEOP();
}
// Convert an unboxed boolean (0/1) to a boxed Int32Value number.
CACHEOP_CASE(BooleanToNumber) {
  BooleanOperandId booleanId = cacheIRReader.booleanOperandId();
  NumberOperandId resultId = cacheIRReader.numberOperandId();
  BOUNDSCHECK(resultId);
  uint64_t boolean = READ_REG(booleanId.id());
  MOZ_ASSERT((boolean & ~1) == 0);
  WRITE_VALUE_REG(resultId.id(), Int32Value(boolean));
  DISPATCH_CACHEOP();
}
// Guard the object (or its proto chain, per the pure helper) still has
// the expected GetterSetter for the given property id.
CACHEOP_CASE(GuardHasGetterSetter) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t idOffset = cacheIRReader.stubOffset();
  uint32_t getterSetterOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  jsid id = jsid::fromRawBits(stubInfo->getStubRawWord(cstub, idOffset));
  GetterSetter* getterSetter = reinterpret_cast<GetterSetter*>(
  stubInfo->getStubRawWord(cstub, getterSetterOffset));
  if (!ObjectHasGetterSetterPure(ctx.frameMgr.cxForLocalUseOnly(), obj,
  id, getterSetter)) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard an int32 operand is >= 0.
CACHEOP_CASE(GuardInt32IsNonNegative) {
  Int32OperandId indexId = cacheIRReader.int32OperandId();
  int32_t index = int32_t(READ_REG(indexId.id()));
  if (index < 0) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard a dynamic slot of the object holds exactly the expected object.
CACHEOP_CASE(GuardDynamicSlotIsSpecificObject) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  ObjOperandId expectedId = cacheIRReader.objOperandId();
  uint32_t slotOffset = cacheIRReader.stubOffset();
  JSObject* expected =
  reinterpret_cast<JSObject*>(READ_REG(expectedId.id()));
  uintptr_t slot = stubInfo->getStubRawInt32(cstub, slotOffset);
  NativeObject* nobj =
  reinterpret_cast<NativeObject*>(READ_REG(objId.id()));
  HeapSlot* slots = nobj->getSlotsUnchecked();
  // Note that unlike similar opcodes, GuardDynamicSlotIsSpecificObject
  // takes a slot index rather than a byte offset.
  Value actual = slots[slot];
  if (actual != ObjectValue(*expected)) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard a dynamic slot of the object does not hold an object value.
CACHEOP_CASE(GuardDynamicSlotIsNotObject) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t slotOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  uint32_t slot = stubInfo->getStubRawInt32(cstub, slotOffset);
  NativeObject* nobj = &obj->as<NativeObject>();
  HeapSlot* slots = nobj->getSlotsUnchecked();
  // Note that unlike similar opcodes, GuardDynamicSlotIsNotObject takes a
  // slot index rather than a byte offset.
  Value actual = slots[slot];
  if (actual.isObject()) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard a fixed slot (addressed by byte offset from the object) holds
// exactly the Value stored in stub data.
CACHEOP_CASE(GuardFixedSlotValue) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t offsetOffset = cacheIRReader.stubOffset();
  uint32_t valOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  uint32_t offset = stubInfo->getStubRawInt32(cstub, offsetOffset);
  Value val =
  Value::fromRawBits(stubInfo->getStubRawInt64(cstub, valOffset));
  GCPtr<Value>* slot = reinterpret_cast<GCPtr<Value>*>(
  reinterpret_cast<uintptr_t>(obj) + offset);
  Value actual = slot->get();
  if (actual != val) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard a dynamic slot (addressed by byte offset into the slots array)
// holds exactly the Value stored in stub data.
CACHEOP_CASE(GuardDynamicSlotValue) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t offsetOffset = cacheIRReader.stubOffset();
  uint32_t valOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  uint32_t offset = stubInfo->getStubRawInt32(cstub, offsetOffset);
  Value val =
  Value::fromRawBits(stubInfo->getStubRawInt64(cstub, valOffset));
  NativeObject* nobj = &obj->as<NativeObject>();
  HeapSlot* slots = nobj->getSlotsUnchecked();
  Value actual = slots[offset / sizeof(Value)];
  if (actual != val) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Load a fixed slot (byte offset from the object) into a value register.
CACHEOP_CASE(LoadFixedSlot) {
  ValOperandId resultId = cacheIRReader.valOperandId();
  BOUNDSCHECK(resultId);
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t offsetOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  uint32_t offset = stubInfo->getStubRawInt32(cstub, offsetOffset);
  GCPtr<Value>* slot = reinterpret_cast<GCPtr<Value>*>(
  reinterpret_cast<uintptr_t>(obj) + offset);
  Value actual = slot->get();
  WRITE_VALUE_REG(resultId.id(), actual);
  DISPATCH_CACHEOP();
}
// Load a dynamic slot into a value register.
CACHEOP_CASE(LoadDynamicSlot) {
  ValOperandId resultId = cacheIRReader.valOperandId();
  BOUNDSCHECK(resultId);
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t slotOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  uint32_t slot = stubInfo->getStubRawInt32(cstub, slotOffset);
  NativeObject* nobj = &obj->as<NativeObject>();
  HeapSlot* slots = nobj->getSlotsUnchecked();
  // Note that unlike similar opcodes, LoadDynamicSlot takes a slot index
  // rather than a byte offset.
  Value actual = slots[slot];
  WRITE_VALUE_REG(resultId.id(), actual);
  DISPATCH_CACHEOP();
}
// Guard that no allocation-metadata builder is installed (pointer read
// through an address stored in stub data must be null).
CACHEOP_CASE(GuardNoAllocationMetadataBuilder) {
  uint32_t builderAddrOffset = cacheIRReader.stubOffset();
  uintptr_t builderAddr =
  stubInfo->getStubRawWord(cstub, builderAddrOffset);
  if (*reinterpret_cast<uintptr_t*>(builderAddr) != 0) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the function has all of the "has JIT entry" flags set.
CACHEOP_CASE(GuardFunctionHasJitEntry) {
  ObjOperandId funId = cacheIRReader.objOperandId();
  JSObject* fun = reinterpret_cast<JSObject*>(READ_REG(funId.id()));
  uint16_t flags = FunctionFlags::HasJitEntryFlags();
  if (!fun->as<JSFunction>().flags().hasFlags(flags)) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the function does NOT have the "has JIT entry" flags.
CACHEOP_CASE(GuardFunctionHasNoJitEntry) {
  ObjOperandId funId = cacheIRReader.objOperandId();
  JSObject* fun = reinterpret_cast<JSObject*>(READ_REG(funId.id()));
  uint16_t flags = FunctionFlags::HasJitEntryFlags();
  if (fun->as<JSFunction>().flags().hasFlags(flags)) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the function is a constructor other than a built-in one.
CACHEOP_CASE(GuardFunctionIsNonBuiltinCtor) {
  ObjOperandId funId = cacheIRReader.objOperandId();
  JSObject* fun = reinterpret_cast<JSObject*>(READ_REG(funId.id()));
  if (!fun->as<JSFunction>().isNonBuiltinConstructor()) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the function can be invoked as a constructor.
CACHEOP_CASE(GuardFunctionIsConstructor) {
  ObjOperandId funId = cacheIRReader.objOperandId();
  JSObject* fun = reinterpret_cast<JSObject*>(READ_REG(funId.id()));
  if (!fun->as<JSFunction>().isConstructor()) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the function is not a class constructor.
CACHEOP_CASE(GuardNotClassConstructor) {
  ObjOperandId funId = cacheIRReader.objOperandId();
  JSObject* fun = reinterpret_cast<JSObject*>(READ_REG(funId.id()));
  if (fun->as<JSFunction>().isClassConstructor()) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the array has no holes (packed dense elements).
CACHEOP_CASE(GuardArrayIsPacked) {
  ObjOperandId arrayId = cacheIRReader.objOperandId();
  JSObject* array = reinterpret_cast<JSObject*>(READ_REG(arrayId.id()));
  if (!IsPackedArray(array)) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Guard the arguments object has none of the given flag bits set.
CACHEOP_CASE(GuardArgumentsObjectFlags) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint8_t flags = cacheIRReader.readByte();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  if (obj->as<ArgumentsObject>().hasFlags(flags)) {
    FAIL_IC();
  }
  DISPATCH_CACHEOP();
}
// Load an object pointer from stub data into an object register.
CACHEOP_CASE(LoadObject) {
  ObjOperandId resultId = cacheIRReader.objOperandId();
  BOUNDSCHECK(resultId);
  uint32_t objOffset = cacheIRReader.stubOffset();
  intptr_t obj = stubInfo->getStubRawWord(cstub, objOffset);
  WRITE_REG(resultId.id(), obj, OBJECT);
  PREDICT_NEXT(GuardShape);
  DISPATCH_CACHEOP();
}
// Load a prototype object from stub data; the receiver operand is only
// part of the IC encoding and is ignored here.
CACHEOP_CASE(LoadProtoObject) {
  ObjOperandId resultId = cacheIRReader.objOperandId();
  BOUNDSCHECK(resultId);
  uint32_t protoObjOffset = cacheIRReader.stubOffset();
  ObjOperandId receiverObjId = cacheIRReader.objOperandId();
  (void)receiverObjId;
  intptr_t obj = stubInfo->getStubRawWord(cstub, protoObjOffset);
  WRITE_REG(resultId.id(), obj, OBJECT);
  PREDICT_NEXT(GuardShape);
  DISPATCH_CACHEOP();
}
// Load an object's static prototype into an object register.
CACHEOP_CASE(LoadProto) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  ObjOperandId resultId = cacheIRReader.objOperandId();
  BOUNDSCHECK(resultId);
  NativeObject* nobj =
  reinterpret_cast<NativeObject*>(READ_REG(objId.id()));
  WRITE_REG(resultId.id(),
  reinterpret_cast<uint64_t>(nobj->staticPrototype()), OBJECT);
  DISPATCH_CACHEOP();
}
// Load an environment object's enclosing environment.
CACHEOP_CASE(LoadEnclosingEnvironment) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  ObjOperandId resultId = cacheIRReader.objOperandId();
  BOUNDSCHECK(resultId);
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  JSObject* env = &obj->as<EnvironmentObject>().enclosingEnvironment();
  WRITE_REG(resultId.id(), reinterpret_cast<uint64_t>(env), OBJECT);
  DISPATCH_CACHEOP();
}
// Load a wrapper (proxy) target from its private slot; when the IC was
// compiled as fallible, a null target fails the stub.
CACHEOP_CASE(LoadWrapperTarget) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  ObjOperandId resultId = cacheIRReader.objOperandId();
  bool fallible = cacheIRReader.readBool();
  BOUNDSCHECK(resultId);
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  JSObject* target = obj->as<ProxyObject>().private_().toObjectOrNull();
  if (fallible && !target) {
    FAIL_IC();
  }
  WRITE_REG(resultId.id(), reinterpret_cast<uintptr_t>(target), OBJECT);
  DISPATCH_CACHEOP();
}
// Extract a Value's tag bits (for tag-comparison ops).
CACHEOP_CASE(LoadValueTag) {
  ValOperandId valId = cacheIRReader.valOperandId();
  ValueTagOperandId resultId = cacheIRReader.valueTagOperandId();
  BOUNDSCHECK(resultId);
  Value val = READ_VALUE_REG(valId.id());
  WRITE_REG(resultId.id(), val.asRawBits() >> JSVAL_TAG_SHIFT, INT32);
  DISPATCH_CACHEOP();
}
// Load a call argument at a fixed stack-slot index into a value register.
CACHEOP_CASE(LoadArgumentFixedSlot) {
  ValOperandId resultId = cacheIRReader.valOperandId();
  BOUNDSCHECK(resultId);
  uint8_t slotIndex = cacheIRReader.readByte();
  StackVal* sp = ctx.sp();
  Value val = sp[slotIndex].asValue();
  TRACE_PRINTF(" -> slot %d: val %" PRIx64 "\n", int(slotIndex),
  val.asRawBits());
  WRITE_VALUE_REG(resultId.id(), val);
  DISPATCH_CACHEOP();
}
// Load a call argument at slotIndex + argc (argc from a register).
CACHEOP_CASE(LoadArgumentDynamicSlot) {
  ValOperandId resultId = cacheIRReader.valOperandId();
  BOUNDSCHECK(resultId);
  Int32OperandId argcId = cacheIRReader.int32OperandId();
  uint8_t slotIndex = cacheIRReader.readByte();
  int32_t argc = int32_t(READ_REG(argcId.id()));
  StackVal* sp = ctx.sp();
  Value val = sp[slotIndex + argc].asValue();
  WRITE_VALUE_REG(resultId.id(), val);
  DISPATCH_CACHEOP();
}
// Truncate a number to uint32 bit-pattern; ToInt32 and ToUint32 produce
// identical low 32 bits, so ToInt32 is used here.
CACHEOP_CASE(TruncateDoubleToUInt32) {
  NumberOperandId inputId = cacheIRReader.numberOperandId();
  Int32OperandId resultId = cacheIRReader.int32OperandId();
  BOUNDSCHECK(resultId);
  Value input = READ_VALUE_REG(inputId.id());
  WRITE_REG(resultId.id(), JS::ToInt32(input.toNumber()), INT32);
  DISPATCH_CACHEOP();
}
// Megamorphic property load by name: uses the pure (non-GC'ing) native
// lookup with the megamorphic cache; fails the IC on non-native shapes
// or unsuccessful pure lookup.
CACHEOP_CASE(MegamorphicLoadSlotResult) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t nameOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  jsid name =
  jsid::fromRawBits(stubInfo->getStubRawWord(cstub, nameOffset));
  if (!obj->shape()->isNative()) {
    FAIL_IC();
  }
  Value result;
  if (!GetNativeDataPropertyPureWithCacheLookup(
  ctx.frameMgr.cxForLocalUseOnly(), obj, name, nullptr,
  &result)) {
    FAIL_IC();
  }
  retValue = result.asRawBits();
  DISPATCH_CACHEOP();
}
// Megamorphic property load by value: values[0] is the id in, values[1]
// is the result out (calling convention of the pure helper).
CACHEOP_CASE(MegamorphicLoadSlotByValueResult) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  ValOperandId idId = cacheIRReader.valOperandId();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  Value id = READ_VALUE_REG(idId.id());
  if (!obj->shape()->isNative()) {
    FAIL_IC();
  }
  Value values[2] = {id};
  if (!GetNativeDataPropertyByValuePure(ctx.frameMgr.cxForLocalUseOnly(),
  obj, nullptr, values)) {
    FAIL_IC();
  }
  retValue = values[1].asRawBits();
  DISPATCH_CACHEOP();
}
// Megamorphic element store: may GC / run arbitrary code, so it pushes a
// real IC frame and roots its operands before calling the VM helper.
CACHEOP_CASE(MegamorphicSetElement) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  ValOperandId idId = cacheIRReader.valOperandId();
  ValOperandId rhsId = cacheIRReader.valOperandId();
  bool strict = cacheIRReader.readBool();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  Value id = READ_VALUE_REG(idId.id());
  Value rhs = READ_VALUE_REG(rhsId.id());
  {
    PUSH_IC_FRAME();
    ReservedRooted<JSObject*> obj0(&ctx.state.obj0, obj);
    ReservedRooted<Value> value0(&ctx.state.value0, id);
    ReservedRooted<Value> value1(&ctx.state.value1, rhs);
    if (!SetElementMegamorphic<false>(cx, obj0, value0, value1, strict)) {
      ctx.error = PBIResult::Error;
      return IC_ERROR_SENTINEL();
    }
  }
  DISPATCH_CACHEOP();
}
// Store a value into a fixed slot (byte offset from object), with the
// GC pre/post barriers provided by GCPtr::set.
CACHEOP_CASE(StoreFixedSlot) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t offsetOffset = cacheIRReader.stubOffset();
  ValOperandId rhsId = cacheIRReader.valOperandId();
  uintptr_t offset = stubInfo->getStubRawInt32(cstub, offsetOffset);
  NativeObject* nobj =
  reinterpret_cast<NativeObject*>(READ_REG(objId.id()));
  GCPtr<Value>* slot = reinterpret_cast<GCPtr<Value>*>(
  reinterpret_cast<uintptr_t>(nobj) + offset);
  Value val = READ_VALUE_REG(rhsId.id());
  slot->set(val);
  PREDICT_RETURN();
  DISPATCH_CACHEOP();
}
// Store a value into a dynamic slot (byte offset into the slots array),
// passing the absolute slot number for the barrier bookkeeping.
CACHEOP_CASE(StoreDynamicSlot) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t offsetOffset = cacheIRReader.stubOffset();
  ValOperandId rhsId = cacheIRReader.valOperandId();
  uint32_t offset = stubInfo->getStubRawInt32(cstub, offsetOffset);
  NativeObject* nobj =
  reinterpret_cast<NativeObject*>(READ_REG(objId.id()));
  HeapSlot* slots = nobj->getSlotsUnchecked();
  Value val = READ_VALUE_REG(rhsId.id());
  size_t dynSlot = offset / sizeof(Value);
  size_t slot = dynSlot + nobj->numFixedSlots();
  slots[dynSlot].set(nobj, HeapSlot::Slot, slot, val);
  PREDICT_RETURN();
  DISPATCH_CACHEOP();
}
// Add a property: install the new shape, then init the fixed slot
// (init, not set: the slot was previously uninitialized).
CACHEOP_CASE(AddAndStoreFixedSlot) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t offsetOffset = cacheIRReader.stubOffset();
  ValOperandId rhsId = cacheIRReader.valOperandId();
  uint32_t newShapeOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  int32_t offset = stubInfo->getStubRawInt32(cstub, offsetOffset);
  Value rhs = READ_VALUE_REG(rhsId.id());
  Shape* newShape = reinterpret_cast<Shape*>(
  stubInfo->getStubRawWord(cstub, newShapeOffset));
  obj->setShape(newShape);
  GCPtr<Value>* slot = reinterpret_cast<GCPtr<Value>*>(
  reinterpret_cast<uintptr_t>(obj) + offset);
  slot->init(rhs);
  PREDICT_RETURN();
  DISPATCH_CACHEOP();
}
// Add a property stored in an existing dynamic slot: install the new
// shape, then init the slot with barrier bookkeeping.
CACHEOP_CASE(AddAndStoreDynamicSlot) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t offsetOffset = cacheIRReader.stubOffset();
  ValOperandId rhsId = cacheIRReader.valOperandId();
  uint32_t newShapeOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  int32_t offset = stubInfo->getStubRawInt32(cstub, offsetOffset);
  Value rhs = READ_VALUE_REG(rhsId.id());
  Shape* newShape = reinterpret_cast<Shape*>(
  stubInfo->getStubRawWord(cstub, newShapeOffset));
  NativeObject* nobj = &obj->as<NativeObject>();
  obj->setShape(newShape);
  HeapSlot* slots = nobj->getSlotsUnchecked();
  size_t dynSlot = offset / sizeof(Value);
  size_t slot = dynSlot + nobj->numFixedSlots();
  slots[dynSlot].init(nobj, HeapSlot::Slot, slot, rhs);
  PREDICT_RETURN();
  DISPATCH_CACHEOP();
}
// Add a property that requires growing the dynamic-slots allocation.
CACHEOP_CASE(AllocateAndStoreDynamicSlot) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  uint32_t offsetOffset = cacheIRReader.stubOffset();
  ValOperandId rhsId = cacheIRReader.valOperandId();
  uint32_t newShapeOffset = cacheIRReader.stubOffset();
  uint32_t numNewSlotsOffset = cacheIRReader.stubOffset();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  int32_t offset = stubInfo->getStubRawInt32(cstub, offsetOffset);
  Value rhs = READ_VALUE_REG(rhsId.id());
  Shape* newShape = reinterpret_cast<Shape*>(
  stubInfo->getStubRawWord(cstub, newShapeOffset));
  int32_t numNewSlots =
  stubInfo->getStubRawInt32(cstub, numNewSlotsOffset);
  NativeObject* nobj = &obj->as<NativeObject>();
  // We have to (re)allocate dynamic slots. Do this first, as it's the
  // only fallible operation here. Note that growSlotsPure is fallible but
  // does not GC. Otherwise this is the same as AddAndStoreDynamicSlot
  // above.
  if (!NativeObject::growSlotsPure(ctx.frameMgr.cxForLocalUseOnly(), nobj,
  numNewSlots)) {
    FAIL_IC();
  }
  obj->setShape(newShape);
  HeapSlot* slots = nobj->getSlotsUnchecked();
  size_t dynSlot = offset / sizeof(Value);
  size_t slot = dynSlot + nobj->numFixedSlots();
  slots[dynSlot].init(nobj, HeapSlot::Slot, slot, rhs);
  PREDICT_RETURN();
  DISPATCH_CACHEOP();
}
// Store into an existing, in-bounds, non-hole dense element.
CACHEOP_CASE(StoreDenseElement) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  Int32OperandId indexId = cacheIRReader.int32OperandId();
  ValOperandId rhsId = cacheIRReader.valOperandId();
  NativeObject* nobj =
  reinterpret_cast<NativeObject*>(READ_REG(objId.id()));
  ObjectElements* elems = nobj->getElementsHeader();
  int32_t index = int32_t(READ_REG(indexId.id()));
  if (index < 0 || uint32_t(index) >= nobj->getDenseInitializedLength()) {
    FAIL_IC();
  }
  HeapSlot* slot = &elems->elements()[index];
  // A magic value marks a hole; storing over a hole takes the slow path.
  if (slot->get().isMagic()) {
    FAIL_IC();
  }
  Value val = READ_VALUE_REG(rhsId.id());
  slot->set(nobj, HeapSlot::Element, index + elems->numShiftedElements(),
  val);
  PREDICT_RETURN();
  DISPATCH_CACHEOP();
}
// Store into a dense element, optionally appending exactly one element
// at index == initLength when handleAdd is set (growing capacity and
// the array's length field as needed).
CACHEOP_CASE(StoreDenseElementHole) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  Int32OperandId indexId = cacheIRReader.int32OperandId();
  ValOperandId rhsId = cacheIRReader.valOperandId();
  bool handleAdd = cacheIRReader.readBool();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  uint32_t index = uint32_t(READ_REG(indexId.id()));
  Value rhs = READ_VALUE_REG(rhsId.id());
  NativeObject* nobj = &obj->as<NativeObject>();
  uint32_t initLength = nobj->getDenseInitializedLength();
  if (index < initLength) {
    nobj->setDenseElement(index, rhs);
  } else if (!handleAdd || index > initLength) {
    FAIL_IC();
  } else {
    if (index >= nobj->getDenseCapacity()) {
      if (!NativeObject::addDenseElementPure(
      ctx.frameMgr.cxForLocalUseOnly(), nobj)) {
        FAIL_IC();
      }
    }
    nobj->setDenseInitializedLength(initLength + 1);
    // Baseline always updates the length field by directly accessing its
    // offset in ObjectElements. If the object is not an ArrayObject then
    // this field is never read, so it's okay to skip the update here in
    // that case.
    if (nobj->is<ArrayObject>()) {
      ArrayObject* aobj = &nobj->as<ArrayObject>();
      uint32_t len = aobj->length();
      if (len <= index) {
        aobj->setLength(len + 1);
      }
    }
    nobj->initDenseElement(index, rhs);
  }
  PREDICT_RETURN();
  DISPATCH_CACHEOP();
}
// Array.prototype.push fast path: append one element and return the new
// length; only valid when length == initialized length (no holes at end).
CACHEOP_CASE(ArrayPush) {
  ObjOperandId objId = cacheIRReader.objOperandId();
  ValOperandId rhsId = cacheIRReader.valOperandId();
  JSObject* obj = reinterpret_cast<JSObject*>(READ_REG(objId.id()));
  Value rhs = READ_VALUE_REG(rhsId.id());
  ArrayObject* aobj = &obj->as<ArrayObject>();
  uint32_t initLength = aobj->getDenseInitializedLength();
  if (aobj->length() != initLength) {
    FAIL_IC();
  }
  if (initLength >= aobj->getDenseCapacity()) {
    if (!NativeObject::addDenseElementPure(
    ctx.frameMgr.cxForLocalUseOnly(), aobj)) {
      FAIL_IC();
    }
  }
  aobj->setDenseInitializedLength(initLength + 1);
  aobj->setLength(initLength + 1);
  aobj->initDenseElement(initLength, rhs);
  retValue = Int32Value(initLength + 1).asRawBits();
  PREDICT_RETURN();
  DISPATCH_CACHEOP();
}
// Produce a boolean: is the operand an object? (Never fails.)
CACHEOP_CASE(IsObjectResult) {
  ValOperandId inputId = cacheIRReader.valOperandId();
  Value val = READ_VALUE_REG(inputId.id());
  retValue = BooleanValue(val.isObject()).asRawBits();
  PREDICT_RETURN();
  DISPATCH_CACHEOP();
}