/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_MacroAssembler_h
#define jit_MacroAssembler_h
#include "mozilla/EndianUtils.h"
#include "mozilla/MacroForEach.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/Maybe.h"
#include "mozilla/Variant.h"
#if defined(JS_CODEGEN_X86)
# include "jit/x86/MacroAssembler-x86.h"
#elif defined(JS_CODEGEN_X64)
# include "jit/x64/MacroAssembler-x64.h"
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/MacroAssembler-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/MacroAssembler-arm64.h"
#elif defined(JS_CODEGEN_MIPS32)
# include "jit/mips32/MacroAssembler-mips32.h"
#elif defined(JS_CODEGEN_MIPS64)
# include "jit/mips64/MacroAssembler-mips64.h"
#elif defined(JS_CODEGEN_LOONG64)
# include "jit/loong64/MacroAssembler-loong64.h"
#elif defined(JS_CODEGEN_RISCV64)
# include "jit/riscv64/MacroAssembler-riscv64.h"
#elif defined(JS_CODEGEN_WASM32)
# include "jit/wasm32/MacroAssembler-wasm32.h"
#elif defined(JS_CODEGEN_NONE)
# include "jit/none/MacroAssembler-none.h"
#else
# error "Unknown architecture!"
#endif
#include "jit/ABIArgGenerator.h"
#include "jit/ABIFunctions.h"
#include "jit/AtomicOp.h"
#include "jit/IonTypes.h"
#include "jit/MoveResolver.h"
#include "jit/VMFunctions.h"
#include "js/ScalarType.h" // js::Scalar::Type
#include "util/Memory.h"
#include "vm/FunctionFlags.h"
#include "vm/Opcodes.h"
#include "wasm/WasmCodegenTypes.h"
#include "wasm/WasmFrame.h"
// [SMDOC] MacroAssembler multi-platform overview
//
// * How to read/write MacroAssembler method declarations:
//
// The following macros are made to avoid #ifdef around each method
// declaration of the Macro Assembler, and they are also used as a hint on
// the location of the implementations of each method. For example, the
// following declaration
//
// void Pop(FloatRegister t) DEFINED_ON(x86_shared, arm);
//
// suggests the MacroAssembler::Pop(FloatRegister) method is implemented in
// x86-shared/MacroAssembler-x86-shared.cpp, and also in
// arm/MacroAssembler-arm.cpp.
//
// - If there is no annotation, then there is only one generic definition in
// MacroAssembler.cpp.
//
// - If the declaration is "inline", then the method definition(s) would be in
// the "-inl.h" variant of the same file(s).
//
// The script check_macroassembler_style.py (which runs on every build) is
// used to verify that method definitions match the annotations on the method
// declarations. If there is any difference, then you either forgot to define
// the method in one of the macro assemblers, or you forgot to update the
// annotation on the method declaration.
//
// Some convenient short-cuts are used to avoid repeating the same list of
// architectures on each method declaration, such as PER_ARCH and
// PER_SHARED_ARCH.
//
// Functions that are architecture-agnostic and identical for all
// architectures, but that must be defined inline *in this header* to avoid
// used-before-defined warnings/errors that would occur if the definitions
// were in MacroAssembler-inl.h, should use the OOL_IN_HEADER marker at the
// end of the declaration:
//
// inline uint32_t framePushed() const OOL_IN_HEADER;
//
// Such functions should then be defined immediately after MacroAssembler's
// definition, for example:
//
//   //{{{ check_macroassembler_style
//   inline uint32_t
//   MacroAssembler::framePushed() const
//   {
//     return framePushed_;
//   }
//   //}}} check_macroassembler_style
#define ALL_ARCH mips32, mips64, arm, arm64, x86, x64, loong64, riscv64, wasm32
#define ALL_SHARED_ARCH \
arm, arm64, loong64, riscv64, x86_shared, mips_shared, wasm32
// * How this macro works:
//
// DEFINED_ON is a macro which checks whether, for the current architecture,
// the method is defined on the macro assembler or not.
//
// For each architecture, we have a macro named DEFINED_ON_arch. This macro is
// empty if this is not the current architecture. Otherwise it is set to
// either "define" or "crash" (the latter only used for the none target so
// far).
//
// The DEFINED_ON macro maps the list of architecture names given as arguments
// to a list of macro names. For example,
//
// DEFINED_ON(arm, x86_shared)
//
// is expanded to
//
// DEFINED_ON_none DEFINED_ON_arm DEFINED_ON_x86_shared
//
// which are later expanded on ARM, x86, x64 by DEFINED_ON_EXPAND_ARCH_RESULTS
// to
//
// define
//
// or, if the JIT is disabled or no architecture is selected, to
//
//   crash
//
// or to nothing, if the current architecture is not listed in the arguments
// of DEFINED_ON. Note that only one of the DEFINED_ON_arch macros
// contributes to the non-empty result: the macro of the current
// architecture, if it is listed in the arguments of DEFINED_ON.
//
// This result is appended to DEFINED_ON_RESULT_ before expanding the macro,
// which results in either no annotation, a MOZ_CRASH(), or a "= delete"
// annotation on the method declaration.
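// As a concrete illustration (a sketch; intermediate expansion steps are
// abbreviated), consider the declaration
//
//   void Pop(const Operand op) DEFINED_ON(x86_shared);
//
// When compiling for x64, DEFINED_ON(x86_shared) expands roughly as
//
//   DEFINED_ON_MAP_ON_ARCHS((none, x86_shared))
//   --> DEFINED_ON_DISPATCH_RESULT(define)   // from DEFINED_ON_x86_shared
//   --> DEFINED_ON_RESULT_define
//   --> (nothing: the method is declared normally)
//
// while on arm every DEFINED_ON_arch in the list expands to nothing, so the
// dispatch selects DEFINED_ON_RESULT_ and the declaration becomes
// "= delete". On the none target, DEFINED_ON_none expands to "crash", which
// selects DEFINED_ON_RESULT_crash and gives the method a { MOZ_CRASH(); }
// body.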
#define DEFINED_ON_x86
#define DEFINED_ON_x64
#define DEFINED_ON_x86_shared
#define DEFINED_ON_arm
#define DEFINED_ON_arm64
#define DEFINED_ON_mips32
#define DEFINED_ON_mips64
#define DEFINED_ON_mips_shared
#define DEFINED_ON_loong64
#define DEFINED_ON_riscv64
#define DEFINED_ON_wasm32
#define DEFINED_ON_none
// Specialize for each architecture.
#if defined(JS_CODEGEN_X86)
# undef DEFINED_ON_x86
# define DEFINED_ON_x86 define
# undef DEFINED_ON_x86_shared
# define DEFINED_ON_x86_shared define
#elif defined(JS_CODEGEN_X64)
# undef DEFINED_ON_x64
# define DEFINED_ON_x64 define
# undef DEFINED_ON_x86_shared
# define DEFINED_ON_x86_shared define
#elif defined(JS_CODEGEN_ARM)
# undef DEFINED_ON_arm
# define DEFINED_ON_arm define
#elif defined(JS_CODEGEN_ARM64)
# undef DEFINED_ON_arm64
# define DEFINED_ON_arm64 define
#elif defined(JS_CODEGEN_MIPS32)
# undef DEFINED_ON_mips32
# define DEFINED_ON_mips32 define
# undef DEFINED_ON_mips_shared
# define DEFINED_ON_mips_shared define
#elif defined(JS_CODEGEN_MIPS64)
# undef DEFINED_ON_mips64
# define DEFINED_ON_mips64 define
# undef DEFINED_ON_mips_shared
# define DEFINED_ON_mips_shared define
#elif defined(JS_CODEGEN_LOONG64)
# undef DEFINED_ON_loong64
# define DEFINED_ON_loong64 define
#elif defined(JS_CODEGEN_RISCV64)
# undef DEFINED_ON_riscv64
# define DEFINED_ON_riscv64 define
#elif defined(JS_CODEGEN_WASM32)
# undef DEFINED_ON_wasm32
# define DEFINED_ON_wasm32 define
#elif defined(JS_CODEGEN_NONE)
# undef DEFINED_ON_none
# define DEFINED_ON_none crash
#else
# error "Unknown architecture!"
#endif
#define DEFINED_ON_RESULT_crash \
{ MOZ_CRASH(); }
#define DEFINED_ON_RESULT_define
#define DEFINED_ON_RESULT_ = delete
#define DEFINED_ON_DISPATCH_RESULT_2(Macro, Result) Macro##Result
#define DEFINED_ON_DISPATCH_RESULT(...) \
DEFINED_ON_DISPATCH_RESULT_2(DEFINED_ON_RESULT_, __VA_ARGS__)
// We need to let the evaluation of MOZ_FOR_EACH terminate.
#define DEFINED_ON_EXPAND_ARCH_RESULTS_3(ParenResult) \
DEFINED_ON_DISPATCH_RESULT ParenResult
#define DEFINED_ON_EXPAND_ARCH_RESULTS_2(ParenResult) \
DEFINED_ON_EXPAND_ARCH_RESULTS_3(ParenResult)
#define DEFINED_ON_EXPAND_ARCH_RESULTS(ParenResult) \
DEFINED_ON_EXPAND_ARCH_RESULTS_2(ParenResult)
#define DEFINED_ON_FWDARCH(Arch) DEFINED_ON_##Arch
#define DEFINED_ON_MAP_ON_ARCHS(ArchList) \
DEFINED_ON_EXPAND_ARCH_RESULTS( \
(MOZ_FOR_EACH(DEFINED_ON_FWDARCH, (), ArchList)))
#define DEFINED_ON(...) DEFINED_ON_MAP_ON_ARCHS((none, __VA_ARGS__))
#define PER_ARCH DEFINED_ON(ALL_ARCH)
#define PER_SHARED_ARCH DEFINED_ON(ALL_SHARED_ARCH)
#define OOL_IN_HEADER
namespace JS {
struct ExpandoAndGeneration;
}
namespace js {
class StaticStrings;
class TypedArrayObject;
enum class NativeIteratorIndices : uint32_t;
namespace wasm {
class CalleeDesc;
class CallSiteDesc;
class BytecodeOffset;
class MemoryAccessDesc;
struct ModuleEnvironment;
enum class FailureMode : uint8_t;
enum class SimdOp;
enum class SymbolicAddress;
enum class Trap;
} // namespace wasm
namespace jit {
// Defined in JitFrames.h
enum class ExitFrameType : uint8_t;
class AutoSaveLiveRegisters;
class CompileZone;
class TemplateNativeObject;
class TemplateObject;
enum class CheckUnsafeCallWithABI {
// Require the callee to use AutoUnsafeCallWithABI.
Check,
// We pushed an exit frame so this callWithABI can safely GC and walk the
// stack.
DontCheckHasExitFrame,
// Don't check that this callWithABI uses AutoUnsafeCallWithABI, for instance
// because we're calling a simple helper function (like malloc or js_free)
// that we can't change and/or that we know won't GC.
DontCheckOther,
};
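// For example (a sketch; argument setup elided), a call to a helper that is
// known not to GC can opt out of the AutoUnsafeCallWithABI check:
//
//   using Fn = void (*)(void*);
//   masm.callWithABI<Fn, js_free>(MoveOp::GENERAL,
//                                 CheckUnsafeCallWithABI::DontCheckOther);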
// This is a global function made to create the DynFn type in a controlled
// environment which checks that the function signature has been registered
// as an ABI function signature.
template <typename Sig>
static inline DynFn DynamicFunction(Sig fun);
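// A usage sketch (SomeHelper is hypothetical; its signature must have been
// registered as an ABI function signature for this to compile):
//
//   using Fn = int32_t (*)(JSContext*, int32_t);
//   masm.callWithABI(DynamicFunction<Fn>(SomeHelper), MoveOp::GENERAL);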
enum class CharEncoding { Latin1, TwoByte };
constexpr uint32_t WasmCallerInstanceOffsetBeforeCall =
wasm::FrameWithInstances::callerInstanceOffsetWithoutFrame();
constexpr uint32_t WasmCalleeInstanceOffsetBeforeCall =
wasm::FrameWithInstances::calleeInstanceOffsetWithoutFrame();
// Allocation sites may be passed to GC thing allocation methods either via a
// register (for baseline compilation) or an enum indicating one of the
// catch-all allocation sites (for optimized compilation).
struct AllocSiteInput
: public mozilla::Variant<Register, gc::CatchAllAllocSite> {
using Base = mozilla::Variant<Register, gc::CatchAllAllocSite>;
AllocSiteInput() : Base(gc::CatchAllAllocSite::Unknown) {}
explicit AllocSiteInput(gc::CatchAllAllocSite catchAll) : Base(catchAll) {}
explicit AllocSiteInput(Register reg) : Base(reg) {}
};
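// Usage sketch (register choice is hypothetical):
//
//   // Baseline: the allocation site is passed in a register.
//   AllocSiteInput input(siteReg);
//   // Optimized code: use one of the catch-all sites.
//   AllocSiteInput catchAll(gc::CatchAllAllocSite::Optimized);
//
// Either form is then forwarded to a GC thing allocation method such as
// createGCObject.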
#ifdef ENABLE_WASM_TAIL_CALLS
// Instance slots (including ShadowStackArea) and arguments size information
// from two neighboring frames.
// Used in Wasm tail calls to remove the current frame.
struct ReturnCallAdjustmentInfo {
uint32_t newSlotsAndStackArgBytes;
uint32_t oldSlotsAndStackArgBytes;
ReturnCallAdjustmentInfo(uint32_t newSlotsAndStackArgBytes,
uint32_t oldSlotsAndStackArgBytes)
: newSlotsAndStackArgBytes(newSlotsAndStackArgBytes),
oldSlotsAndStackArgBytes(oldSlotsAndStackArgBytes) {}
};
#endif // ENABLE_WASM_TAIL_CALLS
// [SMDOC] Code generation invariants (incomplete)
//
// ## 64-bit GPRs carrying 32-bit values
//
// At least at the end of every JS or Wasm operation (= SpiderMonkey bytecode or
// Wasm bytecode; this is necessarily a little vague), if a 64-bit GPR has a
// 32-bit value, then the upper 32 bits of the register may be predictable in
// accordance with platform-specific rules, as follows.
//
// - On x64 and arm64, the upper bits are zero
// - On mips64 and loongarch64 the upper bits are the sign extension of the
// lower bits
// - (On riscv64 no rule has been formalized yet. Sign extension is the most
//   likely rule, since 32-bit operations sign-extend their results, but
//   "unpredictable" is an option.)
//
// In most cases no extra work needs to be done to maintain the invariant:
//
// - 32-bit operations on x64 and arm64 zero-extend the result to 64 bits.
// These operations ignore the upper bits of the inputs.
// - 32-bit operations on mips64 sign-extend the result to 64 bits (even many
//   that are labeled as "unsigned", eg ADDU, though not all, eg LWU).
//   Additionally, the inputs to many 32-bit operations must be properly
//   sign-extended to avoid "unpredictable" behavior, and our simulators check
//   that inputs conform.
// - (32-bit operations on riscv64 and loongarch64 sign-extend, much as on
//   mips, but appear to ignore the upper bits of the inputs.)
//
// The upshot of these invariants is, among other things, that:
//
// - No code needs to be generated when a 32-bit value is extended to 64 bits
// or a 64-bit value is wrapped to 32 bits, if the upper bits are known to be
// correct because they resulted from an operation that produced them
// predictably.
// - Literal loads must be careful to avoid instructions that might extend the
// literal in the wrong way.
// - Code that produces values using intermediate values with non-canonical
// extensions must extend according to platform conventions before being
// "done".
//
// All optimizations are necessarily platform-specific and should only be used
// in platform-specific code. We may add architectures in the future that do
// not follow the patterns of the few architectures we already have.
//
// Also see MacroAssembler::debugAssertCanonicalInt32().
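// For example, platform-specific x64 code may rely on the invariant like
// this (a sketch):
//
//   masm.move32(src, dest);  // on x64 this zeroes the upper 32 bits of dest
//   // dest now already holds a canonical zero-extended 64-bit value, so no
//   // additional extension code is needed before using it at pointer width.
//
// Platform-independent code must instead call move32To64ZeroExtend (or a
// similar method) and let each port decide whether any instruction needs to
// be emitted.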
// The public entrypoint for emitting assembly. Note that a MacroAssembler can
// use cx->lifoAlloc, so take care not to interleave masm use with other
// lifoAlloc use if one will be destroyed before the other.
class MacroAssembler : public MacroAssemblerSpecific {
private:
// Information about the current JSRuntime. This is nullptr only for Wasm
// compilations.
CompileRuntime* maybeRuntime_ = nullptr;
// Information about the current Realm. This is nullptr for Wasm compilations
// and when compiling JitRuntime trampolines.
CompileRealm* maybeRealm_ = nullptr;
// Labels for handling exceptions and failures.
NonAssertingLabel failureLabel_;
protected:
// Constructor is protected. Use one of the derived classes!
explicit MacroAssembler(TempAllocator& alloc,
CompileRuntime* maybeRuntime = nullptr,
CompileRealm* maybeRealm = nullptr);
public:
MoveResolver& moveResolver() {
// As an optimization, the MoveResolver is a persistent data structure
// shared between visitors in the CodeGenerator. This assertion
// checks that state is not leaking from visitor to visitor
// via an unresolved addMove().
MOZ_ASSERT(moveResolver_.hasNoPendingMoves());
return moveResolver_;
}
size_t instructionsSize() const { return size(); }
CompileRealm* realm() const {
MOZ_ASSERT(maybeRealm_);
return maybeRealm_;
}
CompileRuntime* runtime() const {
MOZ_ASSERT(maybeRuntime_);
return maybeRuntime_;
}
#ifdef JS_HAS_HIDDEN_SP
void Push(RegisterOrSP reg);
#endif
#ifdef ENABLE_WASM_SIMD
// `op` should be a shift operation. Return true if a variable-width shift
// operation on this architecture should pre-mask the shift count, and if so,
// return the mask in `*mask`.
static bool MustMaskShiftCountSimd128(wasm::SimdOp op, int32_t* mask);
#endif
//{{{ check_macroassembler_decl_style
public:
// ===============================================================
// MacroAssembler high-level usage.
// Flushes the assembly buffer, on platforms that need it.
void flush() PER_SHARED_ARCH;
// Add a comment that is visible in the pretty printed assembly code.
void comment(const char* msg) PER_SHARED_ARCH;
// ===============================================================
// Frame manipulation functions.
inline uint32_t framePushed() const OOL_IN_HEADER;
inline void setFramePushed(uint32_t framePushed) OOL_IN_HEADER;
inline void adjustFrame(int32_t value) OOL_IN_HEADER;
// Adjust the frame, to account for implicit modification of the stack
// pointer, such that the callee can remove arguments on behalf of the
// caller.
inline void implicitPop(uint32_t bytes) OOL_IN_HEADER;
private:
// This field is used to statically (at compilation time) emulate a frame
// pointer by keeping track of stack manipulations.
//
// It is maintained by all stack manipulation functions below.
uint32_t framePushed_;
public:
// ===============================================================
// Stack manipulation functions -- sets of registers.
// Approximately speaking, the following routines must use the same memory
// layout. Any inconsistencies will certainly lead to crashing in generated
// code:
//
// MacroAssembler::PushRegsInMaskSizeInBytes
// MacroAssembler::PushRegsInMask
// MacroAssembler::storeRegsInMask
// MacroAssembler::PopRegsInMask
// MacroAssembler::PopRegsInMaskIgnore
// FloatRegister::getRegisterDumpOffsetInBytes
// (no class) PushRegisterDump
// (union) RegisterContent
// JitRuntime::generateInvalidator
// JitRuntime::generateBailoutHandler
// JSJitFrameIter::machineState
//
// To be more exact, the invariants are:
//
// * The save area is conceptually viewed as starting at a highest address
// (really, at "highest address - 1") and working down to some lower
// address.
//
// * PushRegsInMask, storeRegsInMask and PopRegsInMask{Ignore} must use
// exactly the same memory layout, when starting from the abovementioned
// highest address.
//
// * PushRegsInMaskSizeInBytes must produce a value which is exactly equal
// to the change in the machine's stack pointer register as a result of
// calling PushRegsInMask or PopRegsInMask{Ignore}. This value must be at
// least uintptr_t-aligned on the target, and may be more aligned than that.
//
// * PushRegsInMaskSizeInBytes must produce a value which is greater than or
// equal to the amount of space used by storeRegsInMask.
//
// * Hence, regardless of whether the save area is created with
// storeRegsInMask or PushRegsInMask, it is guaranteed to fit inside an
// area of size calculated by PushRegsInMaskSizeInBytes.
//
// * For the `ignore` argument of PopRegsInMaskIgnore, equality checking
// for the floating point/SIMD registers is done on the basis of the
// underlying physical register, regardless of width. For example, if the
// to-restore set contains v17 (the SIMD register with encoding 17) and
// the ignore set contains d17 (the double register with encoding 17) then
// no part of the physical register with encoding 17 will be restored.
// (This is probably not true on arm32, since that has aliased float32
// registers; but none of our other targets do.)
//
// * {Push,store}RegsInMask are further constrained as
// follows: when given the argument AllFloatRegisters, the resulting
// memory area must contain exactly all the SIMD/FP registers for the
// target at their widest width (that we care about). [We have no targets
// where the SIMD registers and FP register sets are disjoint.] They must
// be packed end-to-end with no holes, with the register with the lowest
// encoding number (0), as returned by FloatRegister::encoding(), at the
// abovementioned highest address, register 1 just below that, etc.
//
// Furthermore the sizeof(RegisterContent) must equal the size of a SIMD
// register in the abovementioned array.
//
// Furthermore the value returned by
// FloatRegister::getRegisterDumpOffsetInBytes must be a correct index
// into the abovementioned array. Given the constraints, the only correct
// value is `reg.encoding() * sizeof(RegisterContent)`.
//
// Note that some of the routines listed above are JS-only, and do not support
// SIMD registers. They are otherwise part of the same equivalence class.
// Register spilling for e.g. OOL VM calls is implemented using
// PushRegsInMask, and recovered on bailout using machineState. This requires
// the same layout to be used in machineState, and therefore in all other code
// that can spill registers that are recovered on bailout. Implementations of
// JitRuntime::generate{Invalidator,BailoutHandler} should either call
// PushRegsInMask, or check carefully to be sure that they generate the same
// layout.
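// For example, a register dump can be created without pushing (a sketch;
// |set| and |scratch| are hypothetical):
//
//   size_t bytes = masm.PushRegsInMaskSizeInBytes(set);
//   masm.reserveStack(bytes);
//   // The destination must point at the end (highest address) of the
//   // reserved area.
//   masm.storeRegsInMask(set, Address(masm.getStackPointer(), bytes),
//                        scratch);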
// The size of the area used by PushRegsInMask.
size_t PushRegsInMaskSizeInBytes(LiveRegisterSet set)
DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
x86_shared);
void PushRegsInMask(LiveRegisterSet set)
DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
x86_shared);
void PushRegsInMask(LiveGeneralRegisterSet set);
// Like PushRegsInMask, but instead of pushing the registers, store them to
// |dest|. |dest| should point to the end of the reserved space, so the
// first register will be stored at |dest.offset - sizeof(register)|. It is
// required that |dest.offset| is at least as large as the value computed by
// PushRegsInMaskSizeInBytes for this |set|. In other words, |dest.base|
// must point to either the lowest address in the save area, or some address
// below that.
void storeRegsInMask(LiveRegisterSet set, Address dest, Register scratch)
DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
x86_shared);
void PopRegsInMask(LiveRegisterSet set);
void PopRegsInMask(LiveGeneralRegisterSet set);
void PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
DEFINED_ON(arm, arm64, mips32, mips64, loong64, riscv64, wasm32,
x86_shared);
// ===============================================================
// Stack manipulation functions -- single registers/values.
void Push(const Operand op) DEFINED_ON(x86_shared);
void Push(Register reg) PER_SHARED_ARCH;
void Push(Register reg1, Register reg2, Register reg3, Register reg4)
DEFINED_ON(arm64);
void Push(const Imm32 imm) PER_SHARED_ARCH;
void Push(const ImmWord imm) PER_SHARED_ARCH;
void Push(const ImmPtr imm) PER_SHARED_ARCH;
void Push(const ImmGCPtr ptr) PER_SHARED_ARCH;
void Push(FloatRegister reg) PER_SHARED_ARCH;
void PushBoxed(FloatRegister reg) PER_ARCH;
void PushFlags() DEFINED_ON(x86_shared);
void Push(PropertyKey key, Register scratchReg);
void Push(const Address& addr);
void Push(TypedOrValueRegister v);
void Push(const ConstantOrRegister& v);
void Push(const ValueOperand& val);
void Push(const Value& val);
void Push(JSValueType type, Register reg);
void Push(const Register64 reg);
void PushEmptyRooted(VMFunctionData::RootType rootType);
inline CodeOffset PushWithPatch(ImmWord word);
inline CodeOffset PushWithPatch(ImmPtr imm);
void Pop(const Operand op) DEFINED_ON(x86_shared);
void Pop(Register reg) PER_SHARED_ARCH;
void Pop(FloatRegister t) PER_SHARED_ARCH;
void Pop(const ValueOperand& val) PER_SHARED_ARCH;
void PopFlags() DEFINED_ON(x86_shared);
void PopStackPtr()
DEFINED_ON(arm, mips_shared, x86_shared, loong64, riscv64, wasm32);
void popRooted(VMFunctionData::RootType rootType, Register cellReg,
const ValueOperand& valueReg);
// Move the stack pointer based on the requested amount.
void adjustStack(int amount);
void freeStack(uint32_t amount);
// Move the stack pointer to the specified position. It assumes the SP
// register is not valid -- it uses FP to set the position.
void freeStackTo(uint32_t framePushed)
DEFINED_ON(x86_shared, arm, arm64, loong64, mips64);
// Warning: This method does not update the framePushed() counter.
void freeStack(Register amount);
private:
// ===============================================================
// Register allocation fields.
#ifdef DEBUG
friend AutoRegisterScope;
friend AutoFloatRegisterScope;
// Used to track register scopes for debug builds.
// Manipulated by the AutoGenericRegisterScope class.
AllocatableRegisterSet debugTrackedRegisters_;
#endif // DEBUG
public:
// ===============================================================
// Simple call functions.
// The returned CodeOffset is the assembler offset for the instruction
// immediately following the call; that is, for the return point.
CodeOffset call(Register reg) PER_SHARED_ARCH;
CodeOffset call(Label* label) PER_SHARED_ARCH;
void call(const Address& addr) PER_SHARED_ARCH;
void call(ImmWord imm) PER_SHARED_ARCH;
// Call a target native function, which is neither traceable nor movable.
void call(ImmPtr imm) PER_SHARED_ARCH;
CodeOffset call(wasm::SymbolicAddress imm) PER_SHARED_ARCH;
inline CodeOffset call(const wasm::CallSiteDesc& desc,
wasm::SymbolicAddress imm);
// Call a target JitCode, which must be traceable, and may be movable.
void call(JitCode* c) PER_SHARED_ARCH;
inline void call(TrampolinePtr code);
inline CodeOffset call(const wasm::CallSiteDesc& desc, const Register reg);
inline CodeOffset call(const wasm::CallSiteDesc& desc, uint32_t funcDefIndex);
inline void call(const wasm::CallSiteDesc& desc, wasm::Trap trap);
CodeOffset callWithPatch() PER_SHARED_ARCH;
void patchCall(uint32_t callerOffset, uint32_t calleeOffset) PER_SHARED_ARCH;
// Push the return address and make a call. On platforms where this function
// is not defined, push the link register (pushReturnAddress) at the entry
// point of the callee.
void callAndPushReturnAddress(Register reg) DEFINED_ON(x86_shared);
void callAndPushReturnAddress(Label* label) DEFINED_ON(x86_shared);
// These do not adjust framePushed().
void pushReturnAddress()
DEFINED_ON(mips_shared, arm, arm64, loong64, riscv64, wasm32);
void popReturnAddress()
DEFINED_ON(mips_shared, arm, arm64, loong64, riscv64, wasm32);
// Useful for dealing with two-valued returns.
void moveRegPair(Register src0, Register src1, Register dst0, Register dst1,
MoveOp::Type type = MoveOp::GENERAL);
public:
// ===============================================================
// Patchable near/far jumps.
// "Far jumps" provide the ability to jump to any uint32_t offset from any
// other uint32_t offset without using a constant pool (thus returning a
// simple CodeOffset instead of a CodeOffsetJump).
CodeOffset farJumpWithPatch() PER_SHARED_ARCH;
void patchFarJump(CodeOffset farJump, uint32_t targetOffset) PER_SHARED_ARCH;
// Emit a nop that can be patched into a call (and back into a nop) with an
// int32 relative displacement.
CodeOffset nopPatchableToCall() PER_SHARED_ARCH;
void nopPatchableToCall(const wasm::CallSiteDesc& desc);
static void patchNopToCall(uint8_t* callsite,
uint8_t* target) PER_SHARED_ARCH;
static void patchCallToNop(uint8_t* callsite) PER_SHARED_ARCH;
// These methods are like movWithPatch/PatchDataWithValueCheck but allow
// using pc-relative addressing on certain platforms (RIP-relative LEA on x64,
// ADR instruction on arm64).
//
// Note: "Near" applies to ARM64 where the target must be within 1 MB (this is
// release-asserted).
CodeOffset moveNearAddressWithPatch(Register dest) PER_ARCH;
static void patchNearAddressMove(CodeLocationLabel loc,
CodeLocationLabel target)
DEFINED_ON(x86, x64, arm, arm64, loong64, riscv64, wasm32, mips_shared);
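// Usage sketch (the JitCode object |code| and the target offset are
// hypothetical):
//
//   CodeOffset off = masm.moveNearAddressWithPatch(dest);
//   ...
//   MacroAssembler::patchNearAddressMove(CodeLocationLabel(code, off),
//                                        CodeLocationLabel(code, target));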
public:
// ===============================================================
// [SMDOC] JIT-to-C++ Function Calls (callWithABI)
//
// callWithABI is used to make a call using the standard C/C++ system ABI.
//
// callWithABI is a low level interface for making calls, as such every call
// made with callWithABI should be organized with 6 steps: spilling live
// registers, aligning the stack, listing arguments of the called function,
// calling a function pointer, extracting the returned value and restoring
// live registers.
//
// A more detailed example of the six steps:
//
// 1) Saving of registers that are live. This will vary depending on which
// SpiderMonkey compiler you are working on. Registers that shouldn't be
// restored can be excluded.
//
// LiveRegisterSet volatileRegs(...);
// volatileRegs.take(scratch);
// masm.PushRegsInMask(volatileRegs);
//
// 2) Align the stack to perform the call with the correct stack alignment.
//
// When the stack pointer alignment is unknown and cannot be corrected
// when generating the code, setupUnalignedABICall must be used to
// dynamically align the stack pointer to the expectation of the ABI.
// When the stack pointer is known at JIT compilation time, the stack can
// be fixed manually and setupAlignedABICall and setupWasmABICall can be
// used.
//
// setupWasmABICall is a special case of setupAlignedABICall as
// SpiderMonkey's WebAssembly implementation mostly follows the system
// ABI, except for float/double arguments, which always use floating
// point registers, even if this is not supported by the system ABI.
//
// masm.setupUnalignedABICall(scratch);
//
// 3) Passing arguments. Arguments are passed left-to-right.
//
// masm.passABIArg(scratch);
// masm.passABIArg(FloatOp0, MoveOp::DOUBLE);
//
// Note how float register arguments are annotated with MoveOp::DOUBLE.
//
// Concerning stack-relative addresses, see the note on passABIArg.
//
// 4) Make the call:
//
// using Fn = int32_t (*)(int32_t);
// masm.callWithABI<Fn, Callee>();
//
// In the case where the call returns a double, that needs to be
// indicated to the callWithABI like this:
//
// using Fn = double (*)(int32_t);
// masm.callWithABI<Fn, Callee>(MoveOp::DOUBLE);
//
// There are overloads to allow calls to registers and addresses.
//
// 5) Take care of the result
//
// masm.storeCallPointerResult(scratch1);
// masm.storeCallBoolResult(scratch1);
// masm.storeCallInt32Result(scratch1);
// masm.storeCallFloatResult(scratch1);
//
// 6) Restore the potentially clobbered volatile registers
//
// masm.PopRegsInMask(volatileRegs);
//
// If expecting a returned value, this call should use
// PopRegsInMaskIgnore to filter out the registers which contain
// the returned value.
//
// Unless an exit frame is pushed prior to the setupABICall, the callee
// should not GC. To ensure this is the case, callWithABI is instrumented to
// make sure that in the default case callees are annotated with an
// AutoUnsafeCallWithABI on the stack.
//
// A callWithABI can opt out of checking, if for example it is known there
// is an exit frame, or the callee is known not to GC.
//
// If your callee needs to be able to GC, consider using a VMFunction, or
// create a fake exit frame, and instrument the TraceJitExitFrame
// accordingly.
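// Putting the six steps together, a typical call site looks roughly like
// this (a sketch; SomeHelper, the registers, and the live set are
// hypothetical):
//
//   LiveRegisterSet volatileRegs(RegisterSet::Volatile());
//   volatileRegs.takeUnchecked(output);
//   masm.PushRegsInMask(volatileRegs);
//
//   masm.setupUnalignedABICall(scratch);
//   masm.passABIArg(input);
//
//   using Fn = int32_t (*)(int32_t);
//   masm.callWithABI<Fn, SomeHelper>();
//   masm.storeCallInt32Result(output);
//
//   masm.PopRegsInMask(volatileRegs);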
// Set up a call to C/C++ code, given the assumption that the framePushed
// accurately defines the state of the stack, and that the top of the stack
// was properly aligned. Note that this only supports cdecl.
//
// As a rule of thumb, this can be used in CodeGenerator but not in CacheIR or
// Baseline code (because the stack is not aligned to ABIStackAlignment).
void setupAlignedABICall();
// As setupAlignedABICall, but for WebAssembly native ABI calls, which pass
// through a builtin thunk that uses the wasm ABI. All the wasm ABI calls
// can be native, since we always know the stack alignment a priori.
void setupWasmABICall();
// Set up an ABI call for when the alignment is not known. This may need a
// scratch register.
void setupUnalignedABICall(Register scratch) PER_ARCH;
// Arguments must be assigned to a C/C++ call in order. They are moved
// in parallel immediately before performing the call. This process may
// temporarily use more stack, in which case esp-relative addresses will be
// automatically adjusted. It is extremely important that esp-relative
// addresses are computed *after* setupABICall(). Furthermore, no
// operations should be emitted while setting arguments.
void passABIArg(const MoveOperand& from, MoveOp::Type type);
inline void passABIArg(Register reg);
inline void passABIArg(FloatRegister reg, MoveOp::Type type);
inline void callWithABI(
DynFn fun, MoveOp::Type result = MoveOp::GENERAL,
CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check);
template <typename Sig, Sig fun>
inline void callWithABI(
MoveOp::Type result = MoveOp::GENERAL,
CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check);
inline void callWithABI(Register fun, MoveOp::Type result = MoveOp::GENERAL);
inline void callWithABI(const Address& fun,
MoveOp::Type result = MoveOp::GENERAL);
CodeOffset callWithABI(wasm::BytecodeOffset offset, wasm::SymbolicAddress fun,
mozilla::Maybe<int32_t> instanceOffset,
MoveOp::Type result = MoveOp::GENERAL);
void callDebugWithABI(wasm::SymbolicAddress fun,
MoveOp::Type result = MoveOp::GENERAL);
private:
// Reinitialize the variables which have to be cleared before making a call
// with callWithABI.
template <class ABIArgGeneratorT>
void setupABICallHelper();
// Reinitialize the variables which have to be cleared before making a call
// with the native ABI.
void setupNativeABICall();
// Reserve stack space and resolve the argument moves.
void callWithABIPre(uint32_t* stackAdjust,
bool callFromWasm = false) PER_ARCH;
// Emits a call to a C/C++ function, resolving all argument moves.
void callWithABINoProfiler(void* fun, MoveOp::Type result,
CheckUnsafeCallWithABI check);
void callWithABINoProfiler(Register fun, MoveOp::Type result) PER_ARCH;
void callWithABINoProfiler(const Address& fun, MoveOp::Type result) PER_ARCH;
// Restore the stack to its state before the setup function call.
void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
bool callFromWasm = false) PER_ARCH;
// Create the signature to be able to decode the arguments of a native
// function, when calling a function within the simulator.
inline void appendSignatureType(MoveOp::Type type);
inline ABIFunctionType signature() const;
// Private variables used to handle moves between registers given as
// arguments to passABIArg and the list of ABI registers expected for the
// signature of the function.
MoveResolver moveResolver_;
// Architecture-specific implementation which specifies how registers & stack
// offsets are used for calling a function.
ABIArgGenerator abiArgs_;
#ifdef DEBUG
// Flag used to assert that we use ABI functions in the right context.
bool inCall_;
#endif
// If set by setupUnalignedABICall, then callWithABI will pop the stack
// pointer register which was saved on the stack.
bool dynamicAlignment_;
#ifdef JS_SIMULATOR
// The signature is used to accumulate all types of arguments which are used
// by the caller. This is used by the simulators to decode the arguments
// properly, and cast the function pointer to the right type.
uint32_t signature_;
#endif
public:
// ===============================================================
// Jit Frames.
//
// These functions are used to build the content of the Jit frames. See
// CommonFrameLayout class, and all its derivatives. The content should be
// pushed in the opposite order to the fields of the structures, such that
// the structures can be used to interpret the content of the stack.
// Call the Jit function, and push the return address (or let the callee
// push the return address).
//
// These functions return the offset of the return address, in order to use
// the return address to index the safepoints, which are used to list all
// live registers.
inline uint32_t callJitNoProfiler(Register callee);
inline uint32_t callJit(Register callee);
inline uint32_t callJit(JitCode* code);
inline uint32_t callJit(TrampolinePtr code);
inline uint32_t callJit(ImmPtr callee);
// The frame descriptor is the second field of all Jit frames, pushed before
// calling the Jit function. See CommonFrameLayout::descriptor_.
inline void pushFrameDescriptor(FrameType type);
inline void PushFrameDescriptor(FrameType type);
// For JitFrameLayout, the descriptor also stores the number of arguments
// passed by the caller. See MakeFrameDescriptorForJitCall.
inline void pushFrameDescriptorForJitCall(FrameType type, uint32_t argc);
inline void pushFrameDescriptorForJitCall(FrameType type, Register argc,
Register scratch);
inline void PushFrameDescriptorForJitCall(FrameType type, uint32_t argc);
inline void PushFrameDescriptorForJitCall(FrameType type, Register argc,
Register scratch);
// Load the number of actual arguments from the frame's JitFrameLayout.
inline void loadNumActualArgs(Register framePtr, Register dest);
// Push the callee token of a JSFunction whose pointer is stored in the
// |callee| register. The callee token is packed with a |constructing| flag
// indicating whether the JS function is called with "new" or not.
inline void PushCalleeToken(Register callee, bool constructing);
// Unpack a callee token located at the |token| address, and return the
// JSFunction pointer in the |dest| register.
inline void loadFunctionFromCalleeToken(Address token, Register dest);
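// Putting these together, a JS-to-JIT call site looks roughly like this (a
// sketch; registers are hypothetical, and the arguments are assumed to have
// been pushed already):
//
//   masm.PushCalleeToken(calleeReg, /* constructing = */ false);
//   masm.PushFrameDescriptorForJitCall(FrameType::IonJS, argcReg, scratch);
//   uint32_t returnOffset = masm.callJit(codeReg);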
// This function emulates a call by pushing an exit frame on the stack,
// except that the fake-function is inlined within the body of the caller.
//
// This function assumes that the current frame is an IonJS frame.
//
// This function returns the offset of the /fake/ return address, in order to
// use the return address to index the safepoints, which are used to list all
// live registers.
//
// This function should be balanced with a call to adjustStack, to pop the
// exit frame and emulate the return statement of the inlined function.
inline uint32_t buildFakeExitFrame(Register scratch);
private:
// This function is used by buildFakeExitFrame to push a fake return address
// on the stack. This fake return address should never be used for resuming
// any execution, and can even be an invalid pointer into the instruction
// stream, as long as it does not alias any other return address.
uint32_t pushFakeReturnAddress(Register scratch) PER_SHARED_ARCH;
public:
// ===============================================================
// Exit frame footer.
//
// When calling outside the Jit we push an exit frame. To mark the stack
// correctly, we have to push additional information, called the Exit frame
// footer, which is used to identify how the stack is marked.
//
// See JitFrames.h, and TraceJitExitFrame in JitFrames.cpp.
// Push stub code and the VMFunctionData pointer.
inline void enterExitFrame(Register cxreg, Register scratch,
const VMFunctionData* f);
// Push an exit frame token to identify which fake exit frame this footer
// corresponds to.
inline void enterFakeExitFrame(Register cxreg, Register scratch,
ExitFrameType type);
// Push an exit frame token for a native call.
inline void enterFakeExitFrameForNative(Register cxreg, Register scratch,
bool isConstructing);
// Pop ExitFrame footer in addition to the extra frame.
inline void leaveExitFrame(size_t extraFrame = 0);
private:
// Save the top of the stack into JitActivation::packedExitFP of the
// current thread, which should be the location of the latest exit frame.
void linkExitFrame(Register cxreg, Register scratch);
public:
// ===============================================================
// Move instructions
inline void move64(Imm64 imm, Register64 dest) PER_ARCH;
inline void move64(Register64 src, Register64 dest) PER_ARCH;
inline void moveFloat32ToGPR(FloatRegister src,
Register dest) PER_SHARED_ARCH;
inline void moveGPRToFloat32(Register src,
FloatRegister dest) PER_SHARED_ARCH;
inline void moveDoubleToGPR64(FloatRegister src, Register64 dest) PER_ARCH;
inline void moveGPR64ToDouble(Register64 src, FloatRegister dest) PER_ARCH;
inline void move8SignExtend(Register src, Register dest) PER_SHARED_ARCH;
inline void move16SignExtend(Register src, Register dest) PER_SHARED_ARCH;
// move64To32 will clear the high bits of `dest` on 64-bit systems.
inline void move64To32(Register64 src, Register dest) PER_ARCH;
inline void move32To64ZeroExtend(Register src, Register64 dest) PER_ARCH;
inline void move8To64SignExtend(Register src, Register64 dest) PER_ARCH;
inline void move16To64SignExtend(Register src, Register64 dest) PER_ARCH;
inline void move32To64SignExtend(Register src, Register64 dest) PER_ARCH;
inline void move32SignExtendToPtr(Register src, Register dest) PER_ARCH;
inline void move32ZeroExtendToPtr(Register src, Register dest) PER_ARCH;
// Copy a constant, typed-register, or a ValueOperand into a ValueOperand
// destination.
inline void moveValue(const ConstantOrRegister& src,
const ValueOperand& dest);
void moveValue(const TypedOrValueRegister& src,
const ValueOperand& dest) PER_ARCH;
void moveValue(const ValueOperand& src, const ValueOperand& dest) PER_ARCH;
void moveValue(const Value& src, const ValueOperand& dest) PER_ARCH;
void movePropertyKey(PropertyKey key, Register dest);
// ===============================================================
// Load instructions
inline void load32SignExtendToPtr(const Address& src, Register dest) PER_ARCH;
inline void loadAbiReturnAddress(Register dest) PER_SHARED_ARCH;
// ===============================================================
// Copy instructions
inline void copy64(const Address& src, const Address& dest, Register scratch);
public:
// ===============================================================
// Logical instructions
inline void not32(Register reg) PER_SHARED_ARCH;
inline void notPtr(Register reg) PER_ARCH;
inline void and32(Register src, Register dest) PER_SHARED_ARCH;
inline void and32(Imm32 imm, Register dest) PER_SHARED_ARCH;
inline void and32(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
inline void and32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
inline void and32(const Address& src, Register dest) PER_SHARED_ARCH;
inline void andPtr(Register src, Register dest) PER_ARCH;
inline void andPtr(Imm32 imm, Register dest) PER_ARCH;
inline void and64(Imm64 imm, Register64 dest) PER_ARCH;
inline void or64(Imm64 imm, Register64 dest) PER_ARCH;
inline void xor64(Imm64 imm, Register64 dest) PER_ARCH;
inline void or32(Register src, Register dest) PER_SHARED_ARCH;
inline void or32(Imm32 imm, Register dest) PER_SHARED_ARCH;
inline void or32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
inline void orPtr(Register src, Register dest) PER_ARCH;
inline void orPtr(Imm32 imm, Register dest) PER_ARCH;
inline void and64(Register64 src, Register64 dest) PER_ARCH;
inline void or64(Register64 src, Register64 dest) PER_ARCH;
inline void xor64(Register64 src, Register64 dest) PER_ARCH;
inline void xor32(Register src, Register dest) PER_SHARED_ARCH;
inline void xor32(Imm32 imm, Register dest) PER_SHARED_ARCH;
inline void xor32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
inline void xor32(const Address& src, Register dest) PER_SHARED_ARCH;
inline void xorPtr(Register src, Register dest) PER_ARCH;
inline void xorPtr(Imm32 imm, Register dest) PER_ARCH;
inline void and64(const Operand& src, Register64 dest)
DEFINED_ON(x64, mips64, loong64, riscv64);
inline void or64(const Operand& src, Register64 dest)
DEFINED_ON(x64, mips64, loong64, riscv64);
inline void xor64(const Operand& src, Register64 dest)
DEFINED_ON(x64, mips64, loong64, riscv64);
// ===============================================================
// Swap instructions
// Swap the two lower bytes and sign-extend the result to 32 bits.
inline void byteSwap16SignExtend(Register reg) PER_SHARED_ARCH;
// Swap the two lower bytes and zero-extend the result to 32 bits.
inline void byteSwap16ZeroExtend(Register reg) PER_SHARED_ARCH;
// Swap all four bytes in a 32-bit integer.
inline void byteSwap32(Register reg) PER_SHARED_ARCH;
// Swap all eight bytes in a 64-bit integer.
inline void byteSwap64(Register64 reg) PER_ARCH;
// ===============================================================
// Arithmetic functions
// Condition flags aren't guaranteed to be set by these functions; for
// example, x86 will always set condition flags, but ARM64 won't do it unless
// explicitly requested. Instead use branch(Add|Sub|Mul|Neg) to test for
// condition flags after performing arithmetic operations.
inline void add32(Register src, Register dest) PER_SHARED_ARCH;
inline void add32(Imm32 imm, Register dest) PER_SHARED_ARCH;
inline void add32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
inline void add32(Imm32 imm, const AbsoluteAddress& dest)
DEFINED_ON(x86_shared);
inline void addPtr(Register src, Register dest) PER_ARCH;
inline void addPtr(Register src1, Register src2, Register dest)
DEFINED_ON(arm64);
inline void addPtr(Imm32 imm, Register dest) PER_ARCH;
inline void addPtr(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
inline void addPtr(ImmWord imm, Register dest) PER_ARCH;
inline void addPtr(ImmPtr imm, Register dest);
inline void addPtr(Imm32 imm, const Address& dest)
DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);
inline void addPtr(Imm32 imm, const AbsoluteAddress& dest)
DEFINED_ON(x86, x64);
inline void addPtr(const Address& src, Register dest)
DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);
inline void add64(Register64 src, Register64 dest) PER_ARCH;
inline void add64(Imm32 imm, Register64 dest) PER_ARCH;
inline void add64(Imm64 imm, Register64 dest) PER_ARCH;
inline void add64(const Operand& src, Register64 dest)
DEFINED_ON(x64, mips64, loong64, riscv64);
inline void addFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
// Compute dest=SP-imm where dest is a pointer register and not SP. The
// offset returned from sub32FromStackPtrWithPatch() must be passed to
// patchSub32FromStackPtr().
inline CodeOffset sub32FromStackPtrWithPatch(Register dest) PER_ARCH;
inline void patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) PER_ARCH;
inline void addDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
inline void addConstantDouble(double d, FloatRegister dest) DEFINED_ON(x86);
inline void sub32(const Address& src, Register dest) PER_SHARED_ARCH;
inline void sub32(Register src, Register dest) PER_SHARED_ARCH;
inline void sub32(Imm32 imm, Register dest) PER_SHARED_ARCH;
inline void subPtr(Register src, Register dest) PER_ARCH;
inline void subPtr(Register src, const Address& dest)
DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);
inline void subPtr(Imm32 imm, Register dest) PER_ARCH;
inline void subPtr(ImmWord imm, Register dest) DEFINED_ON(x64);
inline void subPtr(const Address& addr, Register dest)
DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);
inline void sub64(Register64 src, Register64 dest) PER_ARCH;
inline void sub64(Imm64 imm, Register64 dest) PER_ARCH;
inline void sub64(const Operand& src, Register64 dest)
DEFINED_ON(x64, mips64, loong64, riscv64);
inline void subFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
inline void subDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
inline void mul32(Register rhs, Register srcDest) PER_SHARED_ARCH;
inline void mul32(Imm32 imm, Register srcDest) PER_SHARED_ARCH;
inline void mul32(Register src1, Register src2, Register dest, Label* onOver)
DEFINED_ON(arm64);
// Return the high word of the unsigned multiplication into |dest|.
inline void mulHighUnsigned32(Imm32 imm, Register src,
Register dest) PER_ARCH;
inline void mulPtr(Register rhs, Register srcDest) PER_ARCH;
inline void mul64(const Operand& src, const Register64& dest) DEFINED_ON(x64);
inline void mul64(const Operand& src, const Register64& dest,
const Register temp)
DEFINED_ON(x64, mips64, loong64, riscv64);
inline void mul64(Imm64 imm, const Register64& dest) PER_ARCH;
inline void mul64(Imm64 imm, const Register64& dest, const Register temp)
DEFINED_ON(x86, x64, arm, mips32, mips64, loong64, riscv64);
inline void mul64(const Register64& src, const Register64& dest,
const Register temp) PER_ARCH;
inline void mul64(const Register64& src1, const Register64& src2,
const Register64& dest) DEFINED_ON(arm64);
inline void mul64(Imm64 src1, const Register64& src2, const Register64& dest)
DEFINED_ON(arm64);
inline void mulBy3(Register src, Register dest) PER_ARCH;
inline void mulFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
inline void mulDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
inline void mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
DEFINED_ON(mips_shared, arm, arm64, x86, x64, loong64, riscv64, wasm32);
// Perform an integer division, returning the integer part rounded toward
// zero. rhs must not be zero, and the division must not overflow.
//
// On ARM, the chip must have hardware division instructions.
inline void quotient32(Register rhs, Register srcDest, bool isUnsigned)
DEFINED_ON(mips_shared, arm, arm64, loong64, riscv64, wasm32);
// As above, but srcDest must be eax and tempEdx must be edx.
inline void quotient32(Register rhs, Register srcDest, Register tempEdx,
bool isUnsigned) DEFINED_ON(x86_shared);
// Perform an integer division, returning the remainder part.
// rhs must not be zero, and the division must not overflow.
//
// On ARM, the chip must have hardware division instructions.
inline void remainder32(Register rhs, Register srcDest, bool isUnsigned)
DEFINED_ON(mips_shared, arm, arm64, loong64, riscv64, wasm32);
// As above, but srcDest must be eax and tempEdx must be edx.
inline void remainder32(Register rhs, Register srcDest, Register tempEdx,
bool isUnsigned) DEFINED_ON(x86_shared);
// Perform an integer division, returning the remainder.
// rhs must not be zero, and the division must not overflow.
//
// This variant preserves registers, and doesn't require hardware division
// instructions on ARM (will call out to a runtime routine).
//
// rhs is preserved, srcDest is clobbered.
void flexibleRemainder32(Register rhs, Register srcDest, bool isUnsigned,
const LiveRegisterSet& volatileLiveRegs)
DEFINED_ON(mips_shared, arm, arm64, x86_shared, loong64, riscv64, wasm32);
// Perform an integer division, returning the integer part rounded toward
// zero. rhs must not be zero, and the division must not overflow.
//
// This variant preserves registers, and doesn't require hardware division
// instructions on ARM (will call out to a runtime routine).
//
// rhs is preserved, srcDest is clobbered.
void flexibleQuotient32(Register rhs, Register srcDest, bool isUnsigned,
const LiveRegisterSet& volatileLiveRegs)
DEFINED_ON(mips_shared, arm, arm64, x86_shared, loong64, riscv64);
// Perform an integer division, returning the integer part rounded toward
// zero. rhs must not be zero, and the division must not overflow. The
// remainder is stored into the third argument register here.
//
// This variant preserves registers, and doesn't require hardware division
// instructions on ARM (will call out to a runtime routine).
//
// rhs is preserved, srcDest and remOutput are clobbered.
void flexibleDivMod32(Register rhs, Register srcDest, Register remOutput,
bool isUnsigned,
const LiveRegisterSet& volatileLiveRegs)
DEFINED_ON(mips_shared, arm, arm64, x86_shared, loong64, riscv64, wasm32);
inline void divFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
inline void divDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
inline void inc64(AbsoluteAddress dest) PER_ARCH;
inline void neg32(Register reg) PER_SHARED_ARCH;
inline void neg64(Register64 reg) PER_ARCH;
inline void negPtr(Register reg) PER_ARCH;
inline void negateFloat(FloatRegister reg) PER_SHARED_ARCH;
inline void negateDouble(FloatRegister reg) PER_SHARED_ARCH;
inline void abs32(Register src, Register dest) PER_SHARED_ARCH;
inline void absFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
inline void absDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
inline void sqrtFloat32(FloatRegister src,
FloatRegister dest) PER_SHARED_ARCH;
inline void sqrtDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
void floorFloat32ToInt32(FloatRegister src, Register dest,
Label* fail) PER_SHARED_ARCH;
void floorDoubleToInt32(FloatRegister src, Register dest,
Label* fail) PER_SHARED_ARCH;
void ceilFloat32ToInt32(FloatRegister src, Register dest,
Label* fail) PER_SHARED_ARCH;
void ceilDoubleToInt32(FloatRegister src, Register dest,
Label* fail) PER_SHARED_ARCH;
void roundFloat32ToInt32(FloatRegister src, Register dest, FloatRegister temp,
Label* fail) PER_SHARED_ARCH;
void roundDoubleToInt32(FloatRegister src, Register dest, FloatRegister temp,
Label* fail) PER_SHARED_ARCH;
void truncFloat32ToInt32(FloatRegister src, Register dest,
Label* fail) PER_SHARED_ARCH;
void truncDoubleToInt32(FloatRegister src, Register dest,
Label* fail) PER_SHARED_ARCH;
void nearbyIntDouble(RoundingMode mode, FloatRegister src,
FloatRegister dest) PER_SHARED_ARCH;
void nearbyIntFloat32(RoundingMode mode, FloatRegister src,
FloatRegister dest) PER_SHARED_ARCH;
void signInt32(Register input, Register output);
void signDouble(FloatRegister input, FloatRegister output);
void signDoubleToInt32(FloatRegister input, Register output,
FloatRegister temp, Label* fail);
void copySignDouble(FloatRegister lhs, FloatRegister rhs,
FloatRegister output) PER_SHARED_ARCH;
void copySignFloat32(FloatRegister lhs, FloatRegister rhs,
FloatRegister output) DEFINED_ON(x86_shared, arm64);
// Returns a random double in range [0, 1) in |dest|. The |rng| register must
// hold a pointer to a mozilla::non_crypto::XorShift128PlusRNG.
void randomDouble(Register rng, FloatRegister dest, Register64 temp0,
Register64 temp1);
// srcDest = {min,max}{Float32,Double}(srcDest, other)
// For min and max, handle NaN specially if handleNaN is true.
inline void minFloat32(FloatRegister other, FloatRegister srcDest,
bool handleNaN) PER_SHARED_ARCH;
inline void minDouble(FloatRegister other, FloatRegister srcDest,
bool handleNaN) PER_SHARED_ARCH;
inline void maxFloat32(FloatRegister other, FloatRegister srcDest,
bool handleNaN) PER_SHARED_ARCH;
inline void maxDouble(FloatRegister other, FloatRegister srcDest,
bool handleNaN) PER_SHARED_ARCH;
void minMaxArrayInt32(Register array, Register result, Register temp1,
Register temp2, Register temp3, bool isMax,
Label* fail);
void minMaxArrayNumber(Register array, FloatRegister result,
FloatRegister floatTemp, Register temp1,
Register temp2, bool isMax, Label* fail);
// Compute |pow(base, power)| and store the result in |dest|. If the result
// exceeds the int32 range, jumps to |onOver|.
// |base| and |power| are preserved, the other input registers are clobbered.
void pow32(Register base, Register power, Register dest, Register temp1,
Register temp2, Label* onOver);
void sameValueDouble(FloatRegister left, FloatRegister right,
FloatRegister temp, Register dest);
void branchIfNotRegExpPrototypeOptimizable(Register proto, Register temp,
const GlobalObject* maybeGlobal,
Label* label);
void branchIfNotRegExpInstanceOptimizable(Register regexp, Register temp,
const GlobalObject* maybeGlobal,
Label* label);
void loadRegExpLastIndex(Register regexp, Register string, Register lastIndex,
Label* notFoundZeroLastIndex);
void loadAndClearRegExpSearcherLastLimit(Register result, Register scratch);
void loadParsedRegExpShared(Register regexp, Register result,
Label* unparsed);
// ===============================================================
// Shift functions
// For shift-by-register there may be platform-specific variations, for
// example, x86 will perform the shift mod 32 but ARM will perform the shift
// mod 256.
//
// For shift-by-immediate the platform assembler may restrict the immediate,
// for example, the ARM assembler requires the count for 32-bit shifts to be
// in the range [0,31].
inline void lshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
inline void rshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
inline void rshift32Arithmetic(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
inline void lshiftPtr(Imm32 imm, Register dest) PER_ARCH;
inline void rshiftPtr(Imm32 imm, Register dest) PER_ARCH;
inline void rshiftPtr(Imm32 imm, Register src, Register dest)
DEFINED_ON(arm64);
inline void rshiftPtrArithmetic(Imm32 imm, Register dest) PER_ARCH;
inline void lshift64(Imm32 imm, Register64 dest) PER_ARCH;
inline void rshift64(Imm32 imm, Register64 dest) PER_ARCH;
inline void rshift64Arithmetic(Imm32 imm, Register64 dest) PER_ARCH;
// On x86_shared these have the constraint that shift must be in CL.
inline void lshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
inline void rshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
inline void rshift32Arithmetic(Register shift,
Register srcDest) PER_SHARED_ARCH;
inline void lshiftPtr(Register shift, Register srcDest) PER_ARCH;
inline void rshiftPtr(Register shift, Register srcDest) PER_ARCH;
// These variants do not have the above constraint, but may emit some extra
// instructions on x86_shared. They also handle shift >= 32 consistently by
// masking with 0x1F (either explicitly or relying on the hardware to do
// that).
inline void flexibleLshift32(Register shift,
Register srcDest) PER_SHARED_ARCH;
inline void flexibleRshift32(Register shift,
Register srcDest) PER_SHARED_ARCH;
inline void flexibleRshift32Arithmetic(Register shift,
Register srcDest) PER_SHARED_ARCH;
inline void lshift64(Register shift, Register64 srcDest) PER_ARCH;
inline void rshift64(Register shift, Register64 srcDest) PER_ARCH;
inline void rshift64Arithmetic(Register shift, Register64 srcDest) PER_ARCH;
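// For example, when the shift count may not already be masked, prefer the
// flexible variant for portable semantics (a sketch):
//
//   masm.flexibleLshift32(countReg, valueReg);  // behaves as count & 0x1F
//
// rather than lshift32(countReg, valueReg), whose behavior for counts >= 32
// varies across platforms.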
// ===============================================================
// Rotation functions
// Note: - on x86 and x64 the count register must be CL.
//       - on x64 the temp register should be InvalidReg.
inline void rotateLeft(Imm32 count, Register input,
Register dest) PER_SHARED_ARCH;
inline void rotateLeft(Register count, Register input,
Register dest) PER_SHARED_ARCH;
inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest)
DEFINED_ON(x64);
inline void rotateLeft64(Register count, Register64 input, Register64 dest)
DEFINED_ON(x64);
inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest,
Register temp) PER_ARCH;
inline void rotateLeft64(Register count, Register64 input, Register64 dest,
Register temp) PER_ARCH;
inline void rotateRight(Imm32 count, Register input,
Register dest) PER_SHARED_ARCH;
inline void rotateRight(Register count, Register input,
Register dest) PER_SHARED_ARCH;
inline void rotateRight64(Imm32 count, Register64 input, Register64 dest)
DEFINED_ON(x64);
inline void rotateRight64(Register count, Register64 input, Register64 dest)
DEFINED_ON(x64);
inline void rotateRight64(Imm32 count, Register64 input, Register64 dest,
Register temp) PER_ARCH;
inline void rotateRight64(Register count, Register64 input, Register64 dest,
Register temp) PER_ARCH;
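// A minimal usage sketch (register names are hypothetical): the
// four-argument forms are the portable ones. Per the note above, on x64
// the temp is unused and callers there pass InvalidReg; other platforms
// may require a real scratch register:
//
//   #if defined(JS_CODEGEN_X64)
//     masm.rotateLeft64(Imm32(13), input64, dest64, InvalidReg);
//   #else
//     masm.rotateLeft64(Imm32(13), input64, dest64, scratch);
//   #endif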
// ===============================================================
// Bit counting functions
// knownNotZero may be true only if the src is known not to be zero.
inline void clz32(Register src, Register dest,
bool knownNotZero) PER_SHARED_ARCH;
inline void ctz32(Register src, Register dest,
bool knownNotZero) PER_SHARED_ARCH;
inline void clz64(Register64 src, Register dest) PER_ARCH;
inline void ctz64(Register64 src, Register dest) PER_ARCH;
// On x86_shared, temp may be Invalid only if the chip has the POPCNT
// instruction. On ARM, temp may never be Invalid.
inline void popcnt32(Register src, Register dest,
Register temp) PER_SHARED_ARCH;
// temp may be Invalid only if the chip has the POPCNT instruction.
inline void popcnt64(Register64 src, Register64 dest, Register temp) PER_ARCH;
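// A minimal usage sketch (register names are hypothetical): portable
// callers always supply a real temp to popcnt32/popcnt64, since only
// chips with the POPCNT instruction accept an Invalid temp:
//
//   masm.popcnt32(srcReg, destReg, tempReg); // temp needed without POPCNT
//
// Likewise, pass knownNotZero=true to clz32/ctz32 only when a zero input
// has already been ruled out, e.g. by an earlier branch.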
// ===============================================================
// Condition functions
inline void cmp8Set(Condition cond, Address lhs, Imm32 rhs,
Register dest) PER_SHARED_ARCH;
inline void cmp16Set(Condition cond, Address lhs, Imm32 rhs,
Register dest) PER_SHARED_ARCH;
template <typename T1, typename T2>
inline void cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest)
DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, loong64, riscv64,
wasm32);
// Only the NotEqual and Equal conditions are allowed.
inline void cmp64Set(Condition cond, Address lhs, Imm64 rhs,
Register dest) PER_ARCH;
template <typename T1, typename T2>
inline void cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) PER_ARCH;
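// A minimal usage sketch (register names are hypothetical): the *Set
// family materializes a comparison as 0 or 1 in |dest| instead of
// branching:
//
//   masm.cmp32Set(Assembler::LessThan, lhsReg, Imm32(0), destReg);
//   // destReg is now 1 if lhsReg < 0, otherwise 0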
// ===============================================================
// Branch functions
inline void branch8(Condition cond, const Address& lhs, Imm32 rhs,
Label* label) PER_SHARED_ARCH;
// Compares the byte in |lhs| against |rhs| using an 8-bit comparison on
// x86/x64, or a 32-bit comparison on all other platforms. For
// cross-platform compatible code, the caller must ensure |rhs| is a
// zero-extended (resp. sign-extended) byte value.
inline void branch8(Condition cond, const BaseIndex& lhs, Register rhs,
Label* label) PER_SHARED_ARCH;
inline void branch16(Condition cond, const Address& lhs, Imm32 rhs,
Label* label) PER_SHARED_ARCH;
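// A minimal usage sketch (names are hypothetical): for cross-platform
// behavior the immediate passed to branch8 should already be a valid
// zero-extended byte value, e.g. when testing a uint8_t field:
//
//   masm.branch8(Assembler::Equal, Address(objReg, flagsOffset),
//                Imm32(uint8_t(0x10)), &flagSet);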
template <class L>
inline void branch32(Condition cond, Register lhs, Register rhs,
L label) PER_SHARED_ARCH;
template <class L>
inline void branch32(Condition cond, Register lhs, Imm32 rhs,
L label) PER_SHARED_ARCH;
inline void branch32(Condition cond, Register lhs, const Address& rhs,
Label* label) DEFINED_ON(arm64);
inline void branch32(Condition cond, const Address& lhs, Register rhs,
Label* label) PER_SHARED_ARCH;
inline void branch32(Condition cond, const Address& lhs, Imm32 rhs,
Label* label) PER_SHARED_ARCH;
inline void branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs,
Label* label)
DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
inline void branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs,
Label* label)
DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
inline void branch32(Condition cond, const BaseIndex& lhs, Register rhs,
Label* label) DEFINED_ON(arm, x86_shared);
inline void branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
Label* label) PER_SHARED_ARCH;
inline void branch32(Condition cond, const Operand& lhs, Register rhs,
Label* label) DEFINED_ON(x86_shared);
inline void branch32(Condition cond, const Operand& lhs, Imm32 rhs,
Label* label) DEFINED_ON(x86_shared);
inline void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs,
Label* label)
DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
// The supported conditions are Equal, NotEqual, LessThan(orEqual),
// GreaterThan(orEqual), Below(orEqual) and Above(orEqual). When no fail
// label is given, a failed comparison falls through to the next
// instruction; otherwise it jumps to the fail label. (See the usage
// sketch below.)
inline void branch64(Condition cond, Register64 lhs, Imm64 val,
Label* success, Label* fail = nullptr) PER_ARCH;
inline void branch64(Condition cond, Register64 lhs, Register64 rhs,
Label* success, Label* fail = nullptr) PER_ARCH;
// Only the NotEqual and Equal conditions are allowed for the branch64
// variants with Address as lhs.
inline void branch64(Condition cond, const Address& lhs, Imm64 val,
Label* label) PER_ARCH;
inline void branch64(Condition cond, const Address& lhs, Register64 rhs,
Label* label) PER_ARCH;
// Compare the value at |lhs| with the value at |rhs|. The scratch
// register *must not* be the base of |lhs| or |rhs|.
inline void branch64(Condition cond, const Address& lhs, const Address& rhs,
Register scratch, Label* label) PER_ARCH;
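// A minimal usage sketch (labels and registers are hypothetical) of the
// success/fail form documented above:
//
//   masm.branch64(Assembler::Equal, value64, Imm64(0), &isZero);
//   // no fail label: control falls through here when value64 != 0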
template <class L>
inline void branchPtr(Condition cond, Register lhs, Register rhs,
L label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, Register lhs, Imm32 rhs,
Label* label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, Register lhs, ImmPtr rhs,
Label* label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
Label* label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, Register lhs, ImmWord rhs,
Label* label) PER_SHARED_ARCH;
template <class L>
inline void branchPtr(Condition cond, const Address& lhs, Register rhs,
L label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
Label* label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
Label* label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
Label* label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, const BaseIndex& lhs, ImmWord rhs,
Label* label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, const BaseIndex& lhs, Register rhs,
Label* label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, const AbsoluteAddress& lhs,
Register rhs, Label* label)
DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
inline void branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs,
Label* label)
DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
inline void branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs,
Label* label)
DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
// Given a pointer to a GC Cell, retrieve the StoreBuffer pointer from its
// chunk header, or nullptr if the cell is in the tenured heap.
void loadStoreBuffer(Register ptr, Register buffer) PER_ARCH;
void branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
Label* label)
DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
void branchPtrInNurseryChunk(Condition cond, const Address& address,
Register temp, Label* label) DEFINED_ON(x86);
void branchValueIsNurseryCell(Condition cond, const Address& address,
Register temp, Label* label) PER_ARCH;
void branchValueIsNurseryCell(Condition cond, ValueOperand value,
Register temp, Label* label) PER_ARCH;
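// A minimal usage sketch (registers and labels are hypothetical): a
// post-write barrier typically branches when the stored cell is still in
// the nursery, assuming Equal here means "is in a nursery chunk":
//
//   masm.branchPtrInNurseryChunk(Assembler::Equal, cellReg, tempReg,
//                                &needsPostBarrier);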
// Compares a Value (lhs) holding a private pointer boxed inside a
// js::Value against a raw pointer (rhs).
inline void branchPrivatePtr(Condition cond, const Address& lhs, Register rhs,
Label* label) PER_ARCH;
inline void branchFloat(DoubleCondition cond, FloatRegister lhs,
FloatRegister rhs, Label* label) PER_SHARED_ARCH;
// Truncate a double/float32 to int32; when the result doesn't fit in an
// int32, jump to the failure label. This particular variant is allowed to
// return the value modulo 2**32, which isn't implemented on all
// architectures. E.g. the x64 variants will do this only in the int64_t
// range.
inline void branchTruncateFloat32MaybeModUint32(FloatRegister src,
Register dest, Label* fail)
DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
inline void branchTruncateDoubleMaybeModUint32(FloatRegister src,
Register dest, Label* fail)
DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
// Truncate a double/float32 to intptr; when the result doesn't fit in an
// intptr, jump to the failure label.
inline void branchTruncateFloat32ToPtr(FloatRegister src, Register dest,
Label* fail) DEFINED_ON(x86, x64);
inline void branchTruncateDoubleToPtr(FloatRegister src, Register dest,
Label* fail) DEFINED_ON(x86, x64);
// Truncate a double/float32 to int32; when the result doesn't fit in an
// int32, jump to the failure label.
inline void branchTruncateFloat32ToInt32(FloatRegister src, Register dest,
Label* fail)
DEFINED_ON(arm, arm64, mips_shared, x86, x64, loong64, riscv64, wasm32);
inline void branchTruncateDoubleToInt32(FloatRegister src, Register dest,
Label* fail) PER_ARCH;
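// A minimal usage sketch (registers and labels are hypothetical): a
// checked double->int32 truncation diverts to |fail| whenever the
// truncated result doesn't fit in an int32 (including NaN inputs):
//
//   Label fail;
//   masm.branchTruncateDoubleToInt32(doubleReg, intReg, &fail);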
inline void branchDouble(DoubleCondition cond, FloatRegister lhs,
FloatRegister rhs, Label* label) PER_SHARED_ARCH;
inline void branchDoubleNotInInt64Range(Address src, Register temp,
Label* fail);
inline void branchDoubleNotInUInt64Range(Address src, Register temp,
Label* fail);
inline void branchFloat32NotInInt64Range(Address src, Register temp,
Label* fail);
inline void branchFloat32NotInUInt64Range(Address src, Register temp,
Label* fail);
template <typename T>
inline void branchAdd32(Condition cond, T src, Register dest,
Label* label) PER_SHARED_ARCH;
template <typename T>
inline void branchSub32(Condition cond, T src, Register dest,
Label* label) PER_SHARED_ARCH;
template <typename T>
inline void branchMul32(Condition cond, T src, Register dest,
Label* label) PER_SHARED_ARCH;
template <typename T>
inline void branchRshift32(Condition cond, T src, Register dest,
Label* label) PER_SHARED_ARCH;
inline void branchNeg32(Condition cond, Register reg,
Label* label) PER_SHARED_ARCH;
inline void branchAdd64(Condition cond, Imm64 imm, Register64 dest,
Label* label) DEFINED_ON(x86, arm, wasm32);