// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef VIXL_A64_ASSEMBLER_A64_H_
#define VIXL_A64_ASSEMBLER_A64_H_
#include "jit/arm64/vixl/Cpu-vixl.h"
#include "jit/arm64/vixl/Globals-vixl.h"
#include "jit/arm64/vixl/Instructions-vixl.h"
#include "jit/arm64/vixl/MozBaseAssembler-vixl.h"
#include "jit/arm64/vixl/Utils-vixl.h"
#include "jit/JitSpewer.h"
#include "jit/shared/Assembler-shared.h"
#include "jit/shared/Disassembler-shared.h"
#include "jit/shared/IonAssemblerBufferWithConstantPools.h"
#if defined(_M_ARM64)
#ifdef mvn
#undef mvn
#endif
#endif
namespace vixl {
using js::jit::BufferOffset;
using js::jit::Label;
using js::jit::Address;
using js::jit::BaseIndex;
using js::jit::DisassemblerSpew;
using LabelDoc = DisassemblerSpew::LabelDoc;
typedef uint64_t RegList;
static const int kRegListSizeInBits = sizeof(RegList) * 8;
// Registers.
// Some CPURegister methods can return Register or VRegister types, so we need
// to declare them in advance.
class Register;
class VRegister;
class CPURegister {
public:
enum RegisterType {
// The kInvalid value is used to detect uninitialized static instances,
// which are always zero-initialized before any constructors are called.
kInvalid = 0,
kRegister,
kVRegister,
kFPRegister = kVRegister,
kNoRegister
};
constexpr CPURegister() : code_(0), size_(0), type_(kNoRegister) {
}
constexpr CPURegister(unsigned code, unsigned size, RegisterType type)
: code_(code), size_(size), type_(type) {
}
unsigned code() const {
VIXL_ASSERT(IsValid());
return code_;
}
RegisterType type() const {
VIXL_ASSERT(IsValidOrNone());
return type_;
}
RegList Bit() const {
VIXL_ASSERT(code_ < (sizeof(RegList) * 8));
return IsValid() ? (static_cast<RegList>(1) << code_) : 0;
}
unsigned size() const {
VIXL_ASSERT(IsValid());
return size_;
}
int SizeInBytes() const {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(size() % 8 == 0);
return size_ / 8;
}
int SizeInBits() const {
VIXL_ASSERT(IsValid());
return size_;
}
bool Is8Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 8;
}
bool Is16Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 16;
}
bool Is32Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 32;
}
bool Is64Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 64;
}
bool Is128Bits() const {
VIXL_ASSERT(IsValid());
return size_ == 128;
}
bool IsValid() const {
if (IsValidRegister() || IsValidVRegister()) {
VIXL_ASSERT(!IsNone());
return true;
} else {
// This assert is hit when the register has not been properly initialized.
// One cause for this can be an initialisation order fiasco.
VIXL_ASSERT(IsNone());
return false;
}
}
bool IsValidRegister() const {
return IsRegister() &&
((size_ == kWRegSize) || (size_ == kXRegSize)) &&
((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode));
}
bool IsValidVRegister() const {
return IsVRegister() &&
((size_ == kBRegSize) || (size_ == kHRegSize) ||
(size_ == kSRegSize) || (size_ == kDRegSize) ||
(size_ == kQRegSize)) &&
(code_ < kNumberOfVRegisters);
}
bool IsValidFPRegister() const {
return IsFPRegister() && (code_ < kNumberOfVRegisters);
}
bool IsNone() const {
// kNoRegister types should always have size 0 and code 0.
VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0));
VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0));
return type_ == kNoRegister;
}
bool Aliases(const CPURegister& other) const {
VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
return (code_ == other.code_) && (type_ == other.type_);
}
bool Is(const CPURegister& other) const {
VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
return Aliases(other) && (size_ == other.size_);
}
bool IsZero() const {
VIXL_ASSERT(IsValid());
return IsRegister() && (code_ == kZeroRegCode);
}
bool IsSP() const {
VIXL_ASSERT(IsValid());
return IsRegister() && (code_ == kSPRegInternalCode);
}
bool IsRegister() const {
return type_ == kRegister;
}
bool IsVRegister() const {
return type_ == kVRegister;
}
bool IsFPRegister() const {
return IsS() || IsD();
}
bool IsW() const { return IsValidRegister() && Is32Bits(); }
bool IsX() const { return IsValidRegister() && Is64Bits(); }
// These assertions ensure that the size and type of the register are as
// described. They do not consider the number of lanes that make up a vector.
// So, for example, Is8B() implies IsD(), and Is1D() implies IsD(), but IsD()
// does not imply Is1D() or Is8B().
// Check the number of lanes, i.e. the format of the vector, using methods such
// as Is8B() and Is1D() in the VRegister class.
bool IsV() const { return IsVRegister(); }
bool IsB() const { return IsV() && Is8Bits(); }
bool IsH() const { return IsV() && Is16Bits(); }
bool IsS() const { return IsV() && Is32Bits(); }
bool IsD() const { return IsV() && Is64Bits(); }
bool IsQ() const { return IsV() && Is128Bits(); }
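// For example (illustrative): d0 satisfies both IsD() and Is1D(), while
// d0.V2S() still satisfies IsD() but reports Is2S() rather than Is1D().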
const Register& W() const;
const Register& X() const;
const VRegister& V() const;
const VRegister& B() const;
const VRegister& H() const;
const VRegister& S() const;
const VRegister& D() const;
const VRegister& Q() const;
bool IsSameSizeAndType(const CPURegister& other) const {
return (size_ == other.size_) && (type_ == other.type_);
}
protected:
unsigned code_;
unsigned size_;
RegisterType type_;
private:
bool IsValidOrNone() const {
return IsValid() || IsNone();
}
};
class Register : public CPURegister {
public:
Register() : CPURegister() {}
explicit Register(const CPURegister& other)
: CPURegister(other.code(), other.size(), other.type()) {
VIXL_ASSERT(IsValidRegister());
}
constexpr Register(unsigned code, unsigned size)
: CPURegister(code, size, kRegister) {}
constexpr Register(js::jit::Register r, unsigned size)
: CPURegister(r.code(), size, kRegister) {}
bool IsValid() const {
VIXL_ASSERT(IsRegister() || IsNone());
return IsValidRegister();
}
js::jit::Register asUnsized() const {
// asUnsized() is only ever used on temp registers or on registers that
// are known not to be SP, and there should be no risk of it being
// applied to SP. Check anyway.
VIXL_ASSERT(code_ != kSPRegInternalCode);
return js::jit::Register::FromCode((js::jit::Register::Code)code_);
}
static const Register& WRegFromCode(unsigned code);
static const Register& XRegFromCode(unsigned code);
private:
static const Register wregisters[];
static const Register xregisters[];
};
class VRegister : public CPURegister {
public:
VRegister() : CPURegister(), lanes_(1) {}
explicit VRegister(const CPURegister& other)
: CPURegister(other.code(), other.size(), other.type()), lanes_(1) {
VIXL_ASSERT(IsValidVRegister());
VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
}
constexpr VRegister(unsigned code, unsigned size, unsigned lanes = 1)
: CPURegister(code, size, kVRegister), lanes_(lanes) {
// VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
}
constexpr VRegister(js::jit::FloatRegister r)
: CPURegister(r.encoding(), r.size() * 8, kVRegister), lanes_(1) {
}
constexpr VRegister(js::jit::FloatRegister r, unsigned size)
: CPURegister(r.encoding(), size, kVRegister), lanes_(1) {
}
VRegister(unsigned code, VectorFormat format)
: CPURegister(code, RegisterSizeInBitsFromFormat(format), kVRegister),
lanes_(IsVectorFormat(format) ? LaneCountFromFormat(format) : 1) {
VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
}
bool IsValid() const {
VIXL_ASSERT(IsVRegister() || IsNone());
return IsValidVRegister();
}
static const VRegister& BRegFromCode(unsigned code);
static const VRegister& HRegFromCode(unsigned code);
static const VRegister& SRegFromCode(unsigned code);
static const VRegister& DRegFromCode(unsigned code);
static const VRegister& QRegFromCode(unsigned code);
static const VRegister& VRegFromCode(unsigned code);
VRegister V8B() const { return VRegister(code_, kDRegSize, 8); }
VRegister V16B() const { return VRegister(code_, kQRegSize, 16); }
VRegister V4H() const { return VRegister(code_, kDRegSize, 4); }
VRegister V8H() const { return VRegister(code_, kQRegSize, 8); }
VRegister V2S() const { return VRegister(code_, kDRegSize, 2); }
VRegister V4S() const { return VRegister(code_, kQRegSize, 4); }
VRegister V2D() const { return VRegister(code_, kQRegSize, 2); }
VRegister V1D() const { return VRegister(code_, kDRegSize, 1); }
bool Is8B() const { return (Is64Bits() && (lanes_ == 8)); }
bool Is16B() const { return (Is128Bits() && (lanes_ == 16)); }
bool Is4H() const { return (Is64Bits() && (lanes_ == 4)); }
bool Is8H() const { return (Is128Bits() && (lanes_ == 8)); }
bool Is2S() const { return (Is64Bits() && (lanes_ == 2)); }
bool Is4S() const { return (Is128Bits() && (lanes_ == 4)); }
bool Is1D() const { return (Is64Bits() && (lanes_ == 1)); }
bool Is2D() const { return (Is128Bits() && (lanes_ == 2)); }
// For consistency, we assert the number of lanes of these scalar registers,
// even though there are no vectors of equivalent total size with which they
// could alias.
bool Is1B() const {
VIXL_ASSERT(!(Is8Bits() && IsVector()));
return Is8Bits();
}
bool Is1H() const {
VIXL_ASSERT(!(Is16Bits() && IsVector()));
return Is16Bits();
}
bool Is1S() const {
VIXL_ASSERT(!(Is32Bits() && IsVector()));
return Is32Bits();
}
bool IsLaneSizeB() const { return LaneSizeInBits() == kBRegSize; }
bool IsLaneSizeH() const { return LaneSizeInBits() == kHRegSize; }
bool IsLaneSizeS() const { return LaneSizeInBits() == kSRegSize; }
bool IsLaneSizeD() const { return LaneSizeInBits() == kDRegSize; }
int lanes() const {
return lanes_;
}
bool IsScalar() const {
return lanes_ == 1;
}
bool IsVector() const {
return lanes_ > 1;
}
bool IsSameFormat(const VRegister& other) const {
return (size_ == other.size_) && (lanes_ == other.lanes_);
}
unsigned LaneSizeInBytes() const {
return SizeInBytes() / lanes_;
}
unsigned LaneSizeInBits() const {
return LaneSizeInBytes() * 8;
}
private:
static const VRegister bregisters[];
static const VRegister hregisters[];
static const VRegister sregisters[];
static const VRegister dregisters[];
static const VRegister qregisters[];
static const VRegister vregisters[];
int lanes_;
};
// Backward compatibility for FPRegisters.
typedef VRegister FPRegister;
// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and VRegister
// variants are provided for convenience.
const Register NoReg;
const VRegister NoVReg;
const FPRegister NoFPReg; // For backward compatibility.
const CPURegister NoCPUReg;
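// For example, NoReg.Is(NoVReg) and NoVReg.Is(NoCPUReg) are both true, so any
// of these placeholders can be passed where an "unused register" is expected.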
#define DEFINE_REGISTERS(N) \
constexpr Register w##N(N, kWRegSize); \
constexpr Register x##N(N, kXRegSize);
REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
constexpr Register wsp(kSPRegInternalCode, kWRegSize);
constexpr Register sp(kSPRegInternalCode, kXRegSize);
#define DEFINE_VREGISTERS(N) \
constexpr VRegister b##N(N, kBRegSize); \
constexpr VRegister h##N(N, kHRegSize); \
constexpr VRegister s##N(N, kSRegSize); \
constexpr VRegister d##N(N, kDRegSize); \
constexpr VRegister q##N(N, kQRegSize); \
constexpr VRegister v##N(N, kQRegSize);
REGISTER_CODE_LIST(DEFINE_VREGISTERS)
#undef DEFINE_VREGISTERS
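// The macros above are expanded for register codes 0 through 31 (assuming the
// usual VIXL REGISTER_CODE_LIST), giving w0..w31/x0..x31 and b0..b31 up to
// v0..v31.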
// Register aliases.
constexpr Register ip0 = x16;
constexpr Register ip1 = x17;
constexpr Register lr = x30;
constexpr Register xzr = x31;
constexpr Register wzr = w31;
// AreAliased returns true if any of the named registers overlap. Arguments
// set to NoReg are ignored. The system stack pointer may be specified.
bool AreAliased(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoReg,
const CPURegister& reg4 = NoReg,
const CPURegister& reg5 = NoReg,
const CPURegister& reg6 = NoReg,
const CPURegister& reg7 = NoReg,
const CPURegister& reg8 = NoReg);
// AreSameSizeAndType returns true if all of the specified registers have the
// same size, and are of the same type. The system stack pointer may be
// specified. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
bool AreSameSizeAndType(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoCPUReg,
const CPURegister& reg4 = NoCPUReg,
const CPURegister& reg5 = NoCPUReg,
const CPURegister& reg6 = NoCPUReg,
const CPURegister& reg7 = NoCPUReg,
const CPURegister& reg8 = NoCPUReg);
// AreEven returns true if all of the specified registers have even register
// indices. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
bool AreEven(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoReg,
const CPURegister& reg4 = NoReg,
const CPURegister& reg5 = NoReg,
const CPURegister& reg6 = NoReg,
const CPURegister& reg7 = NoReg,
const CPURegister& reg8 = NoReg);
// AreConsecutive returns true if all of the specified registers are
// consecutive in the register file. Arguments set to NoReg are ignored, as are
// any subsequent arguments. At least one argument (reg1) must be valid
// (not NoCPUReg).
bool AreConsecutive(const CPURegister& reg1,
const CPURegister& reg2,
const CPURegister& reg3 = NoCPUReg,
const CPURegister& reg4 = NoCPUReg);
// AreSameFormat returns true if all of the specified VRegisters have the same
// vector format. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoVReg).
bool AreSameFormat(const VRegister& reg1,
const VRegister& reg2,
const VRegister& reg3 = NoVReg,
const VRegister& reg4 = NoVReg);
// AreConsecutive returns true if all of the specified VRegisters are
// consecutive in the register file. Arguments set to NoReg are ignored, as are
// any subsequent arguments. At least one argument (reg1) must be valid
// (not NoVReg).
bool AreConsecutive(const VRegister& reg1,
const VRegister& reg2,
const VRegister& reg3 = NoVReg,
const VRegister& reg4 = NoVReg);
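// For example (illustrative): AreAliased(w0, x0) is true because both name
// register code 0, whereas AreSameSizeAndType(w0, x0) is false because their
// sizes differ; trailing NoReg/NoCPUReg arguments are simply ignored.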
// Lists of registers.
class CPURegList {
public:
explicit CPURegList(CPURegister reg1,
CPURegister reg2 = NoCPUReg,
CPURegister reg3 = NoCPUReg,
CPURegister reg4 = NoCPUReg)
: list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
size_(reg1.size()), type_(reg1.type()) {
VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
VIXL_ASSERT(IsValid());
}
CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
: list_(list), size_(size), type_(type) {
VIXL_ASSERT(IsValid());
}
CPURegList(CPURegister::RegisterType type, unsigned size,
unsigned first_reg, unsigned last_reg)
: size_(size), type_(type) {
VIXL_ASSERT(((type == CPURegister::kRegister) &&
(last_reg < kNumberOfRegisters)) ||
((type == CPURegister::kVRegister) &&
(last_reg < kNumberOfVRegisters)));
VIXL_ASSERT(last_reg >= first_reg);
list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
list_ &= ~((UINT64_C(1) << first_reg) - 1);
VIXL_ASSERT(IsValid());
}
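// Worked example: with type CPURegister::kRegister, size kXRegSize,
// first_reg = 19 and last_reg = 28, the two lines above compute
//   list_ = ((UINT64_C(1) << 29) - 1) & ~((UINT64_C(1) << 19) - 1) = 0x1ff80000,
// i.e. one bit set for each of x19..x28.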
CPURegister::RegisterType type() const {
VIXL_ASSERT(IsValid());
return type_;
}
// Combine another CPURegList into this one. Registers that already exist in
// this list are left unchanged. The type and size of the registers in the
// 'other' list must match those in this list.
void Combine(const CPURegList& other) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(other.type() == type_);
VIXL_ASSERT(other.RegisterSizeInBits() == size_);
list_ |= other.list();
}
// Remove every register in the other CPURegList from this one. Registers that
// do not exist in this list are ignored. The type and size of the registers
// in the 'other' list must match those in this list.
void Remove(const CPURegList& other) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(other.type() == type_);
VIXL_ASSERT(other.RegisterSizeInBits() == size_);
list_ &= ~other.list();
}
// Variants of Combine and Remove which take a single register.
void Combine(const CPURegister& other) {
VIXL_ASSERT(other.type() == type_);
VIXL_ASSERT(other.size() == size_);
Combine(other.code());
}
void Remove(const CPURegister& other) {
VIXL_ASSERT(other.type() == type_);
VIXL_ASSERT(other.size() == size_);
Remove(other.code());
}
// Variants of Combine and Remove which take a single register by its code;
// the type and size of the register is inferred from this list.
void Combine(int code) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
list_ |= (UINT64_C(1) << code);
}
void Remove(int code) {
VIXL_ASSERT(IsValid());
VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
list_ &= ~(UINT64_C(1) << code);
}
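// Usage sketch (illustrative):
//   CPURegList saved(x19, x20);  // {x19, x20}
//   saved.Combine(x21);          // {x19, x20, x21}
//   saved.Remove(19);            // {x20, x21}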
static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
VIXL_ASSERT(list_1.type_ == list_2.type_);
VIXL_ASSERT(list_1.size_ == list_2.size_);
return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
}
static CPURegList Union(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3);
static CPURegList Union(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3,
const CPURegList& list_4);
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2) {
VIXL_ASSERT(list_1.type_ == list_2.type_);
VIXL_ASSERT(list_1.size_ == list_2.size_);
return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
}
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3);
static CPURegList Intersection(const CPURegList& list_1,
const CPURegList& list_2,
const CPURegList& list_3,
const CPURegList& list_4);
bool Overlaps(const CPURegList& other) const {
return (type_ == other.type_) && ((list_ & other.list_) != 0);
}
RegList list() const {
VIXL_ASSERT(IsValid());
return list_;
}
void set_list(RegList new_list) {
VIXL_ASSERT(IsValid());
list_ = new_list;
}
// Remove all callee-saved registers from the list. This can be useful when
// preparing registers for an AAPCS64 function call, for example.
void RemoveCalleeSaved();
CPURegister PopLowestIndex();
CPURegister PopHighestIndex();
// AAPCS64 callee-saved registers.
static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);
// AAPCS64 caller-saved registers. Note that this includes lr.
// TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
// 64-bits being caller-saved.
static CPURegList GetCallerSaved(unsigned size = kXRegSize);
static CPURegList GetCallerSavedV(unsigned size = kDRegSize);
bool IsEmpty() const {
VIXL_ASSERT(IsValid());
return list_ == 0;
}
bool IncludesAliasOf(const CPURegister& other) const {
VIXL_ASSERT(IsValid());
return (type_ == other.type()) && ((other.Bit() & list_) != 0);
}
bool IncludesAliasOf(int code) const {
VIXL_ASSERT(IsValid());
return ((static_cast<RegList>(1) << code) & list_) != 0;
}
int Count() const {
VIXL_ASSERT(IsValid());
return CountSetBits(list_);
}
unsigned RegisterSizeInBits() const {
VIXL_ASSERT(IsValid());
return size_;
}
unsigned RegisterSizeInBytes() const {
int size_in_bits = RegisterSizeInBits();
VIXL_ASSERT((size_in_bits % 8) == 0);
return size_in_bits / 8;
}
unsigned TotalSizeInBytes() const {
VIXL_ASSERT(IsValid());
return RegisterSizeInBytes() * Count();
}
private:
RegList list_;
unsigned size_;
CPURegister::RegisterType type_;
bool IsValid() const;
};
// AAPCS64 callee-saved registers.
extern const CPURegList kCalleeSaved;
extern const CPURegList kCalleeSavedV;
// AAPCS64 caller-saved registers. Note that this includes lr.
extern const CPURegList kCallerSaved;
extern const CPURegList kCallerSavedV;
// Operand.
class Operand {
public:
// #<immediate>
// where <immediate> is int64_t.
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
Operand(int64_t immediate = 0); // NOLINT(runtime/explicit)
// rm, {<shift> #<shift_amount>}
// where <shift> is one of {LSL, LSR, ASR, ROR}.
// <shift_amount> is uint6_t.
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
Operand(Register reg,
Shift shift = LSL,
unsigned shift_amount = 0); // NOLINT(runtime/explicit)
// rm, {<extend> {#<shift_amount>}}
// where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
// <shift_amount> is uint2_t.
explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
bool IsImmediate() const;
bool IsShiftedRegister() const;
bool IsExtendedRegister() const;
bool IsZero() const;
// This returns an LSL shift (<= 4) operand as an equivalent extend operand,
// which helps in the encoding of instructions that use the stack pointer.
Operand ToExtendedRegister() const;
int64_t immediate() const {
VIXL_ASSERT(IsImmediate());
return immediate_;
}
Register reg() const {
VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
return reg_;
}
CPURegister maybeReg() const {
if (IsShiftedRegister() || IsExtendedRegister())
return reg_;
return NoCPUReg;
}
Shift shift() const {
VIXL_ASSERT(IsShiftedRegister());
return shift_;
}
Extend extend() const {
VIXL_ASSERT(IsExtendedRegister());
return extend_;
}
unsigned shift_amount() const {
VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
return shift_amount_;
}
private:
int64_t immediate_;
Register reg_;
Shift shift_;
Extend extend_;
unsigned shift_amount_;
};
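// Usage sketch (illustrative; `masm` stands for a hypothetical Assembler):
//   masm.add(x0, x1, Operand(42));          // immediate form
//   masm.add(x0, x1, Operand(x2, LSL, 3));  // shifted-register form
//   masm.add(x0, sp, Operand(w2, SXTW));    // extended-register form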
// MemOperand represents the addressing mode of a load or store instruction.
class MemOperand {
public:
explicit MemOperand(Register base,
int64_t offset = 0,
AddrMode addrmode = Offset);
MemOperand(Register base,
Register regoffset,
Shift shift = LSL,
unsigned shift_amount = 0);
MemOperand(Register base,
Register regoffset,
Extend extend,
unsigned shift_amount = 0);
MemOperand(Register base,
const Operand& offset,
AddrMode addrmode = Offset);
// Adapter constructors using C++11 delegating.
// TODO: If sp == kSPRegInternalCode, the xzr check isn't necessary.
explicit MemOperand(js::jit::Address addr)
: MemOperand(IsHiddenSP(addr.base) ? sp : Register(AsRegister(addr.base), 64),
(ptrdiff_t)addr.offset) {
}
const Register& base() const { return base_; }
const Register& regoffset() const { return regoffset_; }
int64_t offset() const { return offset_; }
AddrMode addrmode() const { return addrmode_; }
Shift shift() const { return shift_; }
Extend extend() const { return extend_; }
unsigned shift_amount() const { return shift_amount_; }
bool IsImmediateOffset() const;
bool IsRegisterOffset() const;
bool IsPreIndex() const;
bool IsPostIndex() const;
void AddOffset(int64_t offset);
private:
Register base_;
Register regoffset_;
int64_t offset_;
AddrMode addrmode_;
Shift shift_;
Extend extend_;
unsigned shift_amount_;
};
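// Usage sketch (illustrative; `masm` stands for a hypothetical Assembler):
//   masm.ldr(x0, MemOperand(x1, 16));            // load from [x1 + 16]
//   masm.ldr(x0, MemOperand(x1, 16, PreIndex));  // x1 += 16, then load from [x1]
//   masm.str(x0, MemOperand(x1, x2, LSL, 3));    // store to [x1 + (x2 << 3)]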
// Control whether or not position-independent code should be emitted.
enum PositionIndependentCodeOption {
// All code generated will be position-independent; all branches and
// references to labels generated with the Label class will use PC-relative
// addressing.
PositionIndependentCode,
// Allow VIXL to generate code that refers to absolute addresses. With this
// option, it will not be possible to copy the code buffer and run it from a
// different address; code must be generated in its final location.
PositionDependentCode,
// Allow VIXL to assume that the bottom 12 bits of the address will be
// constant, but that the top 48 bits may change. This allows `adrp` to
// function in systems which copy code between pages, but otherwise maintain
// 4KB page alignment.
PageOffsetDependentCode
};
// Control how scaled- and unscaled-offset loads and stores are generated.
enum LoadStoreScalingOption {
// Prefer scaled-immediate-offset instructions, but emit unscaled-offset,
// register-offset, pre-index or post-index instructions if necessary.
PreferScaledOffset,
// Prefer unscaled-immediate-offset instructions, but emit scaled-offset,
// register-offset, pre-index or post-index instructions if necessary.
PreferUnscaledOffset,
// Require scaled-immediate-offset instructions.
RequireScaledOffset,
// Require unscaled-immediate-offset instructions.
RequireUnscaledOffset
};
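// For example (illustrative), a word load from an offset that is not a
// multiple of the access size cannot use the scaled-immediate encoding:
//   masm.ldr(w0, MemOperand(x1, 2));                         // may fall back to an unscaled form
//   masm.ldr(w0, MemOperand(x1, 2), RequireUnscaledOffset);  // must use the unscaled form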
// Assembler.
class Assembler : public MozBaseAssembler {
public:
Assembler(PositionIndependentCodeOption pic = PositionIndependentCode);
// System functions.
// Finalize a code buffer of generated instructions. This function must be
// called before executing or copying code from the buffer.
void FinalizeCode();
#define COPYENUM(v) static const Condition v = vixl::v
#define COPYENUM_(v) static const Condition v = vixl::v##_
COPYENUM(Equal);
COPYENUM(Zero);
COPYENUM(NotEqual);
COPYENUM(NonZero);
COPYENUM(AboveOrEqual);
COPYENUM(CarrySet);
COPYENUM(Below);
COPYENUM(CarryClear);
COPYENUM(Signed);
COPYENUM(NotSigned);
COPYENUM(Overflow);
COPYENUM(NoOverflow);
COPYENUM(Above);
COPYENUM(BelowOrEqual);
COPYENUM_(GreaterThanOrEqual);
COPYENUM_(LessThan);
COPYENUM_(GreaterThan);
COPYENUM_(LessThanOrEqual);
COPYENUM(Always);
COPYENUM(Never);
#undef COPYENUM
#undef COPYENUM_
// Bit set when a DoubleCondition does not map to a single ARM condition.
// The MacroAssembler must special-case these conditions, or else
// ConditionFromDoubleCondition will complain.
static const int DoubleConditionBitSpecial = 0x100;
enum DoubleCondition {
DoubleOrdered = Condition::vc,
DoubleEqual = Condition::eq,
DoubleNotEqual = Condition::ne | DoubleConditionBitSpecial,
DoubleGreaterThan = Condition::gt,
DoubleGreaterThanOrEqual = Condition::ge,
DoubleLessThan = Condition::lo, // Could also use Condition::mi.
DoubleLessThanOrEqual = Condition::ls,
// If either operand is NaN, these conditions always evaluate to true.
DoubleUnordered = Condition::vs,
DoubleEqualOrUnordered = Condition::eq | DoubleConditionBitSpecial,
DoubleNotEqualOrUnordered = Condition::ne,
DoubleGreaterThanOrUnordered = Condition::hi,
DoubleGreaterThanOrEqualOrUnordered = Condition::hs,
DoubleLessThanOrUnordered = Condition::lt,
DoubleLessThanOrEqualOrUnordered = Condition::le
};
static inline Condition InvertCondition(Condition cond) {
// Conditions al and nv behave identically, as "always true". They can't be
// inverted, because there is no "always false" condition.
VIXL_ASSERT((cond != al) && (cond != nv));
return static_cast<Condition>(cond ^ 1);
}
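// For example, InvertCondition(eq /* 0b0000 */) yields ne (0b0001), and
// InvertCondition(ge /* 0b1010 */) yields lt (0b1011).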
// This changes the condition code for cmp a, b into the equivalent code for cmp b, a.
static inline Condition InvertCmpCondition(Condition cond) {
// Conditions al and nv behave identically, as "always true". They can't be
// inverted, because there is no "always false" condition.
switch (cond) {
case eq:
case ne:
return cond;
case gt:
return le;
case le:
return gt;
case ge:
return lt;
case lt:
return ge;
case hi:
return lo;
case lo:
return hi;
case hs:
return ls;
case ls:
return hs;
case mi:
return pl;
case pl:
return mi;
default:
MOZ_CRASH("TODO: figure this case out.");
}
return static_cast<Condition>(cond ^ 1);
}
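// For example, after cmp(a, b) the condition hi tests a >u b; the equivalent
// test after cmp(b, a) is lo (b <u a), which is what the mapping above returns
// for hi.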
static inline DoubleCondition InvertCondition(DoubleCondition cond) {
switch (cond) {
case DoubleOrdered:
return DoubleUnordered;
case DoubleEqual:
return DoubleNotEqualOrUnordered;
case DoubleNotEqual:
return DoubleEqualOrUnordered;
case DoubleGreaterThan:
return DoubleLessThanOrEqualOrUnordered;
case DoubleGreaterThanOrEqual:
return DoubleLessThanOrUnordered;
case DoubleLessThan:
return DoubleGreaterThanOrEqualOrUnordered;
case DoubleLessThanOrEqual:
return DoubleGreaterThanOrUnordered;
case DoubleUnordered:
return DoubleOrdered;
case DoubleEqualOrUnordered:
return DoubleNotEqual;
case DoubleNotEqualOrUnordered:
return DoubleEqual;
case DoubleGreaterThanOrUnordered:
return DoubleLessThanOrEqual;
case DoubleGreaterThanOrEqualOrUnordered:
return DoubleLessThan;
case DoubleLessThanOrUnordered:
return DoubleGreaterThanOrEqual;
case DoubleLessThanOrEqualOrUnordered:
return DoubleGreaterThan;
default:
MOZ_CRASH("Bad condition");
}
}
static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {
VIXL_ASSERT(!(cond & DoubleConditionBitSpecial));
return static_cast<Condition>(cond);
}
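// For example, DoubleGreaterThan maps directly to gt, whereas
// DoubleEqualOrUnordered carries DoubleConditionBitSpecial and must be
// decomposed by the MacroAssembler before reaching this helper.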
// Instruction set functions.
// Branch / Jump instructions.
// Branch to register.
void br(const Register& xn);
static void br(Instruction* at, const Register& xn);
// Branch with link to register.
void blr(const Register& xn);
static void blr(Instruction* at, const Register& xn);
// Branch to register with return hint.
void ret(const Register& xn = lr);
// Unconditional branch to label.
BufferOffset b(Label* label);
// Conditional branch to label.
BufferOffset b(Label* label, Condition cond);
// Unconditional branch to PC offset.
BufferOffset b(int imm26, const LabelDoc& doc);
static void b(Instruction* at, int imm26);
// Conditional branch to PC offset.
BufferOffset b(int imm19, Condition cond, const LabelDoc& doc);
static void b(Instruction*at, int imm19, Condition cond);
// Branch with link to label.
void bl(Label* label);
// Branch with link to PC offset.
void bl(int imm26, const LabelDoc& doc);
static void bl(Instruction* at, int imm26);
// Compare and branch to label if zero.
void cbz(const Register& rt, Label* label);
// Compare and branch to PC offset if zero.
void cbz(const Register& rt, int imm19, const LabelDoc& doc);
static void cbz(Instruction* at, const Register& rt, int imm19);
// Compare and branch to label if not zero.
void cbnz(const Register& rt, Label* label);
// Compare and branch to PC offset if not zero.
void cbnz(const Register& rt, int imm19, const LabelDoc& doc);
static void cbnz(Instruction* at, const Register& rt, int imm19);
// Table lookup from one register.
void tbl(const VRegister& vd,
const VRegister& vn,
const VRegister& vm);
// Table lookup from two registers.
void tbl(const VRegister& vd,
const VRegister& vn,
const VRegister& vn2,
const VRegister& vm);
// Table lookup from three registers.
void tbl(const VRegister& vd,
const VRegister& vn,
const VRegister& vn2,
const VRegister& vn3,
const VRegister& vm);
// Table lookup from four registers.
void tbl(const VRegister& vd,
const VRegister& vn,
const VRegister& vn2,
const VRegister& vn3,
const VRegister& vn4,
const VRegister& vm);
// Table lookup extension from one register.
void tbx(const VRegister& vd,
const VRegister& vn,
const VRegister& vm);
// Table lookup extension from two registers.
void tbx(const VRegister& vd,
const VRegister& vn,
const VRegister& vn2,
const VRegister& vm);
// Table lookup extension from three registers.
void tbx(const VRegister& vd,
const VRegister& vn,
const VRegister& vn2,
const VRegister& vn3,
const VRegister& vm);
// Table lookup extension from four registers.
void tbx(const VRegister& vd,
const VRegister& vn,
const VRegister& vn2,
const VRegister& vn3,
const VRegister& vn4,
const VRegister& vm);
// Test bit and branch to label if zero.
void tbz(const Register& rt, unsigned bit_pos, Label* label);
// Test bit and branch to PC offset if zero.
void tbz(const Register& rt, unsigned bit_pos, int imm14, const LabelDoc& doc);
static void tbz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14);
// Test bit and branch to label if not zero.
void tbnz(const Register& rt, unsigned bit_pos, Label* label);
// Test bit and branch to PC offset if not zero.
void tbnz(const Register& rt, unsigned bit_pos, int imm14, const LabelDoc& doc);
static void tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14);
// Address calculation instructions.
// Calculate a PC-relative address. Unlike for branches the offset in adr is
// unscaled (i.e. the result can be unaligned).
// Calculate the address of a label.
void adr(const Register& rd, Label* label);
// Calculate the address of a PC offset.
void adr(const Register& rd, int imm21, const LabelDoc& doc);
static void adr(Instruction* at, const Register& rd, int imm21);
// Calculate the page address of a label.
void adrp(const Register& rd, Label* label);
// Calculate the page address of a PC offset.
void adrp(const Register& rd, int imm21, const LabelDoc& doc);
static void adrp(Instruction* at, const Register& rd, int imm21);
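// Usage sketch (illustrative; `masm` stands for a hypothetical Assembler, and
// label binding is handled by the surrounding Mozilla assembler layers):
//   Label skip;
//   masm.cbz(x0, &skip);  // branch past the protected code if x0 == 0
//   masm.adr(x1, &skip);  // x1 = the address that `skip` resolves to
//   masm.b(&skip);        // unconditional branch to the same label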
// Data Processing instructions.
// Add.
void add(const Register& rd,
const Register& rn,
const Operand& operand);
// Add and update status flags.
void adds(const Register& rd,
const Register& rn,
const Operand& operand);
// Compare negative.
void cmn(const Register& rn, const Operand& operand);
// Subtract.
void sub(const Register& rd,
const Register& rn,
const Operand& operand);
// Subtract and update status flags.
void subs(const Register& rd,
const Register& rn,
const Operand& operand);
// Compare.
void cmp(const Register& rn, const Operand& operand);
// Negate.
void neg(const Register& rd,
const Operand& operand);
// Negate and update status flags.
void negs(const Register& rd,
const Operand& operand);
// Add with carry bit.
void adc(const Register& rd,
const Register& rn,
const Operand& operand);
// Add with carry bit and update status flags.
void adcs(const Register& rd,
const Register& rn,
const Operand& operand);
// Subtract with carry bit.
void sbc(const Register& rd,
const Register& rn,
const Operand& operand);
// Subtract with carry bit and update status flags.
void sbcs(const Register& rd,
const Register& rn,
const Operand& operand);
// Negate with carry bit.
void ngc(const Register& rd,
const Operand& operand);
// Negate with carry bit and update status flags.
void ngcs(const Register& rd,
const Operand& operand);
// Logical instructions.
// Bitwise and (A & B).
void and_(const Register& rd,
const Register& rn,
const Operand& operand);
// Bitwise and (A & B) and update status flags.
BufferOffset ands(const Register& rd,
const Register& rn,
const Operand& operand);
// Bit test and set flags.
BufferOffset tst(const Register& rn, const Operand& operand);
// Bit clear (A & ~B).
void bic(const Register& rd,
const Register& rn,
const Operand& operand);
// Bit clear (A & ~B) and update status flags.
void bics(const Register& rd,
const Register& rn,
const Operand& operand);
// Bitwise or (A | B).
void orr(const Register& rd, const Register& rn, const Operand& operand);
// Bitwise or-not (A | ~B).
void orn(const Register& rd, const Register& rn, const Operand& operand);
// Bitwise eor/xor (A ^ B).
void eor(const Register& rd, const Register& rn, const Operand& operand);
// Bitwise enor/xnor (A ^ ~B).
void eon(const Register& rd, const Register& rn, const Operand& operand);
// Logical shift left by variable.
void lslv(const Register& rd, const Register& rn, const Register& rm);
// Logical shift right by variable.
void lsrv(const Register& rd, const Register& rn, const Register& rm);
// Arithmetic shift right by variable.
void asrv(const Register& rd, const Register& rn, const Register& rm);
// Rotate right by variable.
void rorv(const Register& rd, const Register& rn, const Register& rm);
// Bitfield instructions.
// Bitfield move.
void bfm(const Register& rd,
const Register& rn,
unsigned immr,
unsigned imms);
// Signed bitfield move.
void sbfm(const Register& rd,
const Register& rn,
unsigned immr,
unsigned imms);
// Unsigned bitfield move.
void ubfm(const Register& rd,
const Register& rn,
unsigned immr,
unsigned imms);
// Bfm aliases.
// Bitfield insert.
void bfi(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
}
// Bitfield extract and insert low.
void bfxil(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
bfm(rd, rn, lsb, lsb + width - 1);
}
// Sbfm aliases.
// Arithmetic shift right.
void asr(const Register& rd, const Register& rn, unsigned shift) {
VIXL_ASSERT(shift < rd.size());
sbfm(rd, rn, shift, rd.size() - 1);
}
// Signed bitfield insert with zero at right.
void sbfiz(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
}
// Signed bitfield extract.
void sbfx(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
sbfm(rd, rn, lsb, lsb + width - 1);
}
// Signed extend byte.
void sxtb(const Register& rd, const Register& rn) {
sbfm(rd, rn, 0, 7);
}
// Signed extend halfword.
void sxth(const Register& rd, const Register& rn) {
sbfm(rd, rn, 0, 15);
}
// Signed extend word.
void sxtw(const Register& rd, const Register& rn) {
sbfm(rd, rn, 0, 31);
}
// Ubfm aliases.
// Logical shift left.
void lsl(const Register& rd, const Register& rn, unsigned shift) {
unsigned reg_size = rd.size();
VIXL_ASSERT(shift < reg_size);
ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
}
// Logical shift right.
void lsr(const Register& rd, const Register& rn, unsigned shift) {
VIXL_ASSERT(shift < rd.size());
ubfm(rd, rn, shift, rd.size() - 1);
}
// Unsigned bitfield insert with zero at right.
void ubfiz(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
}
// Unsigned bitfield extract.
void ubfx(const Register& rd,
const Register& rn,
unsigned lsb,
unsigned width) {
VIXL_ASSERT(width >= 1);
VIXL_ASSERT(lsb + width <= rn.size());
ubfm(rd, rn, lsb, lsb + width - 1);
}
// Unsigned extend byte.
void uxtb(const Register& rd, const Register& rn) {
ubfm(rd, rn, 0, 7);
}
// Unsigned extend halfword.
void uxth(const Register& rd, const Register& rn) {
ubfm(rd, rn, 0, 15);
}
// Unsigned extend word.
void uxtw(const Register& rd, const Register& rn) {
ubfm(rd, rn, 0, 31);
}
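// Worked examples of the aliases above (illustrative):
//   lsl(x0, x1, 4) emits ubfm(x0, x1, 60, 59), i.e. (64 - 4) % 64 and 64 - 4 - 1.
//   ubfx(w0, w1, 8, 8) emits ubfm(w0, w1, 8, 15), extracting bits 15:8 of w1.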
// Extract.
void extr(const Register& rd,
const Register& rn,
const Register& rm,
unsigned lsb);
// Conditional select: rd = cond ? rn : rm.
void csel(const Register& rd,
const Register& rn,
const Register& rm,
Condition cond);
// Conditional select increment: rd = cond ? rn : rm + 1.
void csinc(const Register& rd,
const Register& rn,
const Register& rm,
Condition cond);
// Conditional select inversion: rd = cond ? rn : ~rm.
void csinv(const Register& rd,
const Register& rn,
const Register& rm,
Condition cond);
// Conditional select negation: rd = cond ? rn : -rm.
void csneg(const Register& rd,
const Register& rn,
const Register& rm,
Condition cond);
// Conditional set: rd = cond ? 1 : 0.
void cset(const Register& rd, Condition cond);
// Conditional set mask: rd = cond ? -1 : 0.
void csetm(const Register& rd, Condition cond);
// Conditional increment: rd = cond ? rn + 1 : rn.
void cinc(const Register& rd, const Register& rn, Condition cond);
// Conditional invert: rd = cond ? ~rn : rn.
void cinv(const Register& rd, const Register& rn, Condition cond);
// Conditional negate: rd = cond ? -rn : rn.
void cneg(const Register& rd, const Register& rn, Condition cond);
// Rotate right.
void ror(const Register& rd, const Register& rs, unsigned shift) {
extr(rd, rs, rs, shift);
}
// Conditional comparison.
// Conditional compare negative.
void ccmn(const Register& rn,
const Operand& operand,
StatusFlags nzcv,
Condition cond);
// Conditional compare.
void ccmp(const Register& rn,
const Operand& operand,
StatusFlags nzcv,
Condition cond);
// CRC-32 checksum from byte.
void crc32b(const Register& rd,
const Register& rn,
const Register& rm);
// CRC-32 checksum from half-word.
void crc32h(const Register& rd,
const Register& rn,
const Register& rm);
// CRC-32 checksum from word.
void crc32w(const Register& rd,
const Register& rn,
const Register& rm);
// CRC-32 checksum from double word.
void crc32x(const Register& rd,
const Register& rn,
const Register& rm);
// CRC-32C checksum from byte.
void crc32cb(const Register& rd,
const Register& rn,
const Register& rm);
// CRC-32C checksum from half-word.
void crc32ch(const Register& rd,
const Register& rn,
const Register& rm);
// CRC-32C checksum from word.
void crc32cw(const Register& rd,
const Register& rn,
const Register& rm);
// CRC-32C checksum from double word.
void crc32cx(const Register& rd,
const Register& rn,
const Register& rm);
// Multiply.
void mul(const Register& rd, const Register& rn, const Register& rm);
// Negated multiply.
void mneg(const Register& rd, const Register& rn, const Register& rm);
// Signed long multiply: 32 x 32 -> 64-bit.
void smull(const Register& rd, const Register& rn, const Register& rm);
// Signed multiply high: 64 x 64 -> 64-bit <127:64>.
void smulh(const Register& xd, const Register& xn, const Register& xm);
// Multiply and accumulate.
void madd(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra);
// Multiply and subtract.
void msub(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra);
// Signed long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
void smaddl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra);
// Unsigned long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
void umaddl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra);
// Unsigned long multiply: 32 x 32 -> 64-bit.
void umull(const Register& rd,
const Register& rn,
const Register& rm) {
umaddl(rd, rn, rm, xzr);
}
// Unsigned multiply high: 64 x 64 -> 64-bit <127:64>.
void umulh(const Register& xd,
const Register& xn,
const Register& xm);
// Signed long multiply and subtract: 64 - (32 x 32) -> 64-bit.
void smsubl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra);
// Unsigned long multiply and subtract: 64 - (32 x 32) -> 64-bit.
void umsubl(const Register& rd,
const Register& rn,
const Register& rm,
const Register& ra);
// Signed integer divide.
void sdiv(const Register& rd, const Register& rn, const Register& rm);
// Unsigned integer divide.
void udiv(const Register& rd, const Register& rn, const Register& rm);
// Bit reverse.
void rbit(const Register& rd, const Register& rn);
// Reverse bytes in 16-bit half words.
void rev16(const Register& rd, const Register& rn);
// Reverse bytes in 32-bit words.
void rev32(const Register& rd, const Register& rn);
// Reverse bytes.
void rev(const Register& rd, const Register& rn);
// Count leading zeroes.
void clz(const Register& rd, const Register& rn);
// Count leading sign bits.
void cls(const Register& rd, const Register& rn);
// Memory instructions.
// Load integer or FP register.
void ldr(const CPURegister& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferScaledOffset);
// Store integer or FP register.
void str(const CPURegister& rt, const MemOperand& dst,
LoadStoreScalingOption option = PreferScaledOffset);
// Load word with sign extension.
void ldrsw(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferScaledOffset);
// Load byte.
void ldrb(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferScaledOffset);
// Store byte.
void strb(const Register& rt, const MemOperand& dst,
LoadStoreScalingOption option = PreferScaledOffset);
// Load byte with sign extension.
void ldrsb(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferScaledOffset);
// Load half-word.
void ldrh(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferScaledOffset);
// Store half-word.
void strh(const Register& rt, const MemOperand& dst,
LoadStoreScalingOption option = PreferScaledOffset);
// Load half-word with sign extension.
void ldrsh(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferScaledOffset);
// Load integer or FP register (with unscaled offset).
void ldur(const CPURegister& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Store integer or FP register (with unscaled offset).
void stur(const CPURegister& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Load word with sign extension (with unscaled offset).
void ldursw(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Load byte (with unscaled offset).
void ldurb(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Store byte (with unscaled offset).
void sturb(const Register& rt, const MemOperand& dst,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Load byte with sign extension (and unscaled offset).
void ldursb(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Load half-word (with unscaled offset).
void ldurh(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Store half-word (with unscaled offset).
void sturh(const Register& rt, const MemOperand& dst,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Load half-word with sign extension (and unscaled offset).
void ldursh(const Register& rt, const MemOperand& src,
LoadStoreScalingOption option = PreferUnscaledOffset);
// Load integer or FP register pair.
void ldp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& src);
// Store integer or FP register pair.
void stp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& dst);
// Load word pair with sign extension.
void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
// Load integer or FP register pair, non-temporal.
void ldnp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& src);
// Store integer or FP register pair, non-temporal.
void stnp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& dst);
// Load integer or FP register from pc + imm19 << 2.
void ldr(const CPURegister& rt, int imm19);
static void ldr(Instruction* at, const CPURegister& rt, int imm19);
// Load word with sign extension from pc + imm19 << 2.
void ldrsw(const Register& rt, int imm19);
// Store exclusive byte.
void stxrb(const Register& rs, const Register& rt, const MemOperand& dst);
// Store exclusive half-word.
void stxrh(const Register& rs, const Register& rt, const MemOperand& dst);
// Store exclusive register.
void stxr(const Register& rs, const Register& rt, const MemOperand& dst);
// Load exclusive byte.
void ldxrb(const Register& rt, const MemOperand& src);
// Load exclusive half-word.
void ldxrh(const Register& rt, const MemOperand& src);
// Load exclusive register.
void ldxr(const Register& rt, const MemOperand& src);
// Store exclusive register pair.
void stxp(const Register& rs,
const Register& rt,
const Register& rt2,
const MemOperand& dst);
// Load exclusive register pair.
void ldxp(const Register& rt, const Register& rt2, const MemOperand& src);
// Store-release exclusive byte.
void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst);
// Store-release exclusive half-word.
void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst);
// Store-release exclusive register.
void stlxr(const Register& rs, const Register& rt, const MemOperand& dst);
// Load-acquire exclusive byte.
void ldaxrb(const Register& rt, const MemOperand& src);
// Load-acquire exclusive half-word.
void ldaxrh(const Register& rt, const MemOperand& src);
// Load-acquire exclusive register.
void ldaxr(const Register& rt, const MemOperand& src);
// Store-release exclusive register pair.
void stlxp(const Register& rs,
const Register& rt,
const Register& rt2,
const MemOperand& dst);
// Load-acquire exclusive register pair.
void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src);
// Store-release byte.
void stlrb(const Register& rt, const MemOperand& dst);
// Store-release half-word.
void stlrh(const Register& rt, const MemOperand& dst);
// Store-release register.
void stlr(const Register& rt, const MemOperand& dst);
// Load-acquire byte.
void ldarb(const Register& rt, const MemOperand& src);
// Load-acquire half-word.
void ldarh(const Register& rt, const MemOperand& src);
// Load-acquire register.
void ldar(const Register& rt, const MemOperand& src);
// Compare and Swap word or doubleword in memory [Armv8.1].
void cas(const Register& rs, const Register& rt, const MemOperand& src);
// Compare and Swap word or doubleword in memory, with Load-acquire semantics
// [Armv8.1].
void casa(const Register& rs, const Register& rt, const MemOperand& src);
// Compare and Swap word or doubleword in memory, with Store-release semantics
// [Armv8.1].
void casl(const Register& rs, const Register& rt, const MemOperand& src);
// Compare and Swap word or doubleword in memory, with Load-acquire and
// Store-release semantics [Armv8.1].
void casal(const Register& rs, const Register& rt, const MemOperand& src);
// Compare and Swap byte in memory [Armv8.1].
void casb(const Register& rs, const Register& rt, const MemOperand& src);
// Compare and Swap byte in memory, with Load-acquire semantics [Armv8.1].
void casab(const Register& rs, const Register& rt, const MemOperand& src);
// Compare and Swap byte in memory, with Store-release semantics [Armv8.1].
void caslb(const Register& rs, const Register& rt, const MemOperand& src);
// Compare and Swap byte in memory, with Load-acquire and Store-release
// semantics [Armv8.1].
void casalb(const Register& rs, const Register& rt, const MemOperand& src);
// Compare and Swap halfword in memory [Armv8.1].
void cash(const Register& rs, const Register& rt, const MemOperand& src);
// Compare and Swap halfword in memory, with Load-acquire semantics [Armv8.1].
void casah(const Register& rs, const Register& rt, const MemOperand& src);
// Compare and Swap halfword in memory, with Store-release semantics [Armv8.1].
void caslh(const Register& rs, const Register& rt, const MemOperand& src);
// Compare and Swap halfword in memory, with Load-acquire and Store-release
// semantics [Armv8.1].
void casalh(const Register& rs, const Register& rt, const MemOperand& src);
// Compare and Swap Pair of words or doublewords in memory [Armv8.1].
void casp(const Register& rs,
const Register& rs2,
const Register& rt,
const Register& rt2,
const MemOperand& src);
// Compare and Swap Pair of words or doublewords in memory, with Load-acquire
// semantics [Armv8.1].
void caspa(const Register& rs,
const Register& rs2,
const Register& rt,
const Register& rt2,
const MemOperand& src);
// Compare and Swap Pair of words or doublewords in memory, with Store-release
// semantics [Armv8.1].
void caspl(const Register& rs,
const Register& rs2,
const Register& rt,
const Register& rt2,
const MemOperand& src);
// Compare and Swap Pair of words or doublewords in memory, with Load-acquire
// and Store-release semantics [Armv8.1].
void caspal(const Register& rs,
const Register& rs2,
const Register& rt,
const Register& rt2,
const MemOperand& src);
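// Usage sketch (illustrative; `masm` stands for a hypothetical Assembler):
//   masm.casal(x0, x1, MemOperand(x2));
// compares the doubleword at [x2] with x0 and, if they match, stores x1, with
// Load-acquire and Store-release semantics; the value read is returned in x0.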
// Atomic add on byte in memory [Armv8.1]
void ldaddb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic add on byte in memory, with Load-acquire semantics [Armv8.1]
void ldaddab(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic add on byte in memory, with Store-release semantics [Armv8.1]
void ldaddlb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic add on byte in memory, with Load-acquire and Store-release semantics
// [Armv8.1]
void ldaddalb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic add on halfword in memory [Armv8.1]
void ldaddh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic add on halfword in memory, with Load-acquire semantics [Armv8.1]
void ldaddah(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic add on halfword in memory, with Store-release semantics [Armv8.1]
void ldaddlh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic add on halfword in memory, with Load-acquire and Store-release
// semantics [Armv8.1]
void ldaddalh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic add on word or doubleword in memory [Armv8.1]
void ldadd(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic add on word or doubleword in memory, with Load-acquire semantics
// [Armv8.1]
void ldadda(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic add on word or doubleword in memory, with Store-release semantics
// [Armv8.1]
void ldaddl(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic add on word or doubleword in memory, with Load-acquire and
// Store-release semantics [Armv8.1]
void ldaddal(const Register& rs, const Register& rt, const MemOperand& src);
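// Usage sketch (illustrative; `masm` stands for a hypothetical Assembler):
//   masm.ldaddal(w0, w1, MemOperand(x2));
// atomically adds w0 to the word at [x2] with Load-acquire and Store-release
// semantics, returning the previous memory value in w1.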
// Atomic bit clear on byte in memory [Armv8.1]
void ldclrb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit clear on byte in memory, with Load-acquire semantics [Armv8.1]
void ldclrab(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit clear on byte in memory, with Store-release semantics [Armv8.1]
void ldclrlb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit clear on byte in memory, with Load-acquire and Store-release
// semantics [Armv8.1]
void ldclralb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit clear on halfword in memory [Armv8.1]
void ldclrh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit clear on halfword in memory, with Load-acquire semantics
// [Armv8.1]
void ldclrah(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit clear on halfword in memory, with Store-release semantics
// [Armv8.1]
void ldclrlh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit clear on halfword in memory, with Load-acquire and Store-release
// semantics [Armv8.1]
void ldclralh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit clear on word or doubleword in memory [Armv8.1]
void ldclr(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit clear on word or doubleword in memory, with Load-acquire
// semantics [Armv8.1]
void ldclra(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit clear on word or doubleword in memory, with Store-release
// semantics [Armv8.1]
void ldclrl(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit clear on word or doubleword in memory, with Load-acquire and
// Store-release semantics [Armv8.1]
void ldclral(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic exclusive OR on byte in memory [Armv8.1]
void ldeorb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic exclusive OR on byte in memory, with Load-acquire semantics
// [Armv8.1]
void ldeorab(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic exclusive OR on byte in memory, with Store-release semantics
// [Armv8.1]
void ldeorlb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic exclusive OR on byte in memory, with Load-acquire and Store-release
// semantics [Armv8.1]
void ldeoralb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic exclusive OR on halfword in memory [Armv8.1]
void ldeorh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic exclusive OR on halfword in memory, with Load-acquire semantics
// [Armv8.1]
void ldeorah(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic exclusive OR on halfword in memory, with Store-release semantics
// [Armv8.1]
void ldeorlh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic exclusive OR on halfword in memory, with Load-acquire and
// Store-release semantics [Armv8.1]
void ldeoralh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic exclusive OR on word or doubleword in memory [Armv8.1]
void ldeor(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic exclusive OR on word or doubleword in memory, with Load-acquire
// semantics [Armv8.1]
void ldeora(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic exclusive OR on word or doubleword in memory, with Store-release
// semantics [Armv8.1]
void ldeorl(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic exclusive OR on word or doubleword in memory, with Load-acquire and
// Store-release semantics [Armv8.1]
void ldeoral(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit set on byte in memory [Armv8.1]
void ldsetb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit set on byte in memory, with Load-acquire semantics [Armv8.1]
void ldsetab(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit set on byte in memory, with Store-release semantics [Armv8.1]
void ldsetlb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit set on byte in memory, with Load-acquire and Store-release
// semantics [Armv8.1]
void ldsetalb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit set on halfword in memory [Armv8.1]
void ldseth(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit set on halfword in memory, with Load-acquire semantics [Armv8.1]
void ldsetah(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit set on halfword in memory, with Store-release semantics
// [Armv8.1]
void ldsetlh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit set on halfword in memory, with Load-acquire and Store-release
// semantics [Armv8.1]
void ldsetalh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit set on word or doubleword in memory [Armv8.1]
void ldset(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit set on word or doubleword in memory, with Load-acquire semantics
// [Armv8.1]
void ldseta(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit set on word or doubleword in memory, with Store-release
// semantics [Armv8.1]
void ldsetl(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic bit set on word or doubleword in memory, with Load-acquire and
// Store-release semantics [Armv8.1]
void ldsetal(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed maximum on byte in memory [Armv8.1]
void ldsmaxb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed maximum on byte in memory, with Load-acquire semantics
// [Armv8.1]
void ldsmaxab(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed maximum on byte in memory, with Store-release semantics
// [Armv8.1]
void ldsmaxlb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed maximum on byte in memory, with Load-acquire and
// Store-release semantics [Armv8.1]
void ldsmaxalb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed maximum on halfword in memory [Armv8.1]
void ldsmaxh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed maximum on halfword in memory, with Load-acquire semantics
// [Armv8.1]
void ldsmaxah(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed maximum on halfword in memory, with Store-release semantics
// [Armv8.1]
void ldsmaxlh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed maximum on halfword in memory, with Load-acquire and
// Store-release semantics [Armv8.1]
void ldsmaxalh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed maximum on word or doubleword in memory [Armv8.1]
void ldsmax(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed maximum on word or doubleword in memory, with Load-acquire
// semantics [Armv8.1]
void ldsmaxa(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed maximum on word or doubleword in memory, with Store-release
// semantics [Armv8.1]
void ldsmaxl(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed maximum on word or doubleword in memory, with Load-acquire
// and Store-release semantics [Armv8.1]
void ldsmaxal(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed minimum on byte in memory [Armv8.1]
void ldsminb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed minimum on byte in memory, with Load-acquire semantics
// [Armv8.1]
void ldsminab(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed minimum on byte in memory, with Store-release semantics
// [Armv8.1]
void ldsminlb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed minimum on byte in memory, with Load-acquire and
// Store-release semantics [Armv8.1]
void ldsminalb(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed minimum on halfword in memory [Armv8.1]
void ldsminh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed minimum on halfword in memory, with Load-acquire semantics
// [Armv8.1]
void ldsminah(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed minimum on halfword in memory, with Store-release semantics
// [Armv8.1]
void ldsminlh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed minimum on halfword in memory, with Load-acquire and
// Store-release semantics [Armv8.1]
void ldsminalh(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed minimum on word or doubleword in memory [Armv8.1]
void ldsmin(const Register& rs, const Register& rt, const MemOperand& src);
// Atomic signed minimum on word or doubleword in memory, with Load-acquire
// semantics [Armv8.1]
void ldsmina(const Register& rs, const Register& rt, const MemOperand& src);