/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "jit/riscv64/MacroAssembler-riscv64.h"
#include "jsmath.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/MacroAssembler.h"
#include "jit/MoveEmitter.h"
#include "jit/riscv64/SharedICRegisters-riscv64.h"
#include "util/Memory.h"
#include "vm/JitActivation.h" // jit::JitActivation
#include "vm/JSContext.h"
#include "jit/MacroAssembler-inl.h"
namespace js {
namespace jit {
// Downcast helpers: a MacroAssemblerRiscv64 is only ever embedded in a
// MacroAssembler, so the static_cast is safe.
MacroAssembler& MacroAssemblerRiscv64::asMasm() {
  return *static_cast<MacroAssembler*>(this);
}
const MacroAssembler& MacroAssemblerRiscv64::asMasm() const {
  return *static_cast<const MacroAssembler*>(this);
}
// Set rd to the boolean result of comparing rj against a 64-bit immediate.
// Immediates that fit in a non-negative int32 are delegated to the Imm32
// overload; anything else is materialized into a scratch register first.
void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Register rj, ImmWord imm,
                                       Condition c) {
  if (imm.value <= INT32_MAX) {
    ma_cmp_set(rd, rj, Imm32(uint32_t(imm.value)), c);
  } else {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    ma_li(scratch, imm);
    ma_cmp_set(rd, rj, scratch, c);
  }
}
// ImmPtr comparison: reuse the ImmWord overload on the raw pointer bits.
void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Register rj, ImmPtr imm,
                                       Condition c) {
  ma_cmp_set(rd, rj, ImmWord(uintptr_t(imm.value)), c);
}
// Load a 32-bit value from memory and compare it against an immediate,
// setting rd to the boolean result.
void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Address address, Imm32 imm,
                                       Condition c) {
  // TODO(loong64): 32-bit ma_cmp_set?
  UseScratchRegisterScope temps(this);
  Register scratch2 = temps.Acquire();
  ma_load(scratch2, address, SizeWord);
  ma_cmp_set(rd, Register(scratch2), imm, c);
}
// Load a 64-bit value from memory and compare it against an immediate,
// setting rd to the boolean result.
void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Address address,
                                       ImmWord imm, Condition c) {
  UseScratchRegisterScope temps(this);
  Register scratch2 = temps.Acquire();
  ma_load(scratch2, address, SizeDouble);
  ma_cmp_set(rd, Register(scratch2), imm, c);
}
// Set rd to the boolean result of comparing rj against a 32-bit immediate.
// Comparisons against zero use a single slt/sltu against the hard-wired
// zero register where possible.
void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Register rj, Imm32 imm,
                                       Condition c) {
  if (imm.value == 0) {
    switch (c) {
      case Equal:
      case BelowOrEqual:
        // rj == 0 <=> rj < 1 (unsigned); unsigned <= 0 is the same test.
        ma_sltu(rd, rj, Operand(1));
        break;
      case NotEqual:
      case Above:
        // rj != 0 <=> 0 < rj (unsigned).
        sltu(rd, zero, rj);
        break;
      case AboveOrEqual:
      case Below:
        // Unsigned >= 0 is always true; unsigned < 0 is always false.
        ori(rd, zero, c == AboveOrEqual ? 1 : 0);
        break;
      case GreaterThan:
      case LessThanOrEqual:
        // rd = (0 < rj); invert for <=.
        slt(rd, zero, rj);
        if (c == LessThanOrEqual) {
          xori(rd, rd, 1);
        }
        break;
      case LessThan:
      case GreaterThanOrEqual:
        // rd = (rj < 0); invert for >=.
        slt(rd, rj, zero);
        if (c == GreaterThanOrEqual) {
          xori(rd, rd, 1);
        }
        break;
      case Zero:
        ma_sltu(rd, rj, Operand(1));
        break;
      case NonZero:
        sltu(rd, zero, rj);
        break;
      case Signed:
        slt(rd, rj, zero);
        break;
      case NotSigned:
        slt(rd, rj, zero);
        xori(rd, rd, 1);
        break;
      default:
        MOZ_CRASH("Invalid condition.");
    }
    return;
  }
  switch (c) {
    case Equal:
    case NotEqual:
      // XOR leaves zero iff the operands are equal; then test against zero.
      ma_xor(rd, rj, imm);
      if (c == Equal) {
        ma_sltu(rd, rd, Operand(1));
      } else {
        sltu(rd, zero, rd);
      }
      break;
    case Zero:
    case NonZero:
    case Signed:
    case NotSigned:
      // These conditions are only meaningful against zero, handled above.
      MOZ_CRASH("Invalid condition.");
    default:
      // ma_cmp puts 0/1 in rd and returns the condition (vs. zero) under
      // which the requested comparison holds; invert when that is Equal.
      Condition cond = ma_cmp(rd, rj, imm, c);
      MOZ_ASSERT(cond == Equal || cond == NotEqual);
      if (cond == Equal) xori(rd, rd, 1);
  }
}
// Emit a slt/sltu of lhs vs. rhs into dest and return the condition
// (Equal or NotEqual against zero) under which the requested condition c
// holds, so the caller can branch on (dest <returned-cond> 0).
Assembler::Condition MacroAssemblerRiscv64::ma_cmp(Register dest, Register lhs,
                                                   Register rhs, Condition c) {
  switch (c) {
    case Above:
      // bgtu s,t,label =>
      //   sltu at,t,s
      //   bne at,$zero,offs
      sltu(dest, rhs, lhs);
      return NotEqual;
    case AboveOrEqual:
      // bgeu s,t,label =>
      //   sltu at,s,t
      //   beq at,$zero,offs
      sltu(dest, lhs, rhs);
      return Equal;
    case Below:
      // bltu s,t,label =>
      //   sltu at,s,t
      //   bne at,$zero,offs
      sltu(dest, lhs, rhs);
      return NotEqual;
    case BelowOrEqual:
      // bleu s,t,label =>
      //   sltu at,t,s
      //   beq at,$zero,offs
      sltu(dest, rhs, lhs);
      return Equal;
    case GreaterThan:
      // bgt s,t,label =>
      //   slt at,t,s
      //   bne at,$zero,offs
      slt(dest, rhs, lhs);
      return NotEqual;
    case GreaterThanOrEqual:
      // bge s,t,label =>
      //   slt at,s,t
      //   beq at,$zero,offs
      slt(dest, lhs, rhs);
      return Equal;
    case LessThan:
      // blt s,t,label =>
      //   slt at,s,t
      //   bne at,$zero,offs
      slt(dest, lhs, rhs);
      return NotEqual;
    case LessThanOrEqual:
      // ble s,t,label =>
      //   slt at,t,s
      //   beq at,$zero,offs
      slt(dest, rhs, lhs);
      return Equal;
    default:
      MOZ_CRASH("Invalid condition.");
  }
  return Always;
}
// Immediate flavor of ma_cmp. When the immediate (or immediate + 1 for the
// <=-style conditions) fits in a 12-bit slti/sltiu operand a single
// instruction is used; otherwise the immediate goes through a scratch
// register. Returns the condition (vs. zero) under which c holds.
Assembler::Condition MacroAssemblerRiscv64::ma_cmp(Register dest, Register lhs,
                                                   Imm32 imm, Condition c) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  // lhs must not alias the scratch used to materialize the immediate.
  MOZ_RELEASE_ASSERT(lhs != scratch);
  switch (c) {
    case Above:
    case BelowOrEqual:
      // The 0x7fffffff test avoids signed overflow in imm.value + 1; the
      // -1 test avoids the unsigned wrap of rhs + 1 to 0.
      if (imm.value != 0x7fffffff && is_intn(imm.value + 1, 12) &&
          imm.value != -1) {
        // lhs <= rhs via lhs < rhs + 1 if rhs + 1 does not overflow
        ma_sltu(dest, lhs, Operand(imm.value + 1));
        return (c == BelowOrEqual ? NotEqual : Equal);
      } else {
        ma_li(scratch, imm);
        sltu(dest, scratch, lhs);
        return (c == BelowOrEqual ? Equal : NotEqual);
      }
    case AboveOrEqual:
    case Below:
      if (is_intn(imm.value, 12)) {
        ma_sltu(dest, lhs, Operand(imm.value));
      } else {
        ma_li(scratch, imm);
        sltu(dest, lhs, scratch);
      }
      return (c == AboveOrEqual ? Equal : NotEqual);
    case GreaterThan:
    case LessThanOrEqual:
      if (imm.value != 0x7fffffff && is_intn(imm.value + 1, 12)) {
        // lhs <= rhs via lhs < rhs + 1.
        ma_slt(dest, lhs, Operand(imm.value + 1));
        return (c == LessThanOrEqual ? NotEqual : Equal);
      } else {
        ma_li(scratch, imm);
        slt(dest, scratch, lhs);
        return (c == LessThanOrEqual ? Equal : NotEqual);
      }
    case GreaterThanOrEqual:
    case LessThan:
      if (is_intn(imm.value, 12)) {
        ma_slt(dest, lhs, imm);
      } else {
        ma_li(scratch, imm);
        slt(dest, lhs, scratch);
      }
      return (c == GreaterThanOrEqual ? Equal : NotEqual);
    default:
      MOZ_CRASH("Invalid condition.");
  }
  return Always;
}
// Set rd to the 0/1 result of comparing rj with rk. For the
// zero-comparison conditions (Zero/NonZero/Signed/NotSigned) callers are
// expected to pass rj == rk, asserted below.
void MacroAssemblerRiscv64::ma_cmp_set(Register rd, Register rj, Register rk,
                                       Condition c) {
  switch (c) {
    case Equal:
      // seq d,s,t =>
      //   xor d,s,t
      //   sltiu d,d,1
      xor_(rd, rj, rk);
      ma_sltu(rd, rd, Operand(1));
      break;
    case NotEqual:
      // sne d,s,t =>
      //   xor d,s,t
      //   sltu d,$zero,d
      xor_(rd, rj, rk);
      sltu(rd, zero, rd);
      break;
    case Above:
      // sgtu d,s,t =>
      //   sltu d,t,s
      sltu(rd, rk, rj);
      break;
    case AboveOrEqual:
      // sgeu d,s,t =>
      //   sltu d,s,t
      //   xori d,d,1
      sltu(rd, rj, rk);
      xori(rd, rd, 1);
      break;
    case Below:
      // sltu d,s,t
      sltu(rd, rj, rk);
      break;
    case BelowOrEqual:
      // sleu d,s,t =>
      //   sltu d,t,s
      //   xori d,d,1
      sltu(rd, rk, rj);
      xori(rd, rd, 1);
      break;
    case GreaterThan:
      // sgt d,s,t =>
      //   slt d,t,s
      slt(rd, rk, rj);
      break;
    case GreaterThanOrEqual:
      // sge d,s,t =>
      //   slt d,s,t
      //   xori d,d,1
      slt(rd, rj, rk);
      xori(rd, rd, 1);
      break;
    case LessThan:
      // slt d,s,t
      slt(rd, rj, rk);
      break;
    case LessThanOrEqual:
      // sle d,s,t =>
      //   slt d,t,s
      //   xori d,d,1
      slt(rd, rk, rj);
      xori(rd, rd, 1);
      break;
    case Zero:
      MOZ_ASSERT(rj == rk);
      // seq d,s,$zero =>
      //   sltiu d,s,1
      ma_sltu(rd, rj, Operand(1));
      break;
    case NonZero:
      MOZ_ASSERT(rj == rk);
      // sne d,s,$zero =>
      //   sltu d,$zero,s
      sltu(rd, zero, rj);
      break;
    case Signed:
      MOZ_ASSERT(rj == rk);
      slt(rd, rj, zero);
      break;
    case NotSigned:
      MOZ_ASSERT(rj == rk);
      // sge d,s,$zero =>
      //   slt d,s,$zero
      //   xori d,d,1
      slt(rd, rj, zero);
      xori(rd, rd, 1);
      break;
    default:
      MOZ_CRASH("Invalid condition.");
  }
}
// Set rd to the 0/1 result of the single-precision FP comparison cc.
// RISC-V feq/flt/fle produce 0 when either operand is NaN, so for the
// *OrUnordered variants a NaN check is OR'ed into rd at the end.
void MacroAssemblerRiscv64::ma_compareF32(Register rd, DoubleCondition cc,
                                          FloatRegister cmp1,
                                          FloatRegister cmp2) {
  switch (cc) {
    case DoubleEqualOrUnordered:
    case DoubleEqual:
      feq_s(rd, cmp1, cmp2);
      break;
    case DoubleNotEqualOrUnordered:
    case DoubleNotEqual: {
      // Compute !(cmp1 == cmp2); a NaN input skips the feq so the negate
      // yields 0 (ordered not-equal is false on NaN).
      Label done;
      CompareIsNanF32(rd, cmp1, cmp2);
      ma_branch(&done, Equal, rd, Operand(1));
      feq_s(rd, cmp1, cmp2);
      bind(&done);
      NegateBool(rd, rd);
      break;
    }
    case DoubleLessThanOrUnordered:
    case DoubleLessThan:
      flt_s(rd, cmp1, cmp2);
      break;
    case DoubleGreaterThanOrEqualOrUnordered:
    case DoubleGreaterThanOrEqual:
      // a >= b computed as b <= a.
      fle_s(rd, cmp2, cmp1);
      break;
    case DoubleLessThanOrEqualOrUnordered:
    case DoubleLessThanOrEqual:
      fle_s(rd, cmp1, cmp2);
      break;
    case DoubleGreaterThanOrUnordered:
    case DoubleGreaterThan:
      // a > b computed as b < a.
      flt_s(rd, cmp2, cmp1);
      break;
    case DoubleOrdered:
      CompareIsNotNanF32(rd, cmp1, cmp2);
      return;
    case DoubleUnordered:
      CompareIsNanF32(rd, cmp1, cmp2);
      return;
  }
  // For the *OrUnordered conditions, a NaN operand must also yield true.
  if (cc >= FIRST_UNORDERED && cc <= LAST_UNORDERED) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    CompareIsNanF32(scratch, cmp1, cmp2);
    or_(rd, rd, scratch);
  }
}
// Double-precision counterpart of ma_compareF32; see the comments there.
void MacroAssemblerRiscv64::ma_compareF64(Register rd, DoubleCondition cc,
                                          FloatRegister cmp1,
                                          FloatRegister cmp2) {
  switch (cc) {
    case DoubleEqualOrUnordered:
    case DoubleEqual:
      feq_d(rd, cmp1, cmp2);
      break;
    case DoubleNotEqualOrUnordered:
    case DoubleNotEqual: {
      // Compute !(cmp1 == cmp2); a NaN input skips the feq so the negate
      // yields 0 (ordered not-equal is false on NaN).
      Label done;
      CompareIsNanF64(rd, cmp1, cmp2);
      ma_branch(&done, Equal, rd, Operand(1));
      feq_d(rd, cmp1, cmp2);
      bind(&done);
      NegateBool(rd, rd);
    } break;
    case DoubleLessThanOrUnordered:
    case DoubleLessThan:
      flt_d(rd, cmp1, cmp2);
      break;
    case DoubleGreaterThanOrEqualOrUnordered:
    case DoubleGreaterThanOrEqual:
      // a >= b computed as b <= a.
      fle_d(rd, cmp2, cmp1);
      break;
    case DoubleLessThanOrEqualOrUnordered:
    case DoubleLessThanOrEqual:
      fle_d(rd, cmp1, cmp2);
      break;
    case DoubleGreaterThanOrUnordered:
    case DoubleGreaterThan:
      // a > b computed as b < a.
      flt_d(rd, cmp2, cmp1);
      break;
    case DoubleOrdered:
      CompareIsNotNanF64(rd, cmp1, cmp2);
      return;
    case DoubleUnordered:
      CompareIsNanF64(rd, cmp1, cmp2);
      return;
  }
  // For the *OrUnordered conditions, a NaN operand must also yield true.
  if (cc >= FIRST_UNORDERED && cc <= LAST_UNORDERED) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    CompareIsNanF64(scratch, cmp1, cmp2);
    or_(rd, rd, scratch);
  }
}
// Pointer-sized moves from registers and immediates.
void MacroAssemblerRiscv64Compat::movePtr(Register src, Register dest) {
  mv(dest, src);
}
void MacroAssemblerRiscv64Compat::movePtr(ImmWord imm, Register dest) {
  ma_li(dest, imm);
}
void MacroAssemblerRiscv64Compat::movePtr(ImmGCPtr imm, Register dest) {
  ma_li(dest, imm);
}
void MacroAssemblerRiscv64Compat::movePtr(ImmPtr imm, Register dest) {
  movePtr(ImmWord(uintptr_t(imm.value)), dest);
}
// Load a wasm symbolic address via a fixed-length patchable Li64 sequence.
// The placeholder (-1) is recorded as a SymbolicAccess so the real address
// can be patched in later; the trampoline pool is blocked so the sequence
// stays contiguous.
void MacroAssemblerRiscv64Compat::movePtr(wasm::SymbolicAddress imm,
                                          Register dest) {
  DEBUG_PRINTF("[ %s\n", __FUNCTION__);
  BlockTrampolinePoolScope block_trampoline_pool(this, 8);
  append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
  ma_liPatchable(dest, ImmWord(-1), Li64);
  DEBUG_PRINTF("]\n");
}
// Push a fake IonJS exit frame (descriptor, return address, frame pointer)
// for out-of-line calls; layout must match a real exit frame.
bool MacroAssemblerRiscv64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
  asMasm().PushFrameDescriptor(FrameType::IonJS);  // descriptor_
  asMasm().Push(ImmPtr(fakeReturnAddr));
  asMasm().Push(FramePointer);
  return true;
}
// Single-instruction int/float conversion wrappers.
void MacroAssemblerRiscv64Compat::convertUInt32ToDouble(Register src,
                                                        FloatRegister dest) {
  fcvt_d_wu(dest, src);
}
void MacroAssemblerRiscv64Compat::convertUInt64ToDouble(Register src,
                                                        FloatRegister dest) {
  fcvt_d_lu(dest, src);
}
void MacroAssemblerRiscv64Compat::convertUInt32ToFloat32(Register src,
                                                         FloatRegister dest) {
  fcvt_s_wu(dest, src);
}
void MacroAssemblerRiscv64Compat::convertDoubleToFloat32(FloatRegister src,
                                                         FloatRegister dest) {
  fcvt_s_d(dest, src);
}
// Round src into dst under rounding mode frm (F selects float or double).
// Handles NaN canonicalization, inputs that are already integral (or
// NaN/Inf), and sign preservation for values that round to zero.
// fpu_scratch is only written when dst aliases src.
template <typename F>
void MacroAssemblerRiscv64::RoundHelper(FPURegister dst, FPURegister src,
                                        FPURegister fpu_scratch,
                                        FPURoundingMode frm) {
  BlockTrampolinePoolScope block_trampoline_pool(this, 20);
  UseScratchRegisterScope temps(this);
  Register scratch2 = temps.Acquire();
  MOZ_ASSERT((std::is_same<float, F>::value) ||
             (std::is_same<double, F>::value));
  // Need at least two FPRs, so check against dst == src == fpu_scratch
  MOZ_ASSERT(!(dst == src && dst == fpu_scratch));
  const int kFloatMantissaBits =
      sizeof(F) == 4 ? kFloat32MantissaBits : kFloat64MantissaBits;
  const int kFloatExponentBits =
      sizeof(F) == 4 ? kFloat32ExponentBits : kFloat64ExponentBits;
  const int kFloatExponentBias =
      sizeof(F) == 4 ? kFloat32ExponentBias : kFloat64ExponentBias;
  Label done;
  {
    UseScratchRegisterScope temps2(this);
    Register scratch = temps2.Acquire();
    // extract exponent value of the source floating-point to scratch
    if (std::is_same<F, double>::value) {
      fmv_x_d(scratch, src);
    } else {
      fmv_x_w(scratch, src);
    }
    ExtractBits(scratch2, scratch, kFloatMantissaBits, kFloatExponentBits);
  }
  // if src is NaN/+-Infinity/+-Zero or if the exponent is larger than # of
  // bits in mantissa, the result is the same as src, so move src to dest (to
  // avoid generating another branch)
  if (dst != src) {
    if (std::is_same<F, double>::value) {
      fmv_d(dst, src);
    } else {
      fmv_s(dst, src);
    }
  }
  {
    Label not_NaN;
    UseScratchRegisterScope temps2(this);
    Register scratch = temps2.Acquire();
    // According to the wasm spec
    // if input is canonical NaN, then output is canonical NaN, and if input
    // is any other NaN, then output is any NaN with most significant bit of
    // payload is 1. In RISC-V, feq_d will set scratch to 0 if src is a NaN.
    // If src is not a NaN, branch to the label and do nothing, but if it is,
    // fmin_d will set dst to the canonical NaN.
    if (std::is_same<F, double>::value) {
      feq_d(scratch, src, src);
      bnez(scratch, &not_NaN);
      fmin_d(dst, src, src);
    } else {
      feq_s(scratch, src, src);
      bnez(scratch, &not_NaN);
      fmin_s(dst, src, src);
    }
    bind(&not_NaN);
  }
  // If real exponent (i.e., scratch2 - kFloatExponentBias) is greater than
  // kFloat32MantissaBits, it means the floating-point value has no
  // fractional part, thus the input is already rounded, jump to done. Note
  // that, NaN and Infinity in floating-point representation sets maximal
  // exponent value, so they also satisfy
  // (scratch2 - kFloatExponentBias >= kFloatMantissaBits), and JS round
  // semantics specify that rounding of NaN (Infinity) returns NaN
  // (Infinity), so NaN and Infinity are considered rounded value too.
  ma_branch(&done, GreaterThanOrEqual, scratch2,
            Operand(kFloatExponentBias + kFloatMantissaBits));
  // Actual rounding is needed along this path
  // old_src holds the original input, needed for the case of src == dst
  FPURegister old_src = src;
  if (src == dst) {
    MOZ_ASSERT(fpu_scratch != dst);
    // fmv_d copies the whole register contents, so it is also correct for
    // a NaN-boxed float payload.
    fmv_d(fpu_scratch, src);
    old_src = fpu_scratch;
  }
  // Since only input whose real exponent value is less than kMantissaBits
  // (i.e., 23 or 52-bits) falls into this path, the value range of the input
  // falls into that of 23- or 53-bit integers. So we round the input to
  // integer values, then convert them back to floating-point.
  {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    if (std::is_same<F, double>::value) {
      fcvt_l_d(scratch, src, frm);
      fcvt_d_l(dst, scratch, frm);
    } else {
      fcvt_w_s(scratch, src, frm);
      fcvt_s_w(dst, scratch, frm);
    }
  }
  // A special handling is needed if the input is a very small
  // positive/negative number that rounds to zero. JS semantics requires that
  // the rounded result retains the sign of the input, so a very small
  // positive (negative) floating-point number should be rounded to positive
  // (negative) 0. Therefore, we use sign-bit injection to produce +/-0
  // correctly. Instead of testing for zero w/ a branch, we just insert
  // sign-bit for everyone on this path (this is where old_src is needed)
  if (std::is_same<F, double>::value) {
    fsgnj_d(dst, dst, old_src);
  } else {
    fsgnj_s(dst, dst, old_src);
  }
  bind(&done);
}
// Run fcvt_generator to convert fs into rd. When result is a valid
// register, fflags are saved and the relevant exception bits cleared
// beforehand; afterwards result is set to 1 if the conversion raised no
// invalid-operation (nor, when Inexact is set, inexact) exception, else 0,
// and the original fflags are restored.
template <typename CvtFunc>
void MacroAssemblerRiscv64::RoundFloatingPointToInteger(Register rd,
                                                        FPURegister fs,
                                                        Register result,
                                                        CvtFunc fcvt_generator,
                                                        bool Inexact) {
  // Save csr_fflags to scratch & clear exception flags
  if (result != Register::Invalid()) {
    BlockTrampolinePoolScope block_trampoline_pool(this, 6);
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    int exception_flags = kInvalidOperation;
    if (Inexact) exception_flags |= kInexact;
    // csrrci reads the old fflags into scratch and clears the given bits.
    csrrci(scratch, csr_fflags, exception_flags);
    // actual conversion instruction
    fcvt_generator(this, rd, fs);
    // check kInvalidOperation flag (out-of-range, NaN)
    // set result to 1 if normal, otherwise set result to 0 for abnormal
    frflags(result);
    andi(result, result, exception_flags);
    seqz(result, result);  // result <-- 1 (normal), result <-- 0 (abnormal)
    // restore csr_fflags
    csrw(csr_fflags, scratch);
  } else {
    // actual conversion instruction
    fcvt_generator(this, rd, fs);
  }
}
// Truncating (round-toward-zero, RTZ) float->int conversions. Each wraps
// the matching fcvt instruction; result (when valid) is set to 1 for a
// clean conversion and 0 when it raised invalid-operation (or inexact, if
// Inexact is true). Naming: u = unsigned, w = int32, l = int64,
// d = double, s = float.
void MacroAssemblerRiscv64::Trunc_uw_d(Register rd, FPURegister fs,
                                       Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_wu_d(dst, src, RTZ);
      },
      Inexact);
}
void MacroAssemblerRiscv64::Trunc_w_d(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_d(dst, src, RTZ);
      },
      Inexact);
}
void MacroAssemblerRiscv64::Trunc_uw_s(Register rd, FPURegister fs,
                                       Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_wu_s(dst, src, RTZ);
      },
      Inexact);
}
void MacroAssemblerRiscv64::Trunc_w_s(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_s(dst, src, RTZ);
      },
      Inexact);
}
void MacroAssemblerRiscv64::Trunc_ul_d(Register rd, FPURegister fs,
                                       Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_lu_d(dst, src, RTZ);
      },
      Inexact);
}
void MacroAssemblerRiscv64::Trunc_l_d(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_l_d(dst, src, RTZ);
      },
      Inexact);
}
void MacroAssemblerRiscv64::Trunc_ul_s(Register rd, FPURegister fs,
                                       Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_lu_s(dst, src, RTZ);
      },
      Inexact);
}
void MacroAssemblerRiscv64::Trunc_l_s(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_l_s(dst, src, RTZ);
      },
      Inexact);
}
// Float->float rounding helpers over RoundHelper. Rounding modes:
// RDN = floor, RUP = ceil, RTZ = trunc, RNE = round-to-nearest-even.
void MacroAssemblerRiscv64::Floor_d_d(FPURegister dst, FPURegister src,
                                      FPURegister fpu_scratch) {
  RoundHelper<double>(dst, src, fpu_scratch, RDN);
}
void MacroAssemblerRiscv64::Ceil_d_d(FPURegister dst, FPURegister src,
                                     FPURegister fpu_scratch) {
  RoundHelper<double>(dst, src, fpu_scratch, RUP);
}
void MacroAssemblerRiscv64::Trunc_d_d(FPURegister dst, FPURegister src,
                                      FPURegister fpu_scratch) {
  RoundHelper<double>(dst, src, fpu_scratch, RTZ);
}
void MacroAssemblerRiscv64::Round_d_d(FPURegister dst, FPURegister src,
                                      FPURegister fpu_scratch) {
  RoundHelper<double>(dst, src, fpu_scratch, RNE);
}
void MacroAssemblerRiscv64::Floor_s_s(FPURegister dst, FPURegister src,
                                      FPURegister fpu_scratch) {
  RoundHelper<float>(dst, src, fpu_scratch, RDN);
}
void MacroAssemblerRiscv64::Ceil_s_s(FPURegister dst, FPURegister src,
                                     FPURegister fpu_scratch) {
  RoundHelper<float>(dst, src, fpu_scratch, RUP);
}
void MacroAssemblerRiscv64::Trunc_s_s(FPURegister dst, FPURegister src,
                                      FPURegister fpu_scratch) {
  RoundHelper<float>(dst, src, fpu_scratch, RTZ);
}
void MacroAssemblerRiscv64::Round_s_s(FPURegister dst, FPURegister src,
                                      FPURegister fpu_scratch) {
  RoundHelper<float>(dst, src, fpu_scratch, RNE);
}
// Float->int32 conversions with explicit rounding modes (RNE = nearest,
// RUP = ceil, RDN = floor); result semantics as in
// RoundFloatingPointToInteger.
void MacroAssemblerRiscv64::Round_w_s(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_s(dst, src, RNE);
      },
      Inexact);
}
void MacroAssemblerRiscv64::Round_w_d(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_d(dst, src, RNE);
      },
      Inexact);
}
void MacroAssemblerRiscv64::Ceil_w_s(Register rd, FPURegister fs,
                                     Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_s(dst, src, RUP);
      },
      Inexact);
}
void MacroAssemblerRiscv64::Ceil_w_d(Register rd, FPURegister fs,
                                     Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_d(dst, src, RUP);
      },
      Inexact);
}
void MacroAssemblerRiscv64::Floor_w_s(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_s(dst, src, RDN);
      },
      Inexact);
}
void MacroAssemblerRiscv64::Floor_w_d(Register rd, FPURegister fs,
                                      Register result, bool Inexact) {
  RoundFloatingPointToInteger(
      rd, fs, result,
      [](MacroAssemblerRiscv64* masm, Register dst, FPURegister src) {
        masm->fcvt_w_d(dst, src, RDN);
      },
      Inexact);
}
// Checks whether a double is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken
// to the given snapshot. Clobbers a GPR scratch register.
void MacroAssemblerRiscv64Compat::convertDoubleToInt32(FloatRegister src,
                                                       Register dest,
                                                       Label* fail,
                                                       bool negativeZeroCheck) {
  if (negativeZeroCheck) {
    // fclass_d reports the kNegativeZero class bit for -0.0, which is not
    // representable as int32 in JS semantics.
    fclass_d(dest, src);
    ma_b(dest, Imm32(kNegativeZero), fail, Equal);
  }
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  // Inexact=true: scratch becomes 0 when the value has a fractional part
  // or is out of range / NaN, triggering the bailout below.
  Trunc_w_d(dest, src, scratch, true);
  ma_b(scratch, Imm32(0), fail, Equal);
}
// Like convertDoubleToInt32, but truncates to a 64-bit (pointer-sized)
// integer. Clobbers a GPR scratch register.
void MacroAssemblerRiscv64Compat::convertDoubleToPtr(FloatRegister src,
                                                     Register dest, Label* fail,
                                                     bool negativeZeroCheck) {
  if (negativeZeroCheck) {
    fclass_d(dest, src);
    ma_b(dest, Imm32(kNegativeZero), fail, Equal);
  }
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  // Inexact=true: scratch becomes 0 on fractional / out-of-range / NaN
  // input, triggering the bailout below.
  Trunc_l_d(dest, src, scratch, true);
  ma_b(scratch, Imm32(0), fail, Equal);
}
// Checks whether a float32 is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken
// to the given snapshot. Clobbers a GPR scratch register.
void MacroAssemblerRiscv64Compat::convertFloat32ToInt32(
    FloatRegister src, Register dest, Label* fail, bool negativeZeroCheck) {
  if (negativeZeroCheck) {
    // Use the single-precision classifier. On RV64 a float32 held in an FP
    // register is NaN-boxed, so fclass_d would classify every float32
    // (including -0.0f) as a quiet NaN and the negative-zero bailout would
    // never be taken.
    fclass_s(dest, src);
    ma_b(dest, Imm32(kNegativeZero), fail, Equal);
  }
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  // Inexact=true: scratch becomes 0 when the value has a fractional part
  // or is out of range / NaN, triggering the bailout below.
  Trunc_w_s(dest, src, scratch, true);
  ma_b(scratch, Imm32(0), fail, Equal);
}
// Widening and int->float conversion wrappers.
void MacroAssemblerRiscv64Compat::convertFloat32ToDouble(FloatRegister src,
                                                         FloatRegister dest) {
  fcvt_d_s(dest, src);
}
void MacroAssemblerRiscv64Compat::convertInt32ToFloat32(Register src,
                                                        FloatRegister dest) {
  fcvt_s_w(dest, src);
}
void MacroAssemblerRiscv64Compat::convertInt32ToFloat32(const Address& src,
                                                        FloatRegister dest) {
  // Load the int32 through a scratch GPR, then convert.
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  load32(src, scratch);
  fcvt_s_w(dest, scratch);
}
void MacroAssemblerRiscv64Compat::movq(Register rj, Register rd) { mv(rd, rj); }
// Memory.
// Load a double from memory. Offsets that do not fit the 12-bit fld
// displacement are folded into the base via a scratch register.
void MacroAssemblerRiscv64::ma_loadDouble(FloatRegister dest, Address address) {
  int16_t encodedOffset;
  Register base;
  if (!is_int12(address.offset)) {
    UseScratchRegisterScope temps(this);
    Register ScratchRegister = temps.Acquire();
    ma_li(ScratchRegister, Imm32(address.offset));
    add(ScratchRegister, address.base, ScratchRegister);
    base = ScratchRegister;
    encodedOffset = 0;
  } else {
    encodedOffset = address.offset;
    base = address.base;
  }
  fld(dest, base, encodedOffset);
}
// Load a float from memory; same out-of-range offset handling as
// ma_loadDouble.
void MacroAssemblerRiscv64::ma_loadFloat(FloatRegister dest, Address address) {
  int16_t encodedOffset;
  Register base;
  if (!is_int12(address.offset)) {
    UseScratchRegisterScope temps(this);
    Register ScratchRegister = temps.Acquire();
    ma_li(ScratchRegister, Imm32(address.offset));
    add(ScratchRegister, address.base, ScratchRegister);
    base = ScratchRegister;
    encodedOffset = 0;
  } else {
    encodedOffset = address.offset;
    base = address.base;
  }
  flw(dest, base, encodedOffset);
}
// Integer load of the given size with zero- or sign-extension. Offsets
// outside the 12-bit displacement range are folded into the base via a
// scratch register. 64-bit loads (ld) have no extension variants.
void MacroAssemblerRiscv64::ma_load(Register dest, Address address,
                                    LoadStoreSize size,
                                    LoadStoreExtension extension) {
  int16_t encodedOffset;
  Register base;
  if (!is_int12(address.offset)) {
    UseScratchRegisterScope temps(this);
    Register ScratchRegister = temps.Acquire();
    ma_li(ScratchRegister, Imm32(address.offset));
    add(ScratchRegister, address.base, ScratchRegister);
    base = ScratchRegister;
    encodedOffset = 0;
  } else {
    encodedOffset = address.offset;
    base = address.base;
  }
  switch (size) {
    case SizeByte:
      if (ZeroExtend == extension) {
        lbu(dest, base, encodedOffset);
      } else {
        lb(dest, base, encodedOffset);
      }
      break;
    case SizeHalfWord:
      if (ZeroExtend == extension) {
        lhu(dest, base, encodedOffset);
      } else {
        lh(dest, base, encodedOffset);
      }
      break;
    case SizeWord:
      if (ZeroExtend == extension) {
        lwu(dest, base, encodedOffset);
      } else {
        lw(dest, base, encodedOffset);
      }
      break;
    case SizeDouble:
      ld(dest, base, encodedOffset);
      break;
    default:
      MOZ_CRASH("Invalid argument for ma_load");
  }
}
// Store to a base+index address: compute base + (index << scale) into a
// scratch, then delegate to the Address overload with dest.offset.
void MacroAssemblerRiscv64::ma_store(Register data, const BaseIndex& dest,
                                     LoadStoreSize size,
                                     LoadStoreExtension extension) {
  UseScratchRegisterScope temps(this);
  Register scratch2 = temps.Acquire();
  asMasm().computeScaledAddress(dest, scratch2);
  asMasm().ma_store(data, Address(scratch2, dest.offset), size, extension);
}
// Store an immediate to a base+index address. Uses two scratch registers:
// one for the fully-resolved address (offset folded in, so the final store
// uses displacement 0) and one for the materialized immediate.
void MacroAssemblerRiscv64::ma_store(Imm32 imm, const BaseIndex& dest,
                                     LoadStoreSize size,
                                     LoadStoreExtension extension) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  Register address = temps.Acquire();
  // Make sure that scratch contains absolute address so that
  // offset is 0.
  computeScaledAddress(dest, address);
  // Scrach register is free now, use it for loading imm value
  ma_li(scratch, imm);
  // with offset=0 ScratchRegister will not be used in ma_store()
  // so we can use it as a parameter here
  ma_store(scratch, Address(address, 0), size, extension);
}
// Store an immediate to memory: materialize it in a scratch register and
// delegate to the register overload.
void MacroAssemblerRiscv64::ma_store(Imm32 imm, Address address,
                                     LoadStoreSize size,
                                     LoadStoreExtension extension) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  ma_li(scratch, imm);
  ma_store(scratch, address, size, extension);
}
// Integer store of the given size. Offsets outside the 12-bit displacement
// range are folded into the base via a scratch register. The extension
// parameter is unused for stores.
void MacroAssemblerRiscv64::ma_store(Register data, Address address,
                                     LoadStoreSize size,
                                     LoadStoreExtension extension) {
  int16_t encodedOffset;
  Register base;
  if (!is_int12(address.offset)) {
    UseScratchRegisterScope temps(this);
    Register ScratchRegister = temps.Acquire();
    ma_li(ScratchRegister, Imm32(address.offset));
    add(ScratchRegister, address.base, ScratchRegister);
    base = ScratchRegister;
    encodedOffset = 0;
  } else {
    encodedOffset = address.offset;
    base = address.base;
  }
  switch (size) {
    case SizeByte:
      sb(data, base, encodedOffset);
      break;
    case SizeHalfWord:
      sh(data, base, encodedOffset);
      break;
    case SizeWord:
      sw(data, base, encodedOffset);
      break;
    case SizeDouble:
      sd(data, base, encodedOffset);
      break;
    default:
      MOZ_CRASH("Invalid argument for ma_store");
  }
}
// Memory.
// Store a double to memory; same out-of-range offset handling as the
// integer ma_store. Note: `dest` is the source FP register being stored.
void MacroAssemblerRiscv64::ma_storeDouble(FloatRegister dest,
                                           Address address) {
  int16_t encodedOffset;
  Register base;
  if (!is_int12(address.offset)) {
    UseScratchRegisterScope temps(this);
    Register ScratchRegister = temps.Acquire();
    ma_li(ScratchRegister, Imm32(address.offset));
    add(ScratchRegister, address.base, ScratchRegister);
    base = ScratchRegister;
    encodedOffset = 0;
  } else {
    encodedOffset = address.offset;
    base = address.base;
  }
  fsd(dest, base, encodedOffset);
}
// Store a float to memory; same out-of-range offset handling as
// ma_storeDouble. Note: `dest` is the source FP register being stored.
void MacroAssemblerRiscv64::ma_storeFloat(FloatRegister dest, Address address) {
  int16_t encodedOffset;
  Register base;
  if (!is_int12(address.offset)) {
    UseScratchRegisterScope temps(this);
    Register ScratchRegister = temps.Acquire();
    ma_li(ScratchRegister, Imm32(address.offset));
    add(ScratchRegister, address.base, ScratchRegister);
    base = ScratchRegister;
    encodedOffset = 0;
  } else {
    encodedOffset = address.offset;
    base = address.base;
  }
  fsw(dest, base, encodedOffset);
}
// dest = base + (index << scale). When dest aliases base, the shifted
// index goes through a scratch register so base is not clobbered before
// the add. The offset of the BaseIndex is NOT applied here.
void MacroAssemblerRiscv64::computeScaledAddress(const BaseIndex& address,
                                                 Register dest) {
  Register base = address.base;
  Register index = address.index;
  int32_t shift = Imm32::ShiftOf(address.scale).value;
  UseScratchRegisterScope temps(this);
  Register tmp = dest == base ? temps.Acquire() : dest;
  if (shift) {
    MOZ_ASSERT(shift <= 4);
    slli(tmp, index, shift);
    add(dest, base, tmp);
  } else {
    add(dest, base, index);
  }
}
// Emit a wasm 64-bit load: fold any access offset into ptrScratch, add the
// memory base, load with the access-type-appropriate instruction, and
// record the load's code offset for trap handling. The tmp parameter is
// unused here.
void MacroAssemblerRiscv64Compat::wasmLoadI64Impl(
    const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
    Register ptrScratch, Register64 output, Register tmp) {
  uint32_t offset = access.offset();
  MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
  MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
  // Maybe add the offset.
  if (offset) {
    asMasm().addPtr(ImmWord(offset), ptrScratch);
    ptr = ptrScratch;
  }
  asMasm().memoryBarrierBefore(access.sync());
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  switch (access.type()) {
    case Scalar::Int8:
      add(ScratchRegister, memoryBase, ptr);
      lb(output.reg, ScratchRegister, 0);
      break;
    case Scalar::Uint8:
      add(ScratchRegister, memoryBase, ptr);
      lbu(output.reg, ScratchRegister, 0);
      break;
    case Scalar::Int16:
      add(ScratchRegister, memoryBase, ptr);
      lh(output.reg, ScratchRegister, 0);
      break;
    case Scalar::Uint16:
      add(ScratchRegister, memoryBase, ptr);
      lhu(output.reg, ScratchRegister, 0);
      break;
    case Scalar::Int32:
      add(ScratchRegister, memoryBase, ptr);
      lw(output.reg, ScratchRegister, 0);
      break;
    case Scalar::Uint32:
      // TODO(loong64): Why need zero-extension here?
      add(ScratchRegister, memoryBase, ptr);
      lwu(output.reg, ScratchRegister, 0);
      break;
    case Scalar::Int64:
      add(ScratchRegister, memoryBase, ptr);
      ld(output.reg, ScratchRegister, 0);
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }
  // Record the offset of the just-emitted 4-byte load instruction so a
  // fault there can be attributed to this wasm access.
  asMasm().append(access, asMasm().size() - 4);
  asMasm().memoryBarrierAfter(access.sync());
}
// Emit a wasm 64-bit store; mirror of wasmLoadI64Impl. The tmp parameter
// is unused here.
void MacroAssemblerRiscv64Compat::wasmStoreI64Impl(
    const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
    Register ptr, Register ptrScratch, Register tmp) {
  uint32_t offset = access.offset();
  MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
  MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
  // Maybe add the offset.
  if (offset) {
    asMasm().addPtr(ImmWord(offset), ptrScratch);
    ptr = ptrScratch;
  }
  asMasm().memoryBarrierBefore(access.sync());
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  switch (access.type()) {
    case Scalar::Int8:
    case Scalar::Uint8:
      add(ScratchRegister, memoryBase, ptr);
      sb(value.reg, ScratchRegister, 0);
      break;
    case Scalar::Int16:
    case Scalar::Uint16:
      add(ScratchRegister, memoryBase, ptr);
      sh(value.reg, ScratchRegister, 0);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
      add(ScratchRegister, memoryBase, ptr);
      sw(value.reg, ScratchRegister, 0);
      break;
    case Scalar::Int64:
      add(ScratchRegister, memoryBase, ptr);
      sd(value.reg, ScratchRegister, 0);
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }
  // Record the offset of the just-emitted 4-byte store instruction so a
  // fault there can be attributed to this wasm access.
  asMasm().append(access, asMasm().size() - 4);
  asMasm().memoryBarrierAfter(access.sync());
}
// Publish framePtr as the current JitActivation's last profiling frame and
// clear the last profiling call site. Clobbers scratch.
void MacroAssemblerRiscv64Compat::profilerEnterFrame(Register framePtr,
                                                     Register scratch) {
  asMasm().loadJSContext(scratch);
  loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
  storePtr(framePtr,
           Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
  storePtr(ImmPtr(nullptr),
           Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
}
// Jump to the shared profiler exit-frame tail stub.
void MacroAssemblerRiscv64Compat::profilerExitFrame() {
  jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
}
// 32-bit moves. The register-to-register form uses slliw with shift 0,
// which sign-extends the low 32 bits into the full 64-bit register.
void MacroAssemblerRiscv64Compat::move32(Imm32 imm, Register dest) {
  ma_li(dest, imm);
}
void MacroAssemblerRiscv64Compat::move32(Register src, Register dest) {
  slliw(dest, src, 0);
}
void MacroAssemblerRiscv64Compat::load8ZeroExtend(const Address& address,
Register dest) {
ma_load(dest, address, SizeByte, ZeroExtend);
}
void MacroAssemblerRiscv64Compat::load8ZeroExtend(const BaseIndex& src,
Register dest) {
ma_load(dest, src, SizeByte, ZeroExtend);
}
void MacroAssemblerRiscv64Compat::load8SignExtend(const Address& address,
Register dest) {
ma_load(dest, address, SizeByte, SignExtend);
}
void MacroAssemblerRiscv64Compat::load8SignExtend(const BaseIndex& src,
Register dest) {
ma_load(dest, src, SizeByte, SignExtend);
}
// Load an unsigned 16-bit halfword from |address| into |dest|.
void MacroAssemblerRiscv64Compat::load16ZeroExtend(const Address& address,
                                                   Register dest) {
  ma_load(dest, address, SizeHalfWord, ZeroExtend);
}
// Load an unsigned 16-bit halfword from a base+index address into |dest|.
void MacroAssemblerRiscv64Compat::load16ZeroExtend(const BaseIndex& src,
                                                   Register dest) {
  ma_load(dest, src, SizeHalfWord, ZeroExtend);
}
// Load a signed 16-bit halfword from |address| into |dest|.
void MacroAssemblerRiscv64Compat::load16SignExtend(const Address& address,
                                                   Register dest) {
  ma_load(dest, address, SizeHalfWord, SignExtend);
}
// Load a signed 16-bit halfword from a base+index address into |dest|.
void MacroAssemblerRiscv64Compat::load16SignExtend(const BaseIndex& src,
                                                   Register dest) {
  ma_load(dest, src, SizeHalfWord, SignExtend);
}
// Load a 32-bit word from |address| into |dest|.
void MacroAssemblerRiscv64Compat::load32(const Address& address,
                                         Register dest) {
  ma_load(dest, address, SizeWord);
}
// Load a 32-bit word from a base+index address into |dest|.
void MacroAssemblerRiscv64Compat::load32(const BaseIndex& address,
                                         Register dest) {
  ma_load(dest, address, SizeWord);
}
// Load a 32-bit word from a fixed absolute address into |dest|.
void MacroAssemblerRiscv64Compat::load32(AbsoluteAddress address,
                                         Register dest) {
  UseScratchRegisterScope temps(this);
  const Register base = temps.Acquire();
  // Materialize the absolute address, then do a plain register-relative load.
  movePtr(ImmPtr(address.addr), base);
  load32(Address(base, 0), dest);
}
// Load a 32-bit word from a wasm symbolic address into |dest|.
void MacroAssemblerRiscv64Compat::load32(wasm::SymbolicAddress address,
                                         Register dest) {
  UseScratchRegisterScope temps(this);
  const Register base = temps.Acquire();
  // Resolve the symbolic address into a register, then load through it.
  movePtr(address, base);
  load32(Address(base, 0), dest);
}
// Load a full 64-bit pointer-sized value from |address| into |dest|.
void MacroAssemblerRiscv64Compat::loadPtr(const Address& address,
                                          Register dest) {
  ma_load(dest, address, SizeDouble);
}
// Load a 64-bit pointer-sized value from a base+index address into |dest|.
void MacroAssemblerRiscv64Compat::loadPtr(const BaseIndex& src, Register dest) {
  ma_load(dest, src, SizeDouble);
}
// Load a 64-bit pointer-sized value from a fixed absolute address.
void MacroAssemblerRiscv64Compat::loadPtr(AbsoluteAddress address,
                                          Register dest) {
  UseScratchRegisterScope temps(this);
  const Register base = temps.Acquire();
  // Materialize the absolute address, then load through the register.
  movePtr(ImmPtr(address.addr), base);
  loadPtr(Address(base, 0), dest);
}
// Load a 64-bit pointer-sized value from a wasm symbolic address.
void MacroAssemblerRiscv64Compat::loadPtr(wasm::SymbolicAddress address,
                                          Register dest) {
  UseScratchRegisterScope temps(this);
  const Register base = temps.Acquire();
  // Resolve the symbolic address, then load through the register.
  movePtr(address, base);
  loadPtr(Address(base, 0), dest);
}
// Load a "private" value (a pointer stored unboxed in a Value slot). On this
// 64-bit encoding a private pointer is a plain full-width load.
void MacroAssemblerRiscv64Compat::loadPrivate(const Address& address,
                                              Register dest) {
  loadPtr(address, dest);
}
// Store the low byte of immediate |imm| to |address|.
void MacroAssemblerRiscv64Compat::store8(Imm32 imm, const Address& address) {
  UseScratchRegisterScope temps(this);
  const Register value = temps.Acquire();
  // Immediates cannot be stored directly; stage through a scratch register.
  ma_li(value, imm);
  ma_store(value, address, SizeByte);
}
// Store the low byte of |src| to |address|.
void MacroAssemblerRiscv64Compat::store8(Register src, const Address& address) {
  ma_store(src, address, SizeByte);
}
// Store the low byte of immediate |imm| to a base+index address.
void MacroAssemblerRiscv64Compat::store8(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeByte);
}
// Store the low byte of |src| to a base+index address.
void MacroAssemblerRiscv64Compat::store8(Register src, const BaseIndex& dest) {
  ma_store(src, dest, SizeByte);
}
// Store the low 16 bits of immediate |imm| to |address|.
void MacroAssemblerRiscv64Compat::store16(Imm32 imm, const Address& address) {
  UseScratchRegisterScope temps(this);
  const Register value = temps.Acquire();
  // Immediates cannot be stored directly; stage through a scratch register.
  ma_li(value, imm);
  ma_store(value, address, SizeHalfWord);
}
// Store the low 16 bits of |src| to |address|.
void MacroAssemblerRiscv64Compat::store16(Register src,
                                          const Address& address) {
  ma_store(src, address, SizeHalfWord);
}
// Store the low 16 bits of immediate |imm| to a base+index address.
void MacroAssemblerRiscv64Compat::store16(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeHalfWord);
}
// Store the low 16 bits of |src| to a base+index address.
void MacroAssemblerRiscv64Compat::store16(Register src,
                                          const BaseIndex& address) {
  ma_store(src, address, SizeHalfWord);
}
// Store the low 32 bits of |src| to a fixed absolute address.
void MacroAssemblerRiscv64Compat::store32(Register src,
                                          AbsoluteAddress address) {
  UseScratchRegisterScope temps(this);
  const Register base = temps.Acquire();
  // Materialize the absolute address, then store through the register.
  movePtr(ImmPtr(address.addr), base);
  store32(src, Address(base, 0));
}
// Store the low 32 bits of |src| to |address|.
void MacroAssemblerRiscv64Compat::store32(Register src,
                                          const Address& address) {
  ma_store(src, address, SizeWord);
}
// Store the 32-bit immediate |src| to |address|.
void MacroAssemblerRiscv64Compat::store32(Imm32 src, const Address& address) {
  UseScratchRegisterScope temps(this);
  const Register value = temps.Acquire();
  // Stage the immediate in a scratch register before storing.
  move32(src, value);
  ma_store(value, address, SizeWord);
}
// Store the 32-bit immediate |imm| to a base+index address.
void MacroAssemblerRiscv64Compat::store32(Imm32 imm, const BaseIndex& dest) {
  ma_store(imm, dest, SizeWord);
}
// Store the low 32 bits of |src| to a base+index address.
void MacroAssemblerRiscv64Compat::store32(Register src, const BaseIndex& dest) {
  ma_store(src, dest, SizeWord);
}
// Store the 64-bit immediate |imm| to |address| (Address or BaseIndex).
template <typename T>
void MacroAssemblerRiscv64Compat::storePtr(ImmWord imm, T address) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  // Stage the immediate in a scratch register before the full-width store.
  ma_li(ScratchRegister, imm);
  ma_store(ScratchRegister, address, SizeDouble);
}
// Explicit instantiations for the two addressing modes used by callers.
template void MacroAssemblerRiscv64Compat::storePtr<Address>(ImmWord imm,
                                                             Address address);
template void MacroAssemblerRiscv64Compat::storePtr<BaseIndex>(
    ImmWord imm, BaseIndex address);
// Store the pointer immediate |imm| to |address| by reusing the ImmWord path.
template <typename T>
void MacroAssemblerRiscv64Compat::storePtr(ImmPtr imm, T address) {
  storePtr(ImmWord(uintptr_t(imm.value)), address);
}
template void MacroAssemblerRiscv64Compat::storePtr<Address>(ImmPtr imm,
                                                             Address address);
template void MacroAssemblerRiscv64Compat::storePtr<BaseIndex>(
    ImmPtr imm, BaseIndex address);
// Store a GC-thing pointer immediate to |address|. Goes through movePtr so
// the GC pointer is recorded for relocation, unlike the raw ImmWord path.
template <typename T>
void MacroAssemblerRiscv64Compat::storePtr(ImmGCPtr imm, T address) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  movePtr(imm, ScratchRegister);
  storePtr(ScratchRegister, address);
}
template void MacroAssemblerRiscv64Compat::storePtr<Address>(ImmGCPtr imm,
                                                             Address address);
template void MacroAssemblerRiscv64Compat::storePtr<BaseIndex>(
    ImmGCPtr imm, BaseIndex address);
// Store the full 64-bit register |src| to |address|.
void MacroAssemblerRiscv64Compat::storePtr(Register src,
                                           const Address& address) {
  ma_store(src, address, SizeDouble);
}
// Store the full 64-bit register |src| to a base+index address.
void MacroAssemblerRiscv64Compat::storePtr(Register src,
                                           const BaseIndex& address) {
  ma_store(src, address, SizeDouble);
}
// Store the full 64-bit register |src| to a fixed absolute address.
void MacroAssemblerRiscv64Compat::storePtr(Register src, AbsoluteAddress dest) {
  UseScratchRegisterScope temps(this);
  const Register base = temps.Acquire();
  // Materialize the destination address, then store through the register.
  movePtr(ImmPtr(dest.addr), base);
  storePtr(src, Address(base, 0));
}
// Set |dest| to the boolean result of comparing |value|'s type tag against
// JSVAL_TAG_NULL under |cond| (Equal or NotEqual only).
void MacroAssemblerRiscv64Compat::testNullSet(Condition cond,
                                              const ValueOperand& value,
                                              Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  UseScratchRegisterScope temps(this);
  const Register tag = temps.Acquire();
  splitTag(value, tag);
  ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_NULL), cond);
}
// Set |dest| to the boolean result of comparing |value|'s type tag against
// JSVAL_TAG_OBJECT under |cond| (Equal or NotEqual only).
void MacroAssemblerRiscv64Compat::testObjectSet(Condition cond,
                                                const ValueOperand& value,
                                                Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  UseScratchRegisterScope temps(this);
  const Register tag = temps.Acquire();
  splitTag(value, tag);
  ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_OBJECT), cond);
}
// Set |dest| to the boolean result of comparing |value|'s type tag against
// JSVAL_TAG_UNDEFINED under |cond| (Equal or NotEqual only).
void MacroAssemblerRiscv64Compat::testUndefinedSet(Condition cond,
                                                   const ValueOperand& value,
                                                   Register dest) {
  MOZ_ASSERT(cond == Equal || cond == NotEqual);
  UseScratchRegisterScope temps(this);
  const Register tag = temps.Acquire();
  splitTag(value, tag);
  ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_UNDEFINED), cond);
}
// Extract the int32 payload (low 32 bits, sign-extended) from a boxed Value.
void MacroAssemblerRiscv64Compat::unboxInt32(const ValueOperand& operand,
                                             Register dest) {
  slliw(dest, operand.valueReg(), 0);
}
// Extract the int32 payload from a boxed Value held in |src|.
void MacroAssemblerRiscv64Compat::unboxInt32(Register src, Register dest) {
  slliw(dest, src, 0);
}
// Load the int32 payload of a boxed Value stored at |src| (the payload
// occupies the low 32 bits, so a 32-bit load suffices).
void MacroAssemblerRiscv64Compat::unboxInt32(const Address& src,
                                             Register dest) {
  load32(Address(src.base, src.offset), dest);
}
// Load the int32 payload of a boxed Value stored at a base+index address.
void MacroAssemblerRiscv64Compat::unboxInt32(const BaseIndex& src,
                                             Register dest) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  // Fold base+scaled-index into a flat address, then apply the offset.
  computeScaledAddress(src, ScratchRegister);
  load32(Address(ScratchRegister, src.offset), dest);
}
// Extract the boolean payload (low 32 bits, zero-extended) from a boxed Value.
void MacroAssemblerRiscv64Compat::unboxBoolean(const ValueOperand& operand,
                                               Register dest) {
  ExtractBits(dest, operand.valueReg(), 0, 32);
}
// Extract the boolean payload from a boxed Value held in |src|.
void MacroAssemblerRiscv64Compat::unboxBoolean(Register src, Register dest) {
  ExtractBits(dest, src, 0, 32);
}
// Load the boolean payload of a boxed Value at |src| (zero-extended word).
void MacroAssemblerRiscv64Compat::unboxBoolean(const Address& src,
                                               Register dest) {
  ma_load(dest, Address(src.base, src.offset), SizeWord, ZeroExtend);
}
// Load the boolean payload of a boxed Value at a base+index address.
void MacroAssemblerRiscv64Compat::unboxBoolean(const BaseIndex& src,
                                               Register dest) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  computeScaledAddress(src, ScratchRegister);
  ma_load(dest, Address(ScratchRegister, src.offset), SizeWord, ZeroExtend);
}
// Reinterpret the 64-bit Value bits as a double by moving them into an FPR
// (doubles are stored unboxed in the Value word on this encoding).
void MacroAssemblerRiscv64Compat::unboxDouble(const ValueOperand& operand,
                                              FloatRegister dest) {
  fmv_d_x(dest, operand.valueReg());
}
// Load the double payload of a Value at |src| directly into |dest|.
void MacroAssemblerRiscv64Compat::unboxDouble(const Address& src,
                                              FloatRegister dest) {
  ma_loadDouble(dest, Address(src.base, src.offset));
}
// Load the double payload of a Value at a base+index address. Loads the raw
// 64-bit word into a scratch GPR first, then bit-moves it to the FPR.
void MacroAssemblerRiscv64Compat::unboxDouble(const BaseIndex& src,
                                              FloatRegister dest) {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  loadPtr(src, scratch);
  unboxDouble(ValueOperand(scratch), dest);
}
// Extract a JSString* payload from a boxed Value.
void MacroAssemblerRiscv64Compat::unboxString(const ValueOperand& operand,
                                              Register dest) {
  unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
}
// Extract a JSString* payload from a boxed Value held in |src|.
void MacroAssemblerRiscv64Compat::unboxString(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
}
// Extract a JSString* payload from a boxed Value stored at |src|.
void MacroAssemblerRiscv64Compat::unboxString(const Address& src,
                                              Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
}
// Extract a JS::Symbol* payload from a boxed Value.
void MacroAssemblerRiscv64Compat::unboxSymbol(const ValueOperand& operand,
                                              Register dest) {
  unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
}
// Extract a JS::Symbol* payload from a boxed Value held in |src|.
void MacroAssemblerRiscv64Compat::unboxSymbol(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
}
// Extract a JS::Symbol* payload from a boxed Value stored at |src|.
void MacroAssemblerRiscv64Compat::unboxSymbol(const Address& src,
                                              Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
}
// Extract a JS::BigInt* payload from a boxed Value.
void MacroAssemblerRiscv64Compat::unboxBigInt(const ValueOperand& operand,
                                              Register dest) {
  unboxNonDouble(operand, dest, JSVAL_TYPE_BIGINT);
}
// Extract a JS::BigInt* payload from a boxed Value held in |src|.
void MacroAssemblerRiscv64Compat::unboxBigInt(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
}
// Extract a JS::BigInt* payload from a boxed Value stored at |src|.
void MacroAssemblerRiscv64Compat::unboxBigInt(const Address& src,
                                              Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
}
// Extract a JSObject* payload from a boxed Value.
void MacroAssemblerRiscv64Compat::unboxObject(const ValueOperand& src,
                                              Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}
// Extract a JSObject* payload from a boxed Value held in |src|.
void MacroAssemblerRiscv64Compat::unboxObject(Register src, Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}
// Extract a JSObject* payload from a boxed Value stored at |src|.
void MacroAssemblerRiscv64Compat::unboxObject(const Address& src,
                                              Register dest) {
  unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
}
// Unbox |src| into |dest|. When the destination is a float register, the
// value may be either an int32 (converted to double) or a double (moved
// directly); otherwise it is unboxed as the non-double payload of |type|.
void MacroAssemblerRiscv64Compat::unboxValue(const ValueOperand& src,
                                             AnyRegister dest,
                                             JSValueType type) {
  if (dest.isFloat()) {
    Label notInt32, end;
    asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
    convertInt32ToDouble(src.valueReg(), dest.fpu());
    ma_branch(&end);
    bind(&notInt32);
    unboxDouble(src, dest.fpu());
    bind(&end);
  } else {
    unboxNonDouble(src, dest.gpr(), type);
  }
}
// Box a double: move the raw FPR bits into the Value register (doubles are
// stored unboxed on this encoding). The trailing FloatRegister temp required
// by the cross-platform signature is unused here.
void MacroAssemblerRiscv64Compat::boxDouble(FloatRegister src,
                                            const ValueOperand& dest,
                                            FloatRegister) {
  fmv_x_d(dest.valueReg(), src);
}
// Box a non-double payload |src| with the shifted tag for |type| into |dest|.
// |src| and |dest| must differ because boxValue reads src after writing dest.
void MacroAssemblerRiscv64Compat::boxNonDouble(JSValueType type, Register src,
                                               const ValueOperand& dest) {
  MOZ_ASSERT(src != dest.valueReg());
  boxValue(type, src, dest.valueReg());
}
void MacroAssemblerRiscv64Compat::boolValueToDouble(const ValueOperand& operand,
FloatRegister dest) {
UseScratchRegisterScope temps(this);
Register ScratchRegister = temps.Acquire();
convertBoolToInt32(operand.valueReg(), ScratchRegister);
convertInt32ToDouble(ScratchRegister, dest);
}
// Convert a boxed int32 Value to a double in |dest|. The conversion reads
// only the low 32 bits, so the tag needs no masking first.
void MacroAssemblerRiscv64Compat::int32ValueToDouble(
    const ValueOperand& operand, FloatRegister dest) {
  convertInt32ToDouble(operand.valueReg(), dest);
}
void MacroAssemblerRiscv64Compat::boolValueToFloat32(
const ValueOperand& operand, FloatRegister dest) {
UseScratchRegisterScope temps(this);
Register ScratchRegister = temps.Acquire();
convertBoolToInt32(operand.valueReg(), ScratchRegister);
convertInt32ToFloat32(ScratchRegister, dest);
}
// Convert a boxed int32 Value to a float32 in |dest|.
void MacroAssemblerRiscv64Compat::int32ValueToFloat32(
    const ValueOperand& operand, FloatRegister dest) {
  convertInt32ToFloat32(operand.valueReg(), dest);
}
// Materialize the float32 constant |f| into |dest|.
void MacroAssemblerRiscv64Compat::loadConstantFloat32(float f,
                                                      FloatRegister dest) {
  ma_lis(dest, f);
}
// Load a Value that is known to be either an int32 or a double into the
// float register |dest|: int32 payloads are converted to double, doubles
// are loaded as-is.
void MacroAssemblerRiscv64Compat::loadInt32OrDouble(const Address& src,
                                                    FloatRegister dest) {
  Label notInt32, end;
  // If it's an int, convert it to double.
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  Register SecondScratchReg = temps.Acquire();
  loadPtr(Address(src.base, src.offset), ScratchRegister);
  // Isolate the tag bits to test for int32.
  srli(SecondScratchReg, ScratchRegister, JSVAL_TAG_SHIFT);
  asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
  // Int32 path: reload and convert the 32-bit payload.
  loadPtr(Address(src.base, src.offset), SecondScratchReg);
  convertInt32ToDouble(SecondScratchReg, dest);
  ma_branch(&end);
  // Not an int, just load as double.
  bind(&notInt32);
  unboxDouble(src, dest);
  bind(&end);
}
// Base+index variant of loadInt32OrDouble. The scaled address has to be
// recomputed on each path because the scratch register holding it is
// reused to hold the loaded bits and tag.
void MacroAssemblerRiscv64Compat::loadInt32OrDouble(const BaseIndex& addr,
                                                    FloatRegister dest) {
  Label notInt32, end;
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  Register SecondScratchReg = temps.Acquire();
  // If it's an int, convert it to double.
  computeScaledAddress(addr, SecondScratchReg);
  // Since we only have one scratch, we need to stomp over it with the tag.
  loadPtr(Address(SecondScratchReg, 0), ScratchRegister);
  srli(SecondScratchReg, ScratchRegister, JSVAL_TAG_SHIFT);
  asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
  // Int32 path: recompute the address and convert the payload.
  computeScaledAddress(addr, SecondScratchReg);
  loadPtr(Address(SecondScratchReg, 0), SecondScratchReg);
  convertInt32ToDouble(SecondScratchReg, dest);
  ma_branch(&end);
  // Not an int, just load as double.
  bind(&notInt32);
  // First, recompute the offset that had been stored in the scratch register
  // since the scratch register was overwritten loading in the type.
  computeScaledAddress(addr, SecondScratchReg);
  unboxDouble(Address(SecondScratchReg, 0), dest);
  bind(&end);
}
// Materialize the double constant |dp| into |dest|.
void MacroAssemblerRiscv64Compat::loadConstantDouble(double dp,
                                                     FloatRegister dest) {
  ma_lid(dest, dp);
}
// Load the Value at |address| and strip the tag, leaving the object-pointer
// payload (bits [0, JSVAL_TAG_SHIFT)) in |scratch|. Returns |scratch|.
Register MacroAssemblerRiscv64Compat::extractObject(const Address& address,
                                                    Register scratch) {
  loadPtr(Address(address.base, address.offset), scratch);
  ExtractBits(scratch, scratch, 0, JSVAL_TAG_SHIFT);
  return scratch;
}
// Load the Value at |address| and isolate its tag (the top
// 64 - JSVAL_TAG_SHIFT bits) into |scratch|. Returns |scratch|.
Register MacroAssemblerRiscv64Compat::extractTag(const Address& address,
                                                 Register scratch) {
  loadPtr(Address(address.base, address.offset), scratch);
  ExtractBits(scratch, scratch, JSVAL_TAG_SHIFT, 64 - JSVAL_TAG_SHIFT);
  return scratch;
}
// Base+index variant: flatten the scaled address into |scratch|, then
// delegate to the Address overload (which may reuse |scratch|).
Register MacroAssemblerRiscv64Compat::extractTag(const BaseIndex& address,
                                                 Register scratch) {
  computeScaledAddress(address, scratch);
  return extractTag(Address(scratch, address.offset), scratch);
}
/////////////////////////////////////////////////////////////////
// X86/X64-common/ARM/LoongArch interface.
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
// X86/X64-common/ARM/MIPS interface.
/////////////////////////////////////////////////////////////////
// Store the boxed Value in |val| to a base+index destination.
void MacroAssemblerRiscv64Compat::storeValue(ValueOperand val,
                                             const BaseIndex& dest) {
  UseScratchRegisterScope temps(this);
  const Register flatAddr = temps.Acquire();
  // Flatten base+scaled-index, then store via the Address overload.
  computeScaledAddress(dest, flatAddr);
  storeValue(val, Address(flatAddr, dest.offset));
}
// Box the payload in |reg| with |type|'s tag and store it to a base+index
// destination. Offsets that do not fit a 12-bit immediate are folded into
// the computed base first.
void MacroAssemblerRiscv64Compat::storeValue(JSValueType type, Register reg,
                                             BaseIndex dest) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  computeScaledAddress(dest, ScratchRegister);
  int32_t offset = dest.offset;
  if (!is_int12(offset)) {
    // Offset exceeds the store instruction's immediate range; add it into
    // the base with a second scratch register.
    UseScratchRegisterScope temps(this);
    Register SecondScratchReg = temps.Acquire();
    ma_li(SecondScratchReg, Imm32(offset));
    add(ScratchRegister, ScratchRegister, SecondScratchReg);
    offset = 0;
  }
  storeValue(type, reg, Address(ScratchRegister, offset));
}
// Store the boxed Value in |val| (a single 64-bit word) to |dest|.
void MacroAssemblerRiscv64Compat::storeValue(ValueOperand val,
                                             const Address& dest) {
  storePtr(val.valueReg(), Address(dest.base, dest.offset));
}
// Box the payload in |reg| with |type|'s tag and store the result to |dest|.
// Int32/boolean values are stored as two 32-bit halves (payload then tag);
// other types are packed into one register and stored as a 64-bit word.
void MacroAssemblerRiscv64Compat::storeValue(JSValueType type, Register reg,
                                             Address dest) {
  if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
    store32(reg, dest);
    JSValueShiftedTag tag = (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
    // The tag occupies the high 32 bits of the Value word.
    store32(((Imm64(tag)).secondHalf()), Address(dest.base, dest.offset + 4));
  } else {
    ScratchRegisterScope SecondScratchReg(asMasm());
    MOZ_ASSERT(dest.base != SecondScratchReg);
    // Build tag << JSVAL_TAG_SHIFT, then merge the payload into the low bits.
    ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
    slli(SecondScratchReg, SecondScratchReg, JSVAL_TAG_SHIFT);
    InsertBits(SecondScratchReg, reg, 0, JSVAL_TAG_SHIFT);
    storePtr(SecondScratchReg, Address(dest.base, dest.offset));
  }
}
// Store the constant Value |val| to |dest|. GC-thing constants use a
// patchable move and record a data relocation so the GC can update them.
void MacroAssemblerRiscv64Compat::storeValue(const Value& val, Address dest) {
  UseScratchRegisterScope temps(this);
  Register SecondScratchReg = temps.Acquire();
  if (val.isGCThing()) {
    writeDataRelocation(val);
    movWithPatch(ImmWord(val.asRawBits()), SecondScratchReg);
  } else {
    ma_li(SecondScratchReg, ImmWord(val.asRawBits()));
  }
  storePtr(SecondScratchReg, Address(dest.base, dest.offset));
}
// Store the constant Value |val| to a base+index destination. Out-of-range
// offsets are folded into the computed base before delegating.
void MacroAssemblerRiscv64Compat::storeValue(const Value& val, BaseIndex dest) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  Register SecondScratchReg = temps.Acquire();
  computeScaledAddress(dest, ScratchRegister);
  int32_t offset = dest.offset;
  if (!is_int12(offset)) {
    // Offset exceeds the 12-bit immediate range; add it into the base.
    ma_li(SecondScratchReg, Imm32(offset));
    add(ScratchRegister, ScratchRegister, SecondScratchReg);
    offset = 0;
  }
  storeValue(val, Address(ScratchRegister, offset));
}
void MacroAssemblerRiscv64Compat::loadValue(const BaseIndex& addr,
ValueOperand val) {
UseScratchRegisterScope temps(this);
Register ScratchRegister = temps.Acquire();
computeScaledAddress(addr, ScratchRegister);
loadValue(Address(ScratchRegister, addr.offset), val);
}
// Load a boxed Value (one 64-bit word) from |src| into |val|.
void MacroAssemblerRiscv64Compat::loadValue(Address src, ValueOperand val) {
  loadPtr(Address(src.base, src.offset), val.valueReg());
}
// Box |payload| with |type|'s tag into |dest|: copy the payload, insert the
// tag into the high bits, and for int32/boolean clear the bits between the
// 32-bit payload and the tag.
void MacroAssemblerRiscv64Compat::tagValue(JSValueType type, Register payload,
                                           ValueOperand dest) {
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  MOZ_ASSERT(dest.valueReg() != ScratchRegister);
  JitSpew(JitSpew_Codegen, "[ tagValue");
  if (payload != dest.valueReg()) {
    mv(dest.valueReg(), payload);
  }
  ma_li(ScratchRegister, ImmTag(JSVAL_TYPE_TO_TAG(type)));
  InsertBits(dest.valueReg(), ScratchRegister, JSVAL_TAG_SHIFT,
             64 - JSVAL_TAG_SHIFT);
  if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
    // Zero bits [32, JSVAL_TAG_SHIFT) so stray sign-extension bits from the
    // 32-bit payload don't corrupt the boxed representation.
    InsertBits(dest.valueReg(), zero, 32, JSVAL_TAG_SHIFT - 32);
  }
  JitSpew(JitSpew_Codegen, "]");
}
// Push the boxed Value in |val| onto the stack.
void MacroAssemblerRiscv64Compat::pushValue(ValueOperand val) {
  // Allocate stack slots for Value. One for each.
  asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
  // Store Value
  storeValue(val, Address(StackPointer, 0));
}
// Push the boxed Value stored at |addr| onto the stack.
void MacroAssemblerRiscv64Compat::pushValue(const Address& addr) {
  // Load the value before adjusting the stack: addr.base may be sp itself,
  // in which case the offset would be wrong after the subtraction.
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  loadPtr(Address(addr.base, addr.offset), ScratchRegister);
  ma_sub64(StackPointer, StackPointer, Imm32(sizeof(Value)));
  storePtr(ScratchRegister, Address(StackPointer, 0));
}
// Pop the boxed Value on top of the stack into |val|.
void MacroAssemblerRiscv64Compat::popValue(ValueOperand val) {
  ld(val.valueReg(), StackPointer, 0);
  ma_add64(StackPointer, StackPointer, Imm32(sizeof(Value)));
}
// Emit a break instruction carrying |value| as its code (debugger trap).
void MacroAssemblerRiscv64Compat::breakpoint(uint32_t value) { break_(value); }
// Ensure |source| ends up as a double in |dest|: doubles pass through,
// int32s are converted, anything else branches to |failure|.
void MacroAssemblerRiscv64Compat::ensureDouble(const ValueOperand& source,
                                               FloatRegister dest,
                                               Label* failure) {
  Label isDouble, done;
  {
    // Tag test inside a scope so the tag scratch is released before the
    // conversion below acquires its own scratch.
    ScratchTagScope tag(asMasm(), source);
    splitTagForTest(source, tag);
    asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
    asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
  }
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  unboxInt32(source, ScratchRegister);
  convertInt32ToDouble(ScratchRegister, dest);
  jump(&done);
  bind(&isDouble);
  unboxDouble(source, dest);
  bind(&done);
}
// Exception-handling tail: calls the C++ HandleException with a stack-
// allocated ResumeFromException, then dispatches on the resume kind it
// filled in (return from entry frame, catch/finally in baseline, forced
// return, bailout, or wasm unwind/catch). Instruction order is ABI- and
// frame-layout-sensitive; do not reorder.
void MacroAssemblerRiscv64Compat::handleFailureWithHandlerTail(
    Label* profilerExitTail, Label* bailoutTail) {
  // Reserve space for exception information.
  int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
             ~(ABIStackAlignment - 1);
  asMasm().subPtr(Imm32(size), StackPointer);
  mv(a0, StackPointer);  // Use a0 since it is a first function argument
  // Call the handler.
  using Fn = void (*)(ResumeFromException* rfe);
  asMasm().setupUnalignedABICall(a1);
  asMasm().passABIArg(a0);
  asMasm().callWithABI<Fn, HandleException>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
  Label entryFrame;
  Label catch_;
  Label finally;
  Label returnBaseline;
  Label returnIon;
  Label bailout;
  Label wasm;
  Label wasmCatch;
  // Already clobbered a0, so use it...
  load32(Address(StackPointer, ResumeFromException::offsetOfKind()), a0);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
  asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Catch),
                    &catch_);
  asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Finally),
                    &finally);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ExceptionResumeKind::ForcedReturnBaseline),
                    &returnBaseline);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
  asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Bailout),
                    &bailout);
  asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Wasm),
                    &wasm);
  asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::WasmCatch),
                    &wasmCatch);
  breakpoint();  // Invalid kind.
  // No exception handler. Load the error value, restore state and return from
  // the entry frame.
  bind(&entryFrame);
  asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
          FramePointer);
  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
          StackPointer);
  // We're going to be returning by the ion calling convention
  ma_pop(ra);
  jump(ra);
  nop();
  // If we found a catch handler, this must be a baseline frame. Restore
  // state and jump to the catch block.
  bind(&catch_);
  loadPtr(Address(StackPointer, ResumeFromException::offsetOfTarget()), a0);
  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
          FramePointer);
  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
          StackPointer);
  jump(a0);
  // If we found a finally block, this must be a baseline frame. Push two
  // values expected by the finally block: the exception and BooleanValue(true).
  bind(&finally);
  ValueOperand exception = ValueOperand(a1);
  loadValue(Address(sp, ResumeFromException::offsetOfException()), exception);
  loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a0);
  loadPtr(Address(sp, ResumeFromException::offsetOfFramePointer()),
          FramePointer);
  loadPtr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp);
  pushValue(exception);
  pushValue(BooleanValue(true));
  jump(a0);
  // Return BaselineFrame->returnValue() to the caller.
  // Used in debug mode and for GeneratorReturn.
  Label profilingInstrumentation;
  bind(&returnBaseline);
  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
          FramePointer);
  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
          StackPointer);
  loadValue(Address(FramePointer, BaselineFrame::reverseOffsetOfReturnValue()),
            JSReturnOperand);
  jump(&profilingInstrumentation);
  // Return the given value to the caller.
  bind(&returnIon);
  loadValue(Address(StackPointer, ResumeFromException::offsetOfException()),
            JSReturnOperand);
  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
          FramePointer);
  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
          StackPointer);
  // If profiling is enabled, then update the lastProfilingFrame to refer to
  // caller frame before returning. This code is shared by ForcedReturnIon
  // and ForcedReturnBaseline.
  bind(&profilingInstrumentation);
  {
    Label skipProfilingInstrumentation;
    // Test if profiler enabled.
    AbsoluteAddress addressOfEnabled(
        asMasm().runtime()->geckoProfiler().addressOfEnabled());
    asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
                      &skipProfilingInstrumentation);
    jump(profilerExitTail);
    bind(&skipProfilingInstrumentation);
  }
  mv(StackPointer, FramePointer);
  pop(FramePointer);
  ret();
  // If we are bailing out to baseline to handle an exception, jump to
  // the bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
  bind(&bailout);
  loadPtr(Address(sp, ResumeFromException::offsetOfBailoutInfo()), a2);
  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
          StackPointer);
  ma_li(ReturnReg, Imm32(1));
  jump(bailoutTail);
  // If we are throwing and the innermost frame was a wasm frame, reset SP and
  // FP; SP is pointing to the unwound return address to the wasm entry, so
  // we can just ret().
  bind(&wasm);
  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
          FramePointer);
  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
          StackPointer);
  ma_li(InstanceReg, ImmWord(wasm::FailInstanceReg));
  ret();
  // Found a wasm catch handler, restore state and jump to it.
  bind(&wasmCatch);
  loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a1);
  loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
          FramePointer);
  loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
          StackPointer);
  jump(a1);
}
// Emit a togglable short branch to |label| and return the offset of the
// emitted instruction so it can later be patched on/off.
CodeOffset MacroAssemblerRiscv64Compat::toggledJump(Label* label) {
  CodeOffset ret(nextOffset().getOffset());
  BranchShort(label);
  return ret;
}
// Emit a togglable call to |target|: a patchable address load followed by
// either a jalr (enabled) or a nop (disabled). The whole sequence has a
// fixed size (ToggledCallSize) so ToggleCall can flip the final instruction.
CodeOffset MacroAssemblerRiscv64Compat::toggledCall(JitCode* target,
                                                    bool enabled) {
  DEBUG_PRINTF("\ttoggledCall\n");
  UseScratchRegisterScope temps(this);
  Register ScratchRegister = temps.Acquire();
  // Keep the trampoline pool from splitting the fixed-size sequence.
  BlockTrampolinePoolScope block_trampoline_pool(this, 8);
  BufferOffset bo = nextOffset();
  CodeOffset offset(bo.getOffset());
  addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
  ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
  if (enabled) {
    jalr(ScratchRegister);
  } else {
    nop();
  }
  MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
                            ToggledCallSize(nullptr));
  return offset;
}
// Subtract |imm32| from the stack pointer; emits nothing for zero.
void MacroAssembler::subFromStackPtr(Imm32 imm32) {
  if (imm32.value) {
    asMasm().subPtr(imm32, StackPointer);
  }
}
// Clamp |input| to the uint8 range [0, 255] with round-to-nearest; NaN
// clamps to 0.
void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
  JitSpew(JitSpew_Codegen, "[ clampDoubleToUint8");
  Label nan, done;
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  // feq_d produces 0 iff input is NaN (NaN compares unequal to itself).
  feq_d(scratch, input, input);
  beqz(scratch, &nan);
  // NOTE(review): addi with rd == zero is an architectural no-op on RISC-V;
  // this looks like a leftover debug marker — confirm it can be removed.
  addi(zero, scratch, 0x11);
  Round_w_d(output, input);
  clampIntToUint8(output);
  ma_branch(&done);
  // Input is nan
  bind(&nan);
  mv(output, zero_reg);
  bind(&done);
  JitSpew(JitSpew_Codegen, "]");
}
//{{{ check_macroassembler_style
// ===============================================================
// MacroAssembler high-level usage.
// RISC-V converts uint64 to double in one instruction; no temp needed.
bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
// Call the code at |label|, returning the offset just past the call.
CodeOffset MacroAssembler::call(Label* label) {
  BranchAndLink(label);
  return CodeOffset(currentOffset());
}
// Indirect call through |reg|, returning the offset just past the call.
CodeOffset MacroAssembler::call(Register reg) {
  jalr(reg, 0);
  return CodeOffset(currentOffset());
}
// Call a wasm symbolic address: resolve it into CallReg (excluded from the
// scratch pool so the move cannot clobber it) and call indirectly.
CodeOffset MacroAssembler::call(wasm::SymbolicAddress target) {
  UseScratchRegisterScope temps(this);
  temps.Exclude(GeneralRegisterSet(1 << CallReg.code()));
  movePtr(target, CallReg);
  return call(CallReg);
}
// Emit a 5-slot far-jump sequence whose 32-bit displacement (the final
// slot, initially 0xffffffff) is filled in later by patchFarJump():
//   auipc  scratch            ; pc of the sequence
//   lw     scratch2, [pc+16]  ; load the patched displacement word
//   add    scratch, scratch, scratch2
//   jr     scratch
//   .word  0xffffffff         ; displacement placeholder
CodeOffset MacroAssembler::farJumpWithPatch() {
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  Register scratch2 = temps.Acquire();
  // Allocate space which will be patched by patchFarJump().
  CodeOffset farJump(nextInstrOffset(5).getOffset());
  auipc(scratch, 0);
  lw(scratch2, scratch, 4 * sizeof(Instr));
  add(scratch, scratch, scratch2);
  jr(scratch, 0);
  spew(".space 32bit initValue 0xffff ffff");
  emit(UINT32_MAX);
  return farJump;
}
// Emit a patchable move of a (later-filled-in) address into |dest|.
CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
  return movWithPatch(ImmPtr(nullptr), dest);
}
// Emit seven nops reserving space that can later be patched into a call:
// a six-instruction patchable load of the 48-bit target address
// (ma_liPatchable-style lui/addi/slli/ori/slli/ori) plus the final jalr.
CodeOffset MacroAssembler::nopPatchableToCall() {
  BlockTrampolinePoolScope block_trampoline_pool(this, 7);
  // riscv64
  nop();  // lui(rd, high bits of the target address)
  nop();  // addi(rd, rd, next 12 bits)
  nop();  // slli(rd, rd, 11): make room for the next 11 bits
  nop();  // ori(rd, rd, next 11 bits)
  nop();  // slli(rd, rd, 6): make room for the final 6 bits
  nop();  // ori(rd, rd, final 6 bits): full 48-bit address in rd
  nop();  // jalr(rd): the call itself
  return CodeOffset(currentOffset());
}
// Emit the instruction wasm uses to trap (a break with the wasm trap code)
// and return its offset for trap-site bookkeeping.
CodeOffset MacroAssembler::wasmTrapInstruction() {
  CodeOffset offset(currentOffset());
  BlockTrampolinePoolScope block_trampoline_pool(this, 2);
  break_(kWasmTrapCode);  // TODO: teq(zero, zero, WASM_TRAP)
  return offset;
}
// Number of stack bytes PushRegsInMask(set) will consume: one pointer-sized
// slot per GPR plus the FPU set's own push size.
size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
  return set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
}
template <typename T>
void MacroAssembler::branchValueIsNurseryCellImpl(Condition cond,
const T& value, Register temp,
Label* label) {
MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
Label done;
branchTestGCThing(Assembler::NotEqual, value,
cond == Assembler::Equal ? &done : label);
// temp may be InvalidReg, use scratch2 instead.