/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/loong64/MacroAssembler-loong64.h"
#include "jsmath.h"
#include "jit/Bailouts.h"
#include "jit/BaselineFrame.h"
#include "jit/JitFrames.h"
#include "jit/JitRuntime.h"
#include "jit/loong64/SharedICRegisters-loong64.h"
#include "jit/MacroAssembler.h"
#include "jit/MoveEmitter.h"
#include "util/Memory.h"
#include "vm/JitActivation.h" // js::jit::JitActivation
#include "vm/JSContext.h"
#include "wasm/WasmStubs.h"
#include "jit/MacroAssembler-inl.h"
namespace js {
namespace jit {
void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
ScratchRegisterScope scratch(asMasm());
ScratchDoubleScope fpscratch(asMasm());
as_ftintrne_l_d(fpscratch, input);
as_movfr2gr_d(output, fpscratch);
// if (res < 0) res = 0;
as_slt(scratch, output, zero);
as_masknez(output, output, scratch);
// if (res > 255) res = 255;
as_sltui(scratch, output, 255);
as_addi_d(output, output, -255);
as_maskeqz(output, output, scratch);
as_addi_d(output, output, 255);
}
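// A plain-C++ sketch of the branchless clamp above (illustrative only;
// RoundToNearestEven is a hypothetical helper standing in for ftintrne.l.d):
//
//   int64_t r = RoundToNearestEven(input);
//   r = (r < 0) ? 0 : r;      // slt + masknez
//   r = (r > 255) ? 255 : r;  // sltui + addi / maskeqz / addi
//   output = uint8_t(r);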
bool MacroAssemblerLOONG64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
asMasm().PushFrameDescriptor(FrameType::IonJS); // descriptor_
asMasm().Push(ImmPtr(fakeReturnAddr));
asMasm().Push(FramePointer);
return true;
}
void MacroAssemblerLOONG64Compat::convertUInt32ToDouble(Register src,
FloatRegister dest) {
ScratchRegisterScope scratch(asMasm());
as_bstrpick_d(scratch, src, 31, 0);
asMasm().convertInt64ToDouble(Register64(scratch), dest);
}
void MacroAssemblerLOONG64Compat::convertUInt64ToDouble(Register src,
FloatRegister dest) {
Label positive, done;
ma_b(src, src, &positive, NotSigned, ShortJump);
ScratchRegisterScope scratch(asMasm());
SecondScratchRegisterScope scratch2(asMasm());
MOZ_ASSERT(src != scratch);
MOZ_ASSERT(src != scratch2);
ma_and(scratch, src, Imm32(1));
as_srli_d(scratch2, src, 1);
as_or(scratch, scratch, scratch2);
as_movgr2fr_d(dest, scratch);
as_ffint_d_l(dest, dest);
asMasm().addDouble(dest, dest);
ma_b(&done, ShortJump);
bind(&positive);
as_movgr2fr_d(dest, src);
as_ffint_d_l(dest, dest);
bind(&done);
}
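// Reference semantics, as a plain-C++ sketch (illustrative): when the sign
// bit of src is set, the value does not fit the signed ffint path, so halve
// it while keeping the low bit (to preserve round-to-nearest), convert, and
// double the result:
//
//   uint64_t v = uint64_t(src);
//   double d = (v >> 63) ? 2.0 * double(int64_t((v >> 1) | (v & 1)))
//                        : double(int64_t(v));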
void MacroAssemblerLOONG64Compat::convertUInt32ToFloat32(Register src,
FloatRegister dest) {
ScratchRegisterScope scratch(asMasm());
as_bstrpick_d(scratch, src, 31, 0);
asMasm().convertInt64ToFloat32(Register64(scratch), dest);
}
void MacroAssemblerLOONG64Compat::convertDoubleToFloat32(FloatRegister src,
FloatRegister dest) {
as_fcvt_s_d(dest, src);
}
const int CauseBitPos = int(Assembler::CauseI);
const int CauseBitCount = 1 + int(Assembler::CauseV) - int(Assembler::CauseI);
const int CauseIOrVMask = ((1 << int(Assembler::CauseI)) |
(1 << int(Assembler::CauseV))) >>
int(Assembler::CauseI);
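// Within the extracted field, the Inexact cause is bit 0 and the Invalid
// cause is bit CauseBitCount - 1, so CauseIOrVMask selects exactly those two
// flags.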
// Checks whether a double is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void MacroAssemblerLOONG64Compat::convertDoubleToInt32(FloatRegister src,
Register dest,
Label* fail,
bool negativeZeroCheck) {
if (negativeZeroCheck) {
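// -0.0 is the only double whose bit pattern is 0x8000000000000000; rotating
// right by 63 (i.e. left by 1) maps that pattern, and only that pattern, to 1.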
moveFromDouble(src, dest);
as_rotri_d(dest, dest, 63);
ma_b(dest, Imm32(1), fail, Assembler::Equal);
}
ScratchRegisterScope scratch(asMasm());
ScratchFloat32Scope fpscratch(asMasm());
// Truncate the double to int32; fail if the result is inexact or invalid.
as_ftintrz_w_d(fpscratch, src);
as_movfcsr2gr(scratch);
moveFromFloat32(fpscratch, dest);
as_bstrpick_d(scratch, scratch, CauseBitPos + CauseBitCount - 1, CauseBitPos);
as_andi(scratch, scratch,
CauseIOrVMask);  // Mask for the Inexact and Invalid flags.
ma_b(scratch, zero, fail, Assembler::NotEqual);
}
void MacroAssemblerLOONG64Compat::convertDoubleToPtr(FloatRegister src,
Register dest, Label* fail,
bool negativeZeroCheck) {
if (negativeZeroCheck) {
moveFromDouble(src, dest);
as_rotri_d(dest, dest, 63);
ma_b(dest, Imm32(1), fail, Assembler::Equal);
}
ScratchRegisterScope scratch(asMasm());
ScratchDoubleScope fpscratch(asMasm());
// Truncate the double to int64; fail if the result is inexact or invalid.
as_ftintrz_l_d(fpscratch, src);
as_movfcsr2gr(scratch);
moveFromDouble(fpscratch, dest);
as_bstrpick_d(scratch, scratch, CauseBitPos + CauseBitCount - 1, CauseBitPos);
as_andi(scratch, scratch,
CauseIOrVMask);  // Mask for the Inexact and Invalid flags.
ma_b(scratch, zero, fail, Assembler::NotEqual);
}
// Checks whether a float32 is representable as a 32-bit integer. If so, the
// integer is written to the output register. Otherwise, a bailout is taken to
// the given snapshot. This function overwrites the scratch float register.
void MacroAssemblerLOONG64Compat::convertFloat32ToInt32(
FloatRegister src, Register dest, Label* fail, bool negativeZeroCheck) {
if (negativeZeroCheck) {
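// -0.0f has the bit pattern 0x80000000, i.e. INT32_MIN when read as an
// integer.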
moveFromFloat32(src, dest);
ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
}
ScratchRegisterScope scratch(asMasm());
ScratchFloat32Scope fpscratch(asMasm());
as_ftintrz_w_s(fpscratch, src);
as_movfcsr2gr(scratch);
moveFromFloat32(fpscratch, dest);
MOZ_ASSERT(CauseBitPos + CauseBitCount < 33);
MOZ_ASSERT(CauseBitPos < 32);
as_bstrpick_w(scratch, scratch, CauseBitPos + CauseBitCount - 1, CauseBitPos);
as_andi(scratch, scratch, CauseIOrVMask);
ma_b(scratch, zero, fail, Assembler::NotEqual);
}
void MacroAssemblerLOONG64Compat::convertFloat32ToDouble(FloatRegister src,
FloatRegister dest) {
as_fcvt_d_s(dest, src);
}
void MacroAssemblerLOONG64Compat::convertInt32ToFloat32(Register src,
FloatRegister dest) {
as_movgr2fr_w(dest, src);
as_ffint_s_w(dest, dest);
}
void MacroAssemblerLOONG64Compat::convertInt32ToFloat32(const Address& src,
FloatRegister dest) {
ma_fld_s(dest, src);
as_ffint_s_w(dest, dest);
}
void MacroAssemblerLOONG64Compat::movq(Register rj, Register rd) {
as_or(rd, rj, zero);
}
void MacroAssemblerLOONG64::ma_li(Register dest, CodeLabel* label) {
BufferOffset bo = m_buffer.nextOffset();
ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
label->patchAt()->bind(bo.getOffset());
label->setLinkMode(CodeLabel::MoveImmediate);
}
void MacroAssemblerLOONG64::ma_li(Register dest, ImmWord imm) {
int64_t value = imm.value;
if (-1 == (value >> 11) || 0 == (value >> 11)) {
as_addi_w(dest, zero, value);
return;
}
if (0 == (value >> 12)) {
as_ori(dest, zero, value);
return;
}
if (-1 == (value >> 31) || 0 == (value >> 31)) {
as_lu12i_w(dest, (value >> 12) & 0xfffff);
} else if (0 == (value >> 32)) {
as_lu12i_w(dest, (value >> 12) & 0xfffff);
as_bstrins_d(dest, zero, 63, 32);
} else if (-1 == (value >> 51) || 0 == (value >> 51)) {
if (is_uintN((value >> 12) & 0xfffff, 20)) {
as_lu12i_w(dest, (value >> 12) & 0xfffff);
}
as_lu32i_d(dest, (value >> 32) & 0xfffff);
} else if (0 == (value >> 52)) {
if (is_uintN((value >> 12) & 0xfffff, 20)) {
as_lu12i_w(dest, (value >> 12) & 0xfffff);
}
as_lu32i_d(dest, (value >> 32) & 0xfffff);
as_bstrins_d(dest, zero, 63, 52);
} else {
if (is_uintN((value >> 12) & 0xfffff, 20)) {
as_lu12i_w(dest, (value >> 12) & 0xfffff);
}
if (is_uintN((value >> 32) & 0xfffff, 20)) {
as_lu32i_d(dest, (value >> 32) & 0xfffff);
}
as_lu52i_d(dest, dest, (value >> 52) & 0xfff);
}
if (is_uintN(value & 0xfff, 12)) {
as_ori(dest, dest, value & 0xfff);
}
}
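// Worked example (illustrative): materializing 0x0123456789abcdef takes the
// full four-instruction path:
//   lu12i_w  dest, 0x89abc      ; bits 12..31 (sign-extended upward)
//   lu32i_d  dest, 0x34567      ; bits 32..51
//   lu52i_d  dest, dest, 0x012  ; bits 52..63
//   ori      dest, dest, 0xdef  ; bits 0..11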
// This method generates an lu32i_d, lu12i_w and ori instruction block that
// can be modified by UpdateLoad64Value, either during compilation
// (e.g. Assembler::bind) or during execution (e.g. jit::PatchJump).
void MacroAssemblerLOONG64::ma_liPatchable(Register dest, ImmPtr imm) {
return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
}
void MacroAssemblerLOONG64::ma_liPatchable(Register dest, ImmWord imm,
LiFlags flags) {
// hi12, hi20, low20, low12
if (Li64 == flags) { // Li64: Imm data
m_buffer.ensureSpace(4 * sizeof(uint32_t));
as_lu12i_w(dest, imm.value >> 12 & 0xfffff); // low20
as_ori(dest, dest, imm.value & 0xfff); // low12
as_lu32i_d(dest, imm.value >> 32 & 0xfffff); // hi20
as_lu52i_d(dest, dest, imm.value >> 52 & 0xfff); // hi12
} else { // Li48 address
m_buffer.ensureSpace(3 * sizeof(uint32_t));
as_lu12i_w(dest, imm.value >> 12 & 0xfffff); // low20
as_ori(dest, dest, imm.value & 0xfff); // low12
as_lu32i_d(dest, imm.value >> 32 & 0xfffff); // hi20
}
}
// Memory access ops.
FaultingCodeOffset MacroAssemblerLOONG64::ma_ld_b(Register dest,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = FaultingCodeOffset(currentOffset());
as_ld_b(dest, base, offset);
} else if (base != dest) {
ma_li(dest, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_ldx_b(dest, base, dest);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_ldx_b(dest, base, scratch);
}
return fco;
}
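// The same pattern repeats for every load/store width below: a signed 12-bit
// offset fits the immediate form (ld_b), anything larger materializes the
// offset in a register and uses the register-indexed form (ldx_b), reusing
// dest as the temporary whenever it does not alias base.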
FaultingCodeOffset MacroAssemblerLOONG64::ma_ld_bu(Register dest,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = FaultingCodeOffset(currentOffset());
as_ld_bu(dest, base, offset);
} else if (base != dest) {
ma_li(dest, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_ldx_bu(dest, base, dest);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_ldx_bu(dest, base, scratch);
}
return fco;
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_ld_h(Register dest,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = FaultingCodeOffset(currentOffset());
as_ld_h(dest, base, offset);
} else if (base != dest) {
ma_li(dest, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_ldx_h(dest, base, dest);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_ldx_h(dest, base, scratch);
}
return fco;
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_ld_hu(Register dest,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = FaultingCodeOffset(currentOffset());
as_ld_hu(dest, base, offset);
} else if (base != dest) {
ma_li(dest, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_ldx_hu(dest, base, dest);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_ldx_hu(dest, base, scratch);
}
return fco;
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_ld_w(Register dest,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = FaultingCodeOffset(currentOffset());
as_ld_w(dest, base, offset);
} else if (base != dest) {
ma_li(dest, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_ldx_w(dest, base, dest);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_ldx_w(dest, base, scratch);
}
return fco;
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_ld_wu(Register dest,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = FaultingCodeOffset(currentOffset());
as_ld_wu(dest, base, offset);
} else if (base != dest) {
ma_li(dest, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_ldx_wu(dest, base, dest);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_ldx_wu(dest, base, scratch);
}
return fco;
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_ld_d(Register dest,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = FaultingCodeOffset(currentOffset());
as_ld_d(dest, base, offset);
} else if (base != dest) {
ma_li(dest, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_ldx_d(dest, base, dest);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_ldx_d(dest, base, scratch);
}
return fco;
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_st_b(Register src,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = FaultingCodeOffset(currentOffset());
as_st_b(src, base, offset);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(src != scratch);
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_stx_b(src, base, scratch);
}
return fco;
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_st_h(Register src,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = FaultingCodeOffset(currentOffset());
as_st_h(src, base, offset);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(src != scratch);
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_stx_h(src, base, scratch);
}
return fco;
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_st_w(Register src,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = FaultingCodeOffset(currentOffset());
as_st_w(src, base, offset);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(src != scratch);
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_stx_w(src, base, scratch);
}
return fco;
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_st_d(Register src,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = FaultingCodeOffset(currentOffset());
as_st_d(src, base, offset);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(src != scratch);
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = FaultingCodeOffset(currentOffset());
as_stx_d(src, base, scratch);
}
return fco;
}
// Arithmetic-based ops.
// Add.
void MacroAssemblerLOONG64::ma_add_d(Register rd, Register rj, Imm32 imm) {
if (is_intN(imm.value, 12)) {
as_addi_d(rd, rj, imm.value);
} else if (rd != rj) {
ma_li(rd, imm);
as_add_d(rd, rj, rd);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(rj != scratch);
ma_li(scratch, imm);
as_add_d(rd, rj, scratch);
}
}
void MacroAssemblerLOONG64::ma_add32TestOverflow(Register rd, Register rj,
Register rk, Label* overflow) {
ScratchRegisterScope scratch(asMasm());
as_add_d(scratch, rj, rk);
as_add_w(rd, rj, rk);
ma_b(rd, Register(scratch), overflow, Assembler::NotEqual);
}
void MacroAssemblerLOONG64::ma_add32TestOverflow(Register rd, Register rj,
Imm32 imm, Label* overflow) {
// Check for signed range because of as_addi_d
if (is_intN(imm.value, 12)) {
ScratchRegisterScope scratch(asMasm());
as_addi_d(scratch, rj, imm.value);
as_addi_w(rd, rj, imm.value);
ma_b(rd, scratch, overflow, Assembler::NotEqual);
} else {
SecondScratchRegisterScope scratch2(asMasm());
ma_li(scratch2, imm);
ma_add32TestOverflow(rd, rj, scratch2, overflow);
}
}
void MacroAssemblerLOONG64::ma_addPtrTestOverflow(Register rd, Register rj,
Register rk,
Label* overflow) {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(rd != scratch);
if (rj == rk) {
if (rj == rd) {
as_or(scratch, rj, zero);
rj = scratch;
}
as_add_d(rd, rj, rj);
as_xor(scratch, rj, rd);
ma_b(scratch, zero, overflow, Assembler::LessThan);
} else {
SecondScratchRegisterScope scratch2(asMasm());
MOZ_ASSERT(rj != scratch);
MOZ_ASSERT(rd != scratch2);
if (rj == rd) {
as_or(scratch2, rj, zero);
rj = scratch2;
}
as_add_d(rd, rj, rk);
// rd = rj + rk overflow conditions:
// 1. rj < 0 and rd >= rk
// 2. rj >= 0 and rd < rk
as_slti(scratch, rj, 0);
as_slt(scratch2, rd, rk);
ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
}
}
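// Restated as a plain-C++ sketch (illustrative):
//
//   intptr_t rd = rj + rk;  // wrapping add
//   bool overflow = (rj < 0) != (rd < rk);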
void MacroAssemblerLOONG64::ma_addPtrTestOverflow(Register rd, Register rj,
Imm32 imm, Label* overflow) {
SecondScratchRegisterScope scratch2(asMasm());
if (imm.value == 0) {
as_ori(rd, rj, 0);
return;
}
if (rj == rd) {
as_ori(scratch2, rj, 0);
rj = scratch2;
}
ma_add_d(rd, rj, imm);
if (imm.value > 0) {
ma_b(rd, rj, overflow, Assembler::LessThan);
} else {
MOZ_ASSERT(imm.value < 0);
ma_b(rd, rj, overflow, Assembler::GreaterThan);
}
}
void MacroAssemblerLOONG64::ma_addPtrTestOverflow(Register rd, Register rj,
ImmWord imm,
Label* overflow) {
SecondScratchRegisterScope scratch2(asMasm());
if (imm.value == 0) {
as_ori(rd, rj, 0);
return;
}
if (rj == rd) {
MOZ_ASSERT(rj != scratch2);
as_ori(scratch2, rj, 0);
rj = scratch2;
}
ma_li(rd, imm);
as_add_d(rd, rj, rd);
if (imm.value > 0) {
ma_b(rd, rj, overflow, Assembler::LessThan);
} else {
MOZ_ASSERT(imm.value < 0);
ma_b(rd, rj, overflow, Assembler::GreaterThan);
}
}
void MacroAssemblerLOONG64::ma_addPtrTestCarry(Condition cond, Register rd,
Register rj, Register rk,
Label* label) {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(rd != rk);
MOZ_ASSERT(rd != scratch);
as_add_d(rd, rj, rk);
as_sltu(scratch, rd, rk);
ma_b(scratch, Register(scratch), label,
cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
}
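// An unsigned add rd = rj + rk wrapped around iff rd < rk (unsigned), which
// is exactly what the sltu above computes.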
void MacroAssemblerLOONG64::ma_addPtrTestCarry(Condition cond, Register rd,
Register rj, Imm32 imm,
Label* label) {
SecondScratchRegisterScope scratch2(asMasm());
// Check for signed range because of as_addi_d
if (is_intN(imm.value, 12)) {
as_addi_d(rd, rj, imm.value);
as_sltui(scratch2, rd, imm.value);
ma_b(scratch2, scratch2, label,
cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
} else {
ma_li(scratch2, imm);
ma_addPtrTestCarry(cond, rd, rj, scratch2, label);
}
}
void MacroAssemblerLOONG64::ma_addPtrTestCarry(Condition cond, Register rd,
Register rj, ImmWord imm,
Label* label) {
SecondScratchRegisterScope scratch2(asMasm());
// Check for signed range because of as_addi_d
if (is_intN(imm.value, 12)) {
as_addi_d(rd, rj, imm.value);
as_sltui(scratch2, rd, imm.value);
ma_b(scratch2, scratch2, label,
cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
} else {
ma_li(scratch2, imm);
ma_addPtrTestCarry(cond, rd, rj, scratch2, label);
}
}
// Subtract.
void MacroAssemblerLOONG64::ma_sub_d(Register rd, Register rj, Imm32 imm) {
if (is_intN(-imm.value, 12)) {
as_addi_d(rd, rj, -imm.value);
} else {
ScratchRegisterScope scratch(asMasm());
ma_li(scratch, imm);
as_sub_d(rd, rj, scratch);
}
}
void MacroAssemblerLOONG64::ma_sub32TestOverflow(Register rd, Register rj,
Register rk, Label* overflow) {
ScratchRegisterScope scratch(asMasm());
as_sub_d(scratch, rj, rk);
as_sub_w(rd, rj, rk);
ma_b(rd, Register(scratch), overflow, Assembler::NotEqual);
}
void MacroAssemblerLOONG64::ma_subPtrTestOverflow(Register rd, Register rj,
Register rk,
Label* overflow) {
SecondScratchRegisterScope scratch2(asMasm());
MOZ_ASSERT_IF(rj == rd, rj != rk);
MOZ_ASSERT(rj != scratch2);
MOZ_ASSERT(rk != scratch2);
MOZ_ASSERT(rd != scratch2);
Register rj_copy = rj;
if (rj == rd) {
as_or(scratch2, rj, zero);
rj_copy = scratch2;
}
{
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(rd != scratch);
as_sub_d(rd, rj, rk);
// If the sign of rj and rk are the same, no overflow
as_xor(scratch, rj_copy, rk);
// Check if the sign of rd and rj are the same
as_xor(scratch2, rd, rj_copy);
as_and(scratch2, scratch2, scratch);
}
ma_b(scratch2, zero, overflow, Assembler::LessThan);
}
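// Restated as a plain-C++ sketch (illustrative): subtraction overflows iff
// the operands have different signs and the result's sign differs from the
// minuend's:
//
//   bool overflow = ((rj ^ rk) & (rd ^ rj)) < 0;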
void MacroAssemblerLOONG64::ma_subPtrTestOverflow(Register rd, Register rj,
Imm32 imm, Label* overflow) {
// TODO(loong64): Check subPtrTestOverflow
MOZ_ASSERT(imm.value != INT32_MIN);
ma_addPtrTestOverflow(rd, rj, Imm32(-imm.value), overflow);
}
void MacroAssemblerLOONG64::ma_mul_d(Register rd, Register rj, Imm32 imm) {
// li handles the relocation.
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(rj != scratch);
ma_li(scratch, imm);
as_mul_d(rd, rj, scratch);
}
void MacroAssemblerLOONG64::ma_mulh_d(Register rd, Register rj, Imm32 imm) {
// li handles the relocation.
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(rj != scratch);
ma_li(scratch, imm);
as_mulh_d(rd, rj, scratch);
}
void MacroAssemblerLOONG64::ma_mulPtrTestOverflow(Register rd, Register rj,
Register rk,
Label* overflow) {
ScratchRegisterScope scratch(asMasm());
SecondScratchRegisterScope scratch2(asMasm());
MOZ_ASSERT(rd != scratch);
if (rd == rj) {
as_or(scratch, rj, zero);
rj = scratch;
rk = (rd == rk) ? rj : rk;
} else if (rd == rk) {
as_or(scratch, rk, zero);
rk = scratch;
}
as_mul_d(rd, rj, rk);
as_mulh_d(scratch, rj, rk);
as_srai_d(scratch2, rd, 63);
ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
}
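// The signed 128-bit product fits in 64 bits iff its high half equals the
// sign extension of the low half, i.e. mulh_d(rj, rk) == mul_d(rj, rk) >> 63
// (arithmetic shift), which is the comparison performed above.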
// Memory.
FaultingCodeOffset MacroAssemblerLOONG64::ma_load(
Register dest, Address address, LoadStoreSize size,
LoadStoreExtension extension) {
int32_t encodedOffset;
Register base;
FaultingCodeOffset fco;
// TODO: use as_ldx_b/h/w/d; that would avoid the extra as_add_d instruction.
switch (size) {
case SizeByte:
case SizeHalfWord:
if (!is_intN(address.offset, 12)) {
ma_li(ScratchRegister, Imm32(address.offset));
as_add_d(ScratchRegister, address.base, ScratchRegister);
base = ScratchRegister;
encodedOffset = 0;
} else {
encodedOffset = address.offset;
base = address.base;
}
fco = FaultingCodeOffset(currentOffset());
if (size == SizeByte) {
if (ZeroExtend == extension) {
as_ld_bu(dest, base, encodedOffset);
} else {
as_ld_b(dest, base, encodedOffset);
}
} else {
if (ZeroExtend == extension) {
as_ld_hu(dest, base, encodedOffset);
} else {
as_ld_h(dest, base, encodedOffset);
}
}
break;
case SizeWord:
case SizeDouble:
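// ldptr_w/ldptr_d encode a signed 14-bit offset scaled by 4, so the offset
// must be 4-byte aligned; the Imm16 signed-range check below matches their
// +/-32KB reach.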
if ((address.offset & 0x3) == 0 &&
(size == SizeDouble ||
(size == SizeWord && SignExtend == extension))) {
if (!Imm16::IsInSignedRange(address.offset)) {
ma_li(ScratchRegister, Imm32(address.offset));
as_add_d(ScratchRegister, address.base, ScratchRegister);
base = ScratchRegister;
encodedOffset = 0;
} else {
encodedOffset = address.offset;
base = address.base;
}
fco = FaultingCodeOffset(currentOffset());
if (size == SizeWord) {
as_ldptr_w(dest, base, encodedOffset);
} else {
as_ldptr_d(dest, base, encodedOffset);
}
} else {
if (!is_intN(address.offset, 12)) {
ma_li(ScratchRegister, Imm32(address.offset));
as_add_d(ScratchRegister, address.base, ScratchRegister);
base = ScratchRegister;
encodedOffset = 0;
} else {
encodedOffset = address.offset;
base = address.base;
}
fco = FaultingCodeOffset(currentOffset());
if (size == SizeWord) {
if (ZeroExtend == extension) {
as_ld_wu(dest, base, encodedOffset);
} else {
as_ld_w(dest, base, encodedOffset);
}
} else {
as_ld_d(dest, base, encodedOffset);
}
}
break;
default:
MOZ_CRASH("Invalid argument for ma_load");
}
return fco;
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_store(
Register data, Address address, LoadStoreSize size,
LoadStoreExtension extension) {
int32_t encodedOffset;
Register base;
FaultingCodeOffset fco;
// TODO: use as_stx_b/h/w/d; that would avoid the extra as_add_d instruction.
switch (size) {
case SizeByte:
case SizeHalfWord:
if (!is_intN(address.offset, 12)) {
ma_li(ScratchRegister, Imm32(address.offset));
as_add_d(ScratchRegister, address.base, ScratchRegister);
base = ScratchRegister;
encodedOffset = 0;
} else {
encodedOffset = address.offset;
base = address.base;
}
fco = FaultingCodeOffset(currentOffset());
if (size == SizeByte) {
as_st_b(data, base, encodedOffset);
} else {
as_st_h(data, base, encodedOffset);
}
break;
case SizeWord:
case SizeDouble:
if ((address.offset & 0x3) == 0) {
if (!Imm16::IsInSignedRange(address.offset)) {
ma_li(ScratchRegister, Imm32(address.offset));
as_add_d(ScratchRegister, address.base, ScratchRegister);
base = ScratchRegister;
encodedOffset = 0;
} else {
encodedOffset = address.offset;
base = address.base;
}
fco = FaultingCodeOffset(currentOffset());
if (size == SizeWord) {
as_stptr_w(data, base, encodedOffset);
} else {
as_stptr_d(data, base, encodedOffset);
}
} else {
if (!is_intN(address.offset, 12)) {
ma_li(ScratchRegister, Imm32(address.offset));
as_add_d(ScratchRegister, address.base, ScratchRegister);
base = ScratchRegister;
encodedOffset = 0;
} else {
encodedOffset = address.offset;
base = address.base;
}
fco = FaultingCodeOffset(currentOffset());
if (size == SizeWord) {
as_st_w(data, base, encodedOffset);
} else {
as_st_d(data, base, encodedOffset);
}
}
break;
default:
MOZ_CRASH("Invalid argument for ma_store");
}
return fco;
}
void MacroAssemblerLOONG64Compat::computeScaledAddress(const BaseIndex& address,
Register dest) {
Register base = address.base;
Register index = address.index;
int32_t shift = Imm32::ShiftOf(address.scale).value;
if (shift) {
MOZ_ASSERT(shift <= 4);
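// alsl_d adds (index << (sa + 1)) to base, hence the encoded shift amount is
// shift - 1.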
as_alsl_d(dest, index, base, shift - 1);
} else {
as_add_d(dest, base, index);
}
}
void MacroAssemblerLOONG64::ma_pop(Register r) {
MOZ_ASSERT(r != StackPointer);
as_ld_d(r, StackPointer, 0);
as_addi_d(StackPointer, StackPointer, sizeof(intptr_t));
}
void MacroAssemblerLOONG64::ma_push(Register r) {
if (r == StackPointer) {
ScratchRegisterScope scratch(asMasm());
as_or(scratch, r, zero);
as_addi_d(StackPointer, StackPointer, (int32_t)-sizeof(intptr_t));
as_st_d(scratch, StackPointer, 0);
} else {
as_addi_d(StackPointer, StackPointer, (int32_t)-sizeof(intptr_t));
as_st_d(r, StackPointer, 0);
}
}
// Branch helpers, used from within loongarch-specific code.
void MacroAssemblerLOONG64::ma_b(Register lhs, ImmWord imm, Label* label,
Condition c, JumpKind jumpKind) {
if (imm.value <= INT32_MAX) {
ma_b(lhs, Imm32(uint32_t(imm.value)), label, c, jumpKind);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(lhs != scratch);
ma_li(scratch, imm);
ma_b(lhs, Register(scratch), label, c, jumpKind, scratch);
}
}
void MacroAssemblerLOONG64::ma_b(Register lhs, Address addr, Label* label,
Condition c, JumpKind jumpKind) {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(lhs != scratch);
ma_ld_d(scratch, addr);
ma_b(lhs, Register(scratch), label, c, jumpKind, scratch);
}
void MacroAssemblerLOONG64::ma_b(Address addr, Imm32 imm, Label* label,
Condition c, JumpKind jumpKind) {
SecondScratchRegisterScope scratch2(asMasm());
ma_ld_d(scratch2, addr);
ma_b(Register(scratch2), imm, label, c, jumpKind);
}
void MacroAssemblerLOONG64::ma_b(Address addr, ImmGCPtr imm, Label* label,
Condition c, JumpKind jumpKind) {
SecondScratchRegisterScope scratch2(asMasm());
ma_ld_d(scratch2, addr);
ma_b(Register(scratch2), imm, label, c, jumpKind);
}
void MacroAssemblerLOONG64::ma_bl(Label* label) {
spew("branch .Llabel %p\n", label);
if (label->bound()) {
// Generate the long jump for calls, because the return address has to be
// the address after the reserved block.
addLongJump(nextOffset(), BufferOffset(label->offset()));
ScratchRegisterScope scratch(asMasm());
ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
as_jirl(ra, scratch, BOffImm16(0));
return;
}
// Second word holds a pointer to the next branch in label's chain.
uint32_t nextInChain =
label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
// Make the whole branch sequence contiguous in the buffer. The five
// instructions are written below.
m_buffer.ensureSpace(5 * sizeof(uint32_t));
spew("bal .Llabel %p\n", label);
BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
writeInst(nextInChain);
if (!oom()) {
label->use(bo.getOffset());
}
// Leave space for long jump.
as_nop();
as_nop();
as_nop();
}
void MacroAssemblerLOONG64::branchWithCode(InstImm code, Label* label,
JumpKind jumpKind,
Register scratch) {
// Simply output the label's pointer as its id; note that once a label is
// destroyed, its pointer may be reused.
spew("branch .Llabel %p", label);
MOZ_ASSERT(code.encode() !=
InstImm(op_jirl, BOffImm16(0), zero, ra).encode());
InstImm inst_beq = InstImm(op_beq, BOffImm16(0), zero, zero);
if (label->bound()) {
int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
if (BOffImm16::IsInRange(offset)) {
jumpKind = ShortJump;
}
// ShortJump
if (jumpKind == ShortJump) {
MOZ_ASSERT(BOffImm16::IsInRange(offset));
if (code.extractBitField(31, 26) == ((uint32_t)op_bcz >> 26)) {
code.setImm21(offset);
} else {
code.setBOffImm16(BOffImm16(offset));
}
#ifdef JS_JITSPEW
decodeBranchInstAndSpew(code);
#endif
writeInst(code.encode());
return;
}
// LongJump
if (code.encode() == inst_beq.encode()) {
// Handle long jump
addLongJump(nextOffset(), BufferOffset(label->offset()));
if (scratch == Register::Invalid()) {
ScratchRegisterScope scratch(asMasm());
ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
as_jirl(zero, scratch, BOffImm16(0)); // jr scratch
} else {
ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
as_jirl(zero, scratch, BOffImm16(0)); // jr scratch
}
as_nop();
return;
}
// OpenLongJump
// Handle a long conditional branch: the inverted branch's target offset is
// relative to itself and points at the trailing nop below.
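// The emitted sequence is:
//   b<inverted cond> +20            ; skip to the nop when not taken
//   lu12i_w/ori/lu32i_d scratch, _  ; patchable 48-bit target (3 instructions)
//   jirl zero, scratch, 0           ; jump to the target
//   nop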
spew("invert branch .Llabel %p", label);
InstImm code_r = invertBranch(code, BOffImm16(5 * sizeof(uint32_t)));
#ifdef JS_JITSPEW
decodeBranchInstAndSpew(code_r);
#endif
writeInst(code_r.encode());
addLongJump(nextOffset(), BufferOffset(label->offset()));
if (scratch == Register::Invalid()) {
ScratchRegisterScope scratch(asMasm());
ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
as_jirl(zero, scratch, BOffImm16(0)); // jr scratch
} else {
ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
as_jirl(zero, scratch, BOffImm16(0)); // jr scratch
}
as_nop();
return;
}
// Generate open jump and link it to a label.
// Second word holds a pointer to the next branch in label's chain.
uint32_t nextInChain =
label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
if (jumpKind == ShortJump) {
// Make the whole branch sequence contiguous in the buffer.
m_buffer.ensureSpace(2 * sizeof(uint32_t));
// Indicate that this is a short jump, with offset 4.
code.setBOffImm16(BOffImm16(4));
#ifdef JS_JITSPEW
decodeBranchInstAndSpew(code);
#endif
BufferOffset bo = writeInst(code.encode());
writeInst(nextInChain);
if (!oom()) {
label->use(bo.getOffset());
}
return;
}
bool conditional = code.encode() != inst_beq.encode();
// Make the whole branch sequence contiguous in the buffer. The five
// instructions are written below (including the conditional nop).
m_buffer.ensureSpace(5 * sizeof(uint32_t));
#ifdef JS_JITSPEW
decodeBranchInstAndSpew(code);
#endif
BufferOffset bo = writeInst(code.encode()); // invert
writeInst(nextInChain);
if (!oom()) {
label->use(bo.getOffset());
}
// Leave space for potential long jump.
as_nop();
as_nop();
if (conditional) {
as_nop();
}
}
void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, ImmWord imm,
Condition c) {
if (imm.value <= INT32_MAX) {
ma_cmp_set(rd, rj, Imm32(uint32_t(imm.value)), c);
} else {
ScratchRegisterScope scratch(asMasm());
ma_li(scratch, imm);
ma_cmp_set(rd, rj, scratch, c);
}
}
void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, ImmPtr imm,
Condition c) {
ma_cmp_set(rd, rj, ImmWord(uintptr_t(imm.value)), c);
}
void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Address address, Imm32 imm,
Condition c) {
// TODO(loong64): 32-bit ma_cmp_set?
SecondScratchRegisterScope scratch2(asMasm());
ma_ld_w(scratch2, address);
ma_cmp_set(rd, Register(scratch2), imm, c);
}
void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Address address,
Register rk, Condition c) {
SecondScratchRegisterScope scratch2(asMasm());
ma_ld_d(scratch2, address);
ma_cmp_set(rd, scratch2, rk, c);
}
void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Address address,
ImmWord imm, Condition c) {
SecondScratchRegisterScope scratch2(asMasm());
ma_ld_d(scratch2, address);
ma_cmp_set(rd, Register(scratch2), imm, c);
}
// fp instructions
void MacroAssemblerLOONG64::ma_lid(FloatRegister dest, double value) {
ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));
if (imm.value != 0) {
ScratchRegisterScope scratch(asMasm());
ma_li(scratch, imm);
moveToDouble(scratch, dest);
} else {
moveToDouble(zero, dest);
}
}
void MacroAssemblerLOONG64::ma_mv(FloatRegister src, ValueOperand dest) {
as_movfr2gr_d(dest.valueReg(), src);
}
void MacroAssemblerLOONG64::ma_mv(ValueOperand src, FloatRegister dest) {
as_movgr2fr_d(dest, src.valueReg());
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_fld_s(FloatRegister dest,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
js::wasm::FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = js::wasm::FaultingCodeOffset(currentOffset());
as_fld_s(dest, base, offset);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = js::wasm::FaultingCodeOffset(currentOffset());
as_fldx_s(dest, base, scratch);
}
return fco;
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_fld_d(FloatRegister dest,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
js::wasm::FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = js::wasm::FaultingCodeOffset(currentOffset());
as_fld_d(dest, base, offset);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = js::wasm::FaultingCodeOffset(currentOffset());
as_fldx_d(dest, base, scratch);
}
return fco;
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_fst_s(FloatRegister src,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
js::wasm::FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = js::wasm::FaultingCodeOffset(currentOffset());
as_fst_s(src, base, offset);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = js::wasm::FaultingCodeOffset(currentOffset());
as_fstx_s(src, base, scratch);
}
return fco;
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_fst_d(FloatRegister src,
Address address) {
int32_t offset = address.offset;
Register base = address.base;
js::wasm::FaultingCodeOffset fco;
if (is_intN(offset, 12)) {
fco = js::wasm::FaultingCodeOffset(currentOffset());
as_fst_d(src, base, offset);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(base != scratch);
ma_li(scratch, Imm32(offset));
fco = js::wasm::FaultingCodeOffset(currentOffset());
as_fstx_d(src, base, scratch);
}
return fco;
}
void MacroAssemblerLOONG64::ma_pop(FloatRegister f) {
as_fld_d(f, StackPointer, 0);
as_addi_d(StackPointer, StackPointer, sizeof(double));
}
void MacroAssemblerLOONG64::ma_push(FloatRegister f) {
as_addi_d(StackPointer, StackPointer, (int32_t)-sizeof(double));
as_fst_d(f, StackPointer, 0);
}
void MacroAssemblerLOONG64::ma_li(Register dest, ImmGCPtr ptr) {
writeDataRelocation(ptr);
asMasm().ma_liPatchable(dest, ImmPtr(ptr.value));
}
void MacroAssemblerLOONG64::ma_li(Register dest, Imm32 imm) {
if (is_intN(imm.value, 12)) {
as_addi_w(dest, zero, imm.value);
} else if (is_uintN(imm.value, 12)) {
as_ori(dest, zero, imm.value & 0xfff);
} else {
as_lu12i_w(dest, imm.value >> 12 & 0xfffff);
if (imm.value & 0xfff) {
as_ori(dest, dest, imm.value & 0xfff);
}
}
}
// This method generates an lu12i_w and ori instruction pair that can be
// modified by UpdateLuiOriValue, either during compilation
// (e.g. Assembler::bind) or during execution (e.g. jit::PatchJump).
void MacroAssemblerLOONG64::ma_liPatchable(Register dest, Imm32 imm) {
m_buffer.ensureSpace(2 * sizeof(uint32_t));
as_lu12i_w(dest, imm.value >> 12 & 0xfffff);
as_ori(dest, dest, imm.value & 0xfff);
}
void MacroAssemblerLOONG64::ma_fmovz(FloatFormat fmt, FloatRegister fd,
FloatRegister fj, Register rk) {
Label done;
ma_b(rk, zero, &done, Assembler::NotEqual);
if (fmt == SingleFloat) {
as_fmov_s(fd, fj);
} else {
as_fmov_d(fd, fj);
}
bind(&done);
}
void MacroAssemblerLOONG64::ma_fmovn(FloatFormat fmt, FloatRegister fd,
FloatRegister fj, Register rk) {
Label done;
ma_b(rk, zero, &done, Assembler::Equal);
if (fmt == SingleFloat) {
as_fmov_s(fd, fj);
} else {
as_fmov_d(fd, fj);
}
bind(&done);
}
void MacroAssemblerLOONG64::ma_and(Register rd, Register rj, Imm32 imm,
bool bit32) {
if (is_uintN(imm.value, 12)) {
as_andi(rd, rj, imm.value);
} else if (rd != rj) {
ma_li(rd, imm);
as_and(rd, rj, rd);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(rj != scratch);
ma_li(scratch, imm);
as_and(rd, rj, scratch);
}
}
void MacroAssemblerLOONG64::ma_or(Register rd, Register rj, Imm32 imm,
bool bit32) {
if (is_uintN(imm.value, 12)) {
as_ori(rd, rj, imm.value);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(rj != scratch);
ma_li(scratch, imm);
as_or(rd, rj, scratch);
}
}
void MacroAssemblerLOONG64::ma_xor(Register rd, Register rj, Imm32 imm,
bool bit32) {
if (is_uintN(imm.value, 12)) {
as_xori(rd, rj, imm.value);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(rj != scratch);
ma_li(scratch, imm);
as_xor(rd, rj, scratch);
}
}
// Arithmetic-based ops.
// Add.
void MacroAssemblerLOONG64::ma_add_w(Register rd, Register rj, Imm32 imm) {
if (is_intN(imm.value, 12)) {
as_addi_w(rd, rj, imm.value);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(rj != scratch);
ma_li(scratch, imm);
as_add_w(rd, rj, scratch);
}
}
void MacroAssemblerLOONG64::ma_add32TestCarry(Condition cond, Register rd,
Register rj, Register rk,
Label* overflow) {
MOZ_ASSERT(cond == Assembler::CarrySet || cond == Assembler::CarryClear);
MOZ_ASSERT_IF(rd == rj, rk != rd);
ScratchRegisterScope scratch(asMasm());
as_add_w(rd, rj, rk);
as_sltu(scratch, rd, rd == rj ? rk : rj);
ma_b(Register(scratch), Register(scratch), overflow,
cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
}
void MacroAssemblerLOONG64::ma_add32TestCarry(Condition cond, Register rd,
Register rj, Imm32 imm,
Label* overflow) {
SecondScratchRegisterScope scratch2(asMasm());
MOZ_ASSERT(rj != scratch2);
ma_li(scratch2, imm);
ma_add32TestCarry(cond, rd, rj, scratch2, overflow);
}
// Subtract.
void MacroAssemblerLOONG64::ma_sub_w(Register rd, Register rj, Imm32 imm) {
if (is_intN(-imm.value, 12)) {
as_addi_w(rd, rj, -imm.value);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(rj != scratch);
ma_li(scratch, imm);
as_sub_w(rd, rj, scratch);
}
}
void MacroAssemblerLOONG64::ma_sub_w(Register rd, Register rj, Register rk) {
as_sub_w(rd, rj, rk);
}
void MacroAssemblerLOONG64::ma_sub32TestOverflow(Register rd, Register rj,
Imm32 imm, Label* overflow) {
if (imm.value != INT32_MIN) {
asMasm().ma_add32TestOverflow(rd, rj, Imm32(-imm.value), overflow);
} else {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(rj != scratch);
ma_li(scratch, Imm32(imm.value));
asMasm().ma_sub32TestOverflow(rd, rj, scratch, overflow);
}
}
void MacroAssemblerLOONG64::ma_mul(Register rd, Register rj, Imm32 imm) {
ScratchRegisterScope scratch(asMasm());
MOZ_ASSERT(rj != scratch);
ma_li(scratch, imm);
as_mul_w(rd, rj, scratch);
}
void MacroAssemblerLOONG64::ma_mul32TestOverflow(Register rd, Register rj,
Register rk, Label* overflow) {
ScratchRegisterScope scratch(asMasm());
SecondScratchRegisterScope scratch2(asMasm());
as_mulh_w(scratch, rj, rk);
as_mul_w(rd, rj, rk);
as_srai_w(scratch2, rd, 31);
ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
}
void MacroAssemblerLOONG64::ma_mul32TestOverflow(Register rd, Register rj,
Imm32 imm, Label* overflow) {
ScratchRegisterScope scratch(asMasm());
SecondScratchRegisterScope scratch2(asMasm());
ma_li(scratch, imm);
as_mulh_w(scratch2, rj, scratch);
as_mul_w(rd, rj, scratch);
as_srai_w(scratch, rd, 31);
ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
}
void MacroAssemblerLOONG64::ma_div_branch_overflow(Register rd, Register rj,
Register rk,
Label* overflow) {
ScratchRegisterScope scratch(asMasm());
as_mod_w(scratch, rj, rk);
ma_b(scratch, scratch, overflow, Assembler::NonZero);
as_div_w(rd, rj, rk);
}
void MacroAssemblerLOONG64::ma_div_branch_overflow(Register rd, Register rj,
Imm32 imm, Label* overflow) {
SecondScratchRegisterScope scratch2(asMasm());
ma_li(scratch2, imm);
ma_div_branch_overflow(rd, rj, scratch2, overflow);
}
void MacroAssemblerLOONG64::ma_mod_mask(Register src, Register dest,
Register hold, Register remain,
int32_t shift, Label* negZero) {
// MATH:
// We wish to compute x % ((1<<y) - 1) for a known constant, y.
// First, let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit
// dividend as a number in base b, namely
// c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
// now, since both addition and multiplication commute with modulus,
// x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
// (c_0 % C) + (c_1 % C) * (b % C) + (c_2 % C) * (b^2 % C) ...
// now, since b == C + 1, b % C == 1, and b^n % C == 1,
// the whole thing simplifies to:
// (c_0 + c_1 + c_2 + ... + c_n) % C
// each c_n can easily be computed by a shift/bitextract, and the modulus
// can be maintained by simply subtracting by C whenever the number gets
// over C.
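// For example, with shift == 3 (so C == 7): 100 == 0o144 in base 8, and
// (1 + 4 + 4) % 7 == 2 == 100 % 7.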
int32_t mask = (1 << shift) - 1;
Label head, negative, sumSigned, done;
// hold holds -1 if the value was negative, 1 otherwise.
// remain holds the remaining bits that have not been processed.
// scratch2 serves as a temporary location to store extracted bits into, and
// also holds the trial subtraction as a temporary value.
// dest is the accumulator (and holds the final result).
// Move the whole value into remain.
as_or(remain, src, zero);
// Zero out the dest.
ma_li(dest, Imm32(0));
// Set the hold appropriately.
ma_b(remain, remain, &negative, Signed, ShortJump);
ma_li(hold, Imm32(1));
ma_b(&head, ShortJump);
bind(&negative);
ma_li(hold, Imm32(-1));
as_sub_w(remain, zero, remain);
// Begin the main loop.
bind(&head);
SecondScratchRegisterScope scratch2(asMasm());
// Extract the bottom bits into SecondScratchReg.
ma_and(scratch2, remain, Imm32(mask));
// Add those bits to the accumulator.
as_add_w(dest, dest, scratch2);
// Do a trial subtraction
ma_sub_w(scratch2, dest, Imm32(mask));
// If (sum - C) >= 0, store sum - C back into dest, thus performing a
// modulus.
ma_b(scratch2, Register(scratch2), &sumSigned, Signed, ShortJump);
as_or(dest, scratch2, zero);
bind(&sumSigned);
// Get rid of the bits that we extracted before.
as_srli_w(remain, remain, shift);
// If the shift produced zero, finish, otherwise, continue in the loop.
ma_b(remain, remain, &head, NonZero, ShortJump);
// Check the hold to see if we need to negate the result.
ma_b(hold, hold, &done, NotSigned, ShortJump);
// If hold was -1 (the input was negative), negate the result to be in line
// with what JS wants.
if (negZero != nullptr) {
// Jump out in case of negative zero.
ma_b(hold, hold, negZero, Zero);
as_sub_w(dest, zero, dest);
} else {
as_sub_w(dest, zero, dest);
}
bind(&done);
}
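// A reference version of the loop, as a plain-C++ sketch (illustrative;
// negZero handling omitted):
//
//   int32_t mask = (1 << shift) - 1;
//   uint32_t remain = (src < 0) ? uint32_t(-int64_t(src)) : uint32_t(src);
//   int32_t sum = 0;
//   do {
//     sum += remain & mask;
//     if (sum >= mask) sum -= mask;  // trial subtraction
//     remain >>= shift;
//   } while (remain != 0);
//   return (src < 0) ? -sum : sum;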
// Memory.
FaultingCodeOffset MacroAssemblerLOONG64::ma_load(
Register dest, const BaseIndex& src, LoadStoreSize size,
LoadStoreExtension extension) {
SecondScratchRegisterScope scratch2(asMasm());
asMasm().computeScaledAddress(src, scratch2);
return asMasm().ma_load(dest, Address(scratch2, src.offset), size, extension);
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_store(
Register data, const BaseIndex& dest, LoadStoreSize size,
LoadStoreExtension extension) {
SecondScratchRegisterScope scratch2(asMasm());
asMasm().computeScaledAddress(dest, scratch2);
return asMasm().ma_store(data, Address(scratch2, dest.offset), size,
extension);
}
void MacroAssemblerLOONG64::ma_store(Imm32 imm, const BaseIndex& dest,
LoadStoreSize size,
LoadStoreExtension extension) {
SecondScratchRegisterScope scratch2(asMasm());
// Make sure that scratch2 contains the absolute address, so the offset is 0.
asMasm().computeEffectiveAddress(dest, scratch2);
ScratchRegisterScope scratch(asMasm());
// The scratch register is free now; use it to load the imm value.
ma_li(scratch, imm);
// With offset == 0 the ScratchRegister is not used inside ma_store(), so we
// can pass it as the data parameter here.
asMasm().ma_store(scratch, Address(scratch2, 0), size, extension);
}
// Branch helpers, used from within loongarch-specific code.
// TODO(loong64) Optimize ma_b
void MacroAssemblerLOONG64::ma_b(Register lhs, Register rhs, Label* label,
Condition c, JumpKind jumpKind,
Register scratch) {
switch (c) {
case Equal:
case NotEqual:
asMasm().branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind,
scratch);
break;
case Always:
ma_b(label, jumpKind);
break;
case Zero:
case NonZero:
case Signed:
case NotSigned:
MOZ_ASSERT(lhs == rhs);
asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind, scratch);
break;
default: {
Condition cond = ma_cmp(ScratchRegister, lhs, rhs, c);
asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label,
jumpKind, scratch);
break;
}
}
}
void MacroAssemblerLOONG64::ma_b(Register lhs, Imm32 imm, Label* label,
Condition c, JumpKind jumpKind) {
MOZ_ASSERT(c != Overflow);
if (imm.value == 0) {
if (c == Always || c == AboveOrEqual) {
ma_b(label, jumpKind);
} else if (c == Below) {
; // This condition is always false. No branch required.
} else {
asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
}
} else {
switch (c) {
case Equal:
case NotEqual:
MOZ_ASSERT(lhs != ScratchRegister);
ma_li(ScratchRegister, imm);
ma_b(lhs, ScratchRegister, label, c, jumpKind);
break;
default:
Condition cond = ma_cmp(ScratchRegister, lhs, imm, c);
asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label,
jumpKind);
}
}
}
void MacroAssemblerLOONG64::ma_b(Register lhs, ImmPtr imm, Label* l,
Condition c, JumpKind jumpKind) {
asMasm().ma_b(lhs, ImmWord(uintptr_t(imm.value)), l, c, jumpKind);
}
void MacroAssemblerLOONG64::ma_b(Label* label, JumpKind jumpKind) {
asMasm().branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
}
Assembler::Condition MacroAssemblerLOONG64::ma_cmp(Register dest, Register lhs,
Register rhs, Condition c) {
switch (c) {
case Above:
// bgtu s,t,label =>
// sltu at,t,s
// bne at,$zero,offs
as_sltu(dest, rhs, lhs);
return NotEqual;
case AboveOrEqual:
// bgeu s,t,label =>
// sltu at,s,t
// beq at,$zero,offs
as_sltu(dest, lhs, rhs);
return Equal;
case Below:
// bltu s,t,label =>
// sltu at,s,t
// bne at,$zero,offs
as_sltu(dest, lhs, rhs);
return NotEqual;
case BelowOrEqual:
// bleu s,t,label =>
// sltu at,t,s
// beq at,$zero,offs
as_sltu(dest, rhs, lhs);
return Equal;
case GreaterThan:
// bgt s,t,label =>
// slt at,t,s
// bne at,$zero,offs
as_slt(dest, rhs, lhs);
return NotEqual;
case GreaterThanOrEqual:
// bge s,t,label =>
// slt at,s,t
// beq at,$zero,offs
as_slt(dest, lhs, rhs);
return Equal;
case LessThan:
// blt s,t,label =>
// slt at,s,t
// bne at,$zero,offs
as_slt(dest, lhs, rhs);
return NotEqual;
case LessThanOrEqual:
// ble s,t,label =>
// slt at,t,s
// beq at,$zero,offs
as_slt(dest, rhs, lhs);
return Equal;
default:
MOZ_CRASH("Invalid condition.");
}
return Always;
}
Assembler::Condition MacroAssemblerLOONG64::ma_cmp(Register dest, Register lhs,
Imm32 imm, Condition c) {
ScratchRegisterScope scratch(asMasm());
MOZ_RELEASE_ASSERT(lhs != scratch);
switch (c) {
case Above:
case BelowOrEqual:
if (imm.value != 0x7fffffff && is_intN(imm.value + 1, 12) &&
imm.value != -1) {
// lhs <= rhs via lhs < rhs + 1 if rhs + 1 does not overflow
as_sltui(dest, lhs, imm.value + 1);
return (c == BelowOrEqual ? NotEqual : Equal);
} else {
ma_li(scratch, imm);
as_sltu(dest, scratch, lhs);
return (c == BelowOrEqual ? Equal : NotEqual);
}
case AboveOrEqual:
case Below:
if (is_intN(imm.value, 12)) {
as_sltui(dest, lhs, imm.value);
} else {
ma_li(scratch, imm);
as_sltu(dest, lhs, scratch);
}
return (c == AboveOrEqual ? Equal : NotEqual);
case GreaterThan:
case LessThanOrEqual:
if (imm.value != 0x7fffffff && is_intN(imm.value + 1, 12)) {
// lhs <= rhs via lhs < rhs + 1.
as_slti(dest, lhs, imm.value + 1);
return (c == LessThanOrEqual ? NotEqual : Equal);
} else {
ma_li(scratch, imm);
as_slt(dest, scratch, lhs);
return (c == LessThanOrEqual ? Equal : NotEqual);
}
case GreaterThanOrEqual:
case LessThan:
if (is_intN(imm.value, 12)) {
as_slti(dest, lhs, imm.value);
} else {
ma_li(scratch, imm);
as_slt(dest, lhs, scratch);
}
return (c == GreaterThanOrEqual ? Equal : NotEqual);
default:
MOZ_CRASH("Invalid condition.");
}
return Always;
}
// fp instructions
void MacroAssemblerLOONG64::ma_lis(FloatRegister dest, float value) {
Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));
if (imm.value != 0) {
ScratchRegisterScope scratch(asMasm());
ma_li(scratch, imm);
moveToFloat32(scratch, dest);
} else {
moveToFloat32(zero, dest);
}
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_fst_d(FloatRegister ft,
BaseIndex address) {
SecondScratchRegisterScope scratch2(asMasm());
asMasm().computeScaledAddress(address, scratch2);
return asMasm().ma_fst_d(ft, Address(scratch2, address.offset));
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_fst_s(FloatRegister ft,
BaseIndex address) {
SecondScratchRegisterScope scratch2(asMasm());
asMasm().computeScaledAddress(address, scratch2);
return asMasm().ma_fst_s(ft, Address(scratch2, address.offset));
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_fld_d(FloatRegister ft,
const BaseIndex& src) {
SecondScratchRegisterScope scratch2(asMasm());
asMasm().computeScaledAddress(src, scratch2);
return asMasm().ma_fld_d(ft, Address(scratch2, src.offset));
}
FaultingCodeOffset MacroAssemblerLOONG64::ma_fld_s(FloatRegister ft,
const BaseIndex& src) {
SecondScratchRegisterScope scratch2(asMasm());
asMasm().computeScaledAddress(src, scratch2);
return asMasm().ma_fld_s(ft, Address(scratch2, src.offset));
}
void MacroAssemblerLOONG64::ma_bc_s(FloatRegister lhs, FloatRegister rhs,
Label* label, DoubleCondition c,
JumpKind jumpKind, FPConditionBit fcc) {
compareFloatingPoint(SingleFloat, lhs, rhs, c, fcc);
asMasm().branchWithCode(getBranchCode(fcc), label, jumpKind);
}
void MacroAssemblerLOONG64::ma_bc_d(FloatRegister lhs, FloatRegister rhs,
Label* label, DoubleCondition c,
JumpKind jumpKind, FPConditionBit fcc) {
compareFloatingPoint(DoubleFloat, lhs, rhs, c, fcc);
asMasm().branchWithCode(getBranchCode(fcc), label, jumpKind);
}
void MacroAssemblerLOONG64::ma_call(ImmPtr dest) {
asMasm().ma_liPatchable(CallReg, dest);
as_jirl(ra, CallReg, BOffImm16(0));
}
void MacroAssemblerLOONG64::ma_jump(ImmPtr dest) {
ScratchRegisterScope scratch(asMasm());
asMasm().ma_liPatchable(scratch, dest);
as_jirl(zero, scratch, BOffImm16(0));
}
void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, Register rk,
Condition c) {
switch (c) {
case Equal:
// seq d,s,t =>
// xor d,s,t
// sltiu d,d,1
as_xor(rd, rj, rk);
as_sltui(rd, rd, 1);
break;
case NotEqual:
// sne d,s,t =>
// xor d,s,t
// sltu d,$zero,d
as_xor(rd, rj, rk);
as_sltu(rd, zero, rd);
break;
case Above:
// sgtu d,s,t =>
// sltu d,t,s
as_sltu(rd, rk, rj);
break;
case AboveOrEqual:
// sgeu d,s,t =>
// sltu d,s,t
// xori d,d,1
as_sltu(rd, rj, rk);
as_xori(rd, rd, 1);
break;
case Below:
// sltu d,s,t
as_sltu(rd, rj, rk);
break;
case BelowOrEqual:
// sleu d,s,t =>
// sltu d,t,s
// xori d,d,1
as_sltu(rd, rk, rj);
as_xori(rd, rd, 1);
break;
case GreaterThan:
// sgt d,s,t =>
// slt d,t,s
as_slt(rd, rk, rj);
break;
case GreaterThanOrEqual:
// sge d,s,t =>
// slt d,s,t
// xori d,d,1
as_slt(rd, rj, rk);
as_xori(rd, rd, 1);
break;
case LessThan:
// slt d,s,t
as_slt(rd, rj, rk);
break;
case LessThanOrEqual:
// sle d,s,t =>
// slt d,t,s
// xori d,d,1
as_slt(rd, rk, rj);
as_xori(rd, rd, 1);
break;
case Zero:
MOZ_ASSERT(rj == rk);
// seq d,s,$zero =>
// sltiu d,s,1
as_sltui(rd, rj, 1);
break;
case NonZero:
MOZ_ASSERT(rj == rk);
// sne d,s,$zero =>
// sltu d,$zero,s
as_sltu(rd, zero, rj);
break;
case Signed:
MOZ_ASSERT(rj == rk);
as_slt(rd, rj, zero);
break;
case NotSigned:
MOZ_ASSERT(rj == rk);
// sge d,s,$zero =>
// slt d,s,$zero
// xori d,d,1
as_slt(rd, rj, zero);
as_xori(rd, rd, 1);
break;
default:
MOZ_CRASH("Invalid condition.");
}
}
void MacroAssemblerLOONG64::ma_cmp_set_double(Register dest, FloatRegister lhs,
FloatRegister rhs,
DoubleCondition c) {
compareFloatingPoint(DoubleFloat, lhs, rhs, c);
as_movcf2gr(dest, FCC0);
}
void MacroAssemblerLOONG64::ma_cmp_set_float32(Register dest, FloatRegister lhs,
FloatRegister rhs,
DoubleCondition c) {
compareFloatingPoint(SingleFloat, lhs, rhs, c);
as_movcf2gr(dest, FCC0);
}
void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, Imm32 imm,
Condition c) {
if (imm.value == 0) {
switch (c) {
case Equal:
case BelowOrEqual:
as_sltui(rd, rj, 1);
break;
case NotEqual:
case Above:
as_sltu(rd, zero, rj);
break;
case AboveOrEqual:
case Below:
as_ori(rd, zero, c == AboveOrEqual ? 1 : 0);
break;
case GreaterThan:
case LessThanOrEqual:
as_slt(rd, zero, rj);
if (c == LessThanOrEqual) {
as_xori(rd, rd, 1);
}
break;
case LessThan:
case GreaterThanOrEqual:
as_slt(rd, rj, zero);
if (c == GreaterThanOrEqual) {
as_xori(rd, rd, 1);
}
break;
case Zero:
as_sltui(rd, rj, 1);
break;
case NonZero:
as_sltu(rd, zero, rj);
break;
case Signed:
as_slt(rd, rj, zero);
break;
case NotSigned:
as_slt(rd, rj, zero);
as_xori(rd, rd, 1);
break;
default:
MOZ_CRASH("Invalid condition.");
}
return;
}
switch (c) {
case Equal:
case NotEqual:
ma_xor(rd, rj, imm);
if (c == Equal) {
as_sltui(rd, rd, 1);
} else {
as_sltu(rd, zero, rd);
}
break;
case Zero:
case NonZero:
case Signed:
case NotSigned:
MOZ_CRASH("Invalid condition.");
default:
Condition cond = ma_cmp(rd, rj, imm, c);
MOZ_ASSERT(cond == Equal || cond == NotEqual);
if (cond == Equal) {
as_xori(rd, rd, 1);
}
}
}
void MacroAssemblerLOONG64::compareFloatingPoint(FloatFormat fmt,
FloatRegister lhs,
FloatRegister rhs,
DoubleCondition c,
FPConditionBit fcc) {
switch (c) {
case DoubleOrdered:
as_fcmp_cor(fmt, lhs, rhs, fcc);
break;
case DoubleEqual:
as_fcmp_ceq(fmt, lhs, rhs, fcc);
break;
case DoubleNotEqual:
as_fcmp_cne(fmt, lhs, rhs, fcc);
break;
case DoubleGreaterThan:
as_fcmp_clt(fmt, rhs, lhs, fcc);
break;
case DoubleGreaterThanOrEqual:
as_fcmp_cle(fmt, rhs, lhs, fcc);
break;
case DoubleLessThan:
as_fcmp_clt(fmt, lhs, rhs, fcc);
break;
case DoubleLessThanOrEqual:
as_fcmp_cle(fmt, lhs, rhs, fcc);
break;
case DoubleUnordered:
as_fcmp_cun(fmt, lhs, rhs, fcc);
break;
case DoubleEqualOrUnordered:
as_fcmp_cueq(fmt, lhs, rhs, fcc);
break;
case DoubleNotEqualOrUnordered:
as_fcmp_cune(fmt, lhs, rhs, fcc);
break;
case DoubleGreaterThanOrUnordered:
as_fcmp_cult(fmt, rhs, lhs, fcc);
break;
case DoubleGreaterThanOrEqualOrUnordered:
as_fcmp_cule(fmt, rhs, lhs, fcc);
break;
case DoubleLessThanOrUnordered:
as_fcmp_cult(fmt, lhs, rhs, fcc);
break;
case DoubleLessThanOrEqualOrUnordered:
as_fcmp_cule(fmt, lhs, rhs, fcc);
break;
default:
MOZ_CRASH("Invalid DoubleCondition.");
}
}
void MacroAssemblerLOONG64::minMaxDouble(FloatRegister srcDest,
FloatRegister second, bool handleNaN,
bool isMax) {
if (srcDest == second) {
return;
}
Label nan, done;
// First or second is NaN, result is NaN.
ma_bc_d(srcDest, second, &nan, Assembler::DoubleUnordered, ShortJump);
if (isMax) {
as_fmax_d(srcDest, srcDest, second);
} else {
as_fmin_d(srcDest, srcDest, second);
}
ma_b(&done, ShortJump);
bind(&nan);
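// x + NaN is NaN for any x, so the add yields the required NaN result.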
as_fadd_d(srcDest, srcDest, second);
bind(&done);
}
void MacroAssemblerLOONG64::minMaxFloat32(FloatRegister srcDest,
FloatRegister second, bool handleNaN,
bool isMax) {
if (srcDest == second) {
return;
}
Label nan, done;
// First or second is NaN, result is NaN.
ma_bc_s(srcDest, second, &nan, Assembler::DoubleUnordered, ShortJump);
if (isMax) {
as_fmax_s(srcDest, srcDest, second);
} else {
as_fmin_s(srcDest, srcDest, second);
}
ma_b(&done, ShortJump);
bind(&nan);
as_fadd_s(srcDest, srcDest, second);
bind(&done);
}
FaultingCodeOffset MacroAssemblerLOONG64::loadDouble(const Address& address,
FloatRegister dest) {
return asMasm().ma_fld_d(dest, address);
}
FaultingCodeOffset MacroAssemblerLOONG64::loadDouble(const BaseIndex& src,
FloatRegister dest) {
return asMasm().ma_fld_d(dest, src);
}
FaultingCodeOffset MacroAssemblerLOONG64::loadFloat32(const Address& address,
FloatRegister dest) {
return asMasm().ma_fld_s(dest, address);
}
FaultingCodeOffset MacroAssemblerLOONG64::loadFloat32(const BaseIndex& src,
FloatRegister dest) {
return asMasm().ma_fld_s(dest, src);
}
void MacroAssemblerLOONG64::wasmLoadImpl(const wasm::MemoryAccessDesc& access,
Register memoryBase, Register ptr,
Register ptrScratch,
AnyRegister output, Register tmp) {
access.assertOffsetInGuardPages();
uint32_t offset = access.offset32();
MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
// Maybe add the offset.
if (offset) {
asMasm().addPtr(ImmWord(offset), ptrScratch);
ptr = ptrScratch;
}
asMasm().memoryBarrierBefore(access.sync());
append(access, wasm::TrapMachineInsnForLoad(byteSize(access.type())),
FaultingCodeOffset(currentOffset()));
switch (access.type()) {
case Scalar::Int8:
as_ldx_b(output.gpr(), memoryBase, ptr);
break;
case Scalar::Uint8:
as_ldx_bu(output.gpr(), memoryBase, ptr);
break;