[llvm] [RISCV] Add 32 bit GPR sub-register for Zfinx. (PR #108336)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 16 15:58:56 PDT 2024
https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/108336
>From 14177d20867e12f9506176a9c9231f8e5efcf8d8 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 30 Aug 2024 18:29:19 -0700
Subject: [PATCH 01/13] [RISCV] Add 16 bit GPR sub-register for Zhinx.
This patch adds a 16 bit register class for use with Zhinx instructions.
This makes them more similar to Zfh instructions and allows us to
only spill 16 bits.
I've added CodeGenOnly instructions for load/store using GPRF16 as that
gave better results than insert_subreg/extract_subreg. I'm using FSGNJ for
GPRF16 copy with Zhinx as that gave better results. Zhinxmin will use
ADDI+subreg operations.
Function arguments use this new GPRF16 register class for f16 arguments
with Zhinxmin, eliminating the need to use RISCVISD::FMV* nodes.
I plan to extend this idea to Zfinx next.
After that, I want to try to extend this to 32 bit integer W instructions.
My thought is that we can arrange to have all writes to the 32 bit
GPR guarantee sign extension similar to how Mips64 is handled. Unfortunately,
we are missing some W instructions in Zba and Zbs that would make this
straightforward.
---
.../Target/RISCV/AsmParser/RISCVAsmParser.cpp | 10 ++
.../RISCV/Disassembler/RISCVDisassembler.cpp | 13 ++
.../RISCV/RISCVDeadRegisterDefinitions.cpp | 9 +-
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 5 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 57 ++++++-
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 23 +++
llvm/lib/Target/RISCV/RISCVInstrInfo.td | 8 +-
llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td | 27 ++--
llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp | 14 +-
llvm/lib/Target/RISCV/RISCVRegisterInfo.td | 112 +++++++++-----
.../CodeGen/RISCV/fastcc-without-f-reg.ll | 140 +++++++++---------
llvm/test/CodeGen/RISCV/half-arith.ll | 20 ++-
.../RISCV/half-bitmanip-dagcombines.ll | 24 ++-
llvm/test/CodeGen/RISCV/half-convert.ll | 12 ++
llvm/test/CodeGen/RISCV/half-imm.ll | 8 +-
llvm/test/CodeGen/RISCV/half-intrinsics.ll | 27 ++--
.../CodeGen/RISCV/half-maximum-minimum.ll | 8 +-
llvm/test/CodeGen/RISCV/half-mem.ll | 4 +-
llvm/test/CodeGen/RISCV/half-select-fcmp.ll | 30 ++--
llvm/test/CodeGen/RISCV/half-select-icmp.ll | 40 ++---
llvm/test/CodeGen/RISCV/kcfi-mir.ll | 4 +-
21 files changed, 398 insertions(+), 197 deletions(-)
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 6d33a1f64195d5..de9591d4cf72ac 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -480,7 +480,13 @@ struct RISCVOperand final : public MCParsedAsmOperand {
RISCVMCRegisterClasses[RISCV::GPRRegClassID].contains(Reg.RegNum);
}
+ bool isGPRF16() const {
+ return Kind == KindTy::Register &&
+ RISCVMCRegisterClasses[RISCV::GPRF16RegClassID].contains(Reg.RegNum);
+ }
+
bool isGPRAsFPR() const { return isGPR() && Reg.IsGPRAsFPR; }
+ bool isGPRAsFPR16() const { return isGPRF16() && Reg.IsGPRAsFPR; }
bool isGPRPair() const {
return Kind == KindTy::Register &&
@@ -1341,6 +1347,10 @@ unsigned RISCVAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
Op.Reg.RegNum = convertFPR64ToFPR16(Reg);
return Match_Success;
}
+ if (Kind == MCK_GPRAsFPR16 && Op.isGPRAsFPR()) {
+ Op.Reg.RegNum = Reg - RISCV::X0 + RISCV::X0_H;
+ return Match_Success;
+ }
// As the parser couldn't differentiate an VRM2/VRM4/VRM8 from an VR, coerce
// the register from VR to VRM2/VRM4/VRM8 if necessary.
if (IsRegVR && (Kind == MCK_VRM2 || Kind == MCK_VRM4 || Kind == MCK_VRM8)) {
diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
index 23897e2d98f634..76ca7728ebdd3a 100644
--- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -81,6 +81,19 @@ static DecodeStatus DecodeGPRRegisterClass(MCInst &Inst, uint32_t RegNo,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeGPRF16RegisterClass(MCInst &Inst, uint32_t RegNo,
+ uint64_t Address,
+ const MCDisassembler *Decoder) {
+ bool IsRVE = Decoder->getSubtargetInfo().hasFeature(RISCV::FeatureStdExtE);
+
+ if (RegNo >= 32 || (IsRVE && RegNo >= 16))
+ return MCDisassembler::Fail;
+
+ MCRegister Reg = RISCV::X0_H + RegNo;
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeGPRX1X5RegisterClass(MCInst &Inst, uint32_t RegNo,
uint64_t Address,
const MCDisassembler *Decoder) {
diff --git a/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp b/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp
index cce0ffe16e5fe3..713c7a0661defe 100644
--- a/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp
+++ b/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp
@@ -93,14 +93,19 @@ bool RISCVDeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
continue;
LLVM_DEBUG(dbgs() << " Dead def operand #" << I << " in:\n ";
MI.print(dbgs()));
+ Register X0Reg;
const TargetRegisterClass *RC = TII->getRegClass(Desc, I, TRI, MF);
- if (!(RC && RC->contains(RISCV::X0))) {
+ if (RC && RC->contains(RISCV::X0)) {
+ X0Reg = RISCV::X0;
+ } else if (RC && RC->contains(RISCV::X0_H)) {
+ X0Reg = RISCV::X0_H;
+ } else {
LLVM_DEBUG(dbgs() << " Ignoring, register is not a GPR.\n");
continue;
}
assert(LIS.hasInterval(Reg));
LIS.removeInterval(Reg);
- MO.setReg(RISCV::X0);
+ MO.setReg(X0Reg);
LLVM_DEBUG(dbgs() << " Replacing with zero register. New:\n ";
MI.print(dbgs()));
++NumDeadDefsReplaced;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 4580f3191d1389..d8db2694213c17 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -959,7 +959,10 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
}
SDNode *Res;
- if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)
+ if (VT.SimpleTy == MVT::f16 && Opc == RISCV::COPY) {
+ Res =
+ CurDAG->getTargetExtractSubreg(RISCV::sub_16, DL, VT, Imm).getNode();
+ } else if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)
Res = CurDAG->getMachineNode(
Opc, DL, VT, Imm,
CurDAG->getTargetConstant(RISCVFPRndMode::RNE, DL, XLenVT));
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 6b4219b4623847..eb957e24ccee86 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -19059,6 +19059,23 @@ ArrayRef<MCPhysReg> RISCV::getArgGPRs(const RISCVABI::ABI ABI) {
return ArrayRef(ArgIGPRs);
}
+static ArrayRef<MCPhysReg> getArgGPR16s(const RISCVABI::ABI ABI) {
+ // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
+ // the ILP32E ABI.
+ static const MCPhysReg ArgIGPRs[] = {RISCV::X10_H, RISCV::X11_H, RISCV::X12_H,
+ RISCV::X13_H, RISCV::X14_H, RISCV::X15_H,
+ RISCV::X16_H, RISCV::X17_H};
+ // The GPRs used for passing arguments in the ILP32E/ILP64E ABI.
+ static const MCPhysReg ArgEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
+ RISCV::X12_H, RISCV::X13_H,
+ RISCV::X14_H, RISCV::X15_H};
+
+ if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
+ return ArrayRef(ArgEGPRs);
+
+ return ArrayRef(ArgIGPRs);
+}
+
static ArrayRef<MCPhysReg> getFastCCArgGPRs(const RISCVABI::ABI ABI) {
// The GPRs used for passing arguments in the FastCC, X5 and X6 might be used
// for save-restore libcall, so we don't use them.
@@ -19077,6 +19094,26 @@ static ArrayRef<MCPhysReg> getFastCCArgGPRs(const RISCVABI::ABI ABI) {
return ArrayRef(FastCCIGPRs);
}
+static ArrayRef<MCPhysReg> getFastCCArgGPRF16s(const RISCVABI::ABI ABI) {
+ // The GPRs used for passing arguments in the FastCC, X5 and X6 might be used
+ // for save-restore libcall, so we don't use them.
+ // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
+ static const MCPhysReg FastCCIGPRs[] = {
+ RISCV::X10_H, RISCV::X11_H, RISCV::X12_H, RISCV::X13_H,
+ RISCV::X14_H, RISCV::X15_H, RISCV::X16_H, RISCV::X17_H,
+ RISCV::X28_H, RISCV::X29_H, RISCV::X30_H, RISCV::X31_H};
+
+ // The GPRs used for passing arguments in the FastCC when using ILP32E/ILP64E.
+ static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_H, RISCV::X11_H,
+ RISCV::X12_H, RISCV::X13_H,
+ RISCV::X14_H, RISCV::X15_H};
+
+ if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
+ return ArrayRef(FastCCEGPRs);
+
+ return ArrayRef(FastCCIGPRs);
+}
+
// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
@@ -19225,6 +19262,15 @@ bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
// similar local variables rather than directly checking against the target
// ABI.
+ const RISCVSubtarget &STI =
+ State.getMachineFunction().getSubtarget<RISCVSubtarget>();
+ if ((ValVT == MVT::f16 && STI.hasStdExtZhinxmin())) {
+ if (MCRegister Reg = State.AllocateReg(getArgGPR16s(ABI))) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+
ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);
if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::bf16 ||
@@ -19685,8 +19731,7 @@ bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
}
// Check if there is an available GPR before hitting the stack.
- if ((LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin()) ||
- (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
+ if ((LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
(LocVT == MVT::f64 && Subtarget.is64Bit() &&
Subtarget.hasStdExtZdinx())) {
if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
@@ -19703,6 +19748,14 @@ bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
}
}
+ // Check if there is an available GPRF16 before hitting the stack.
+ if ((LocVT == MVT::f16 && Subtarget.hasStdExtZhinxmin())) {
+ if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF16s(ABI))) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+
if (LocVT == MVT::f16) {
unsigned Offset2 = State.AllocateStack(2, Align(2));
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset2, LocVT, LocInfo));
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 0a64a8e1440084..cb1840a2c60130 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -452,6 +452,23 @@ void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
return;
}
+ if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {
+ if (STI.hasStdExtZhinx()) {
+ BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_H_INX), DstReg)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
+ DstReg =
+ TRI->getMatchingSuperReg(DstReg, RISCV::sub_16, &RISCV::GPRRegClass);
+ SrcReg =
+ TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16, &RISCV::GPRRegClass);
+ BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addImm(0);
+ return;
+ }
+
if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
// Emit an ADDI for both parts of GPRPair.
BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
@@ -573,6 +590,9 @@ void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
RISCV::SW : RISCV::SD;
IsScalableVector = false;
+ } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
+ Opcode = RISCV::SH_INX;
+ IsScalableVector = false;
} else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
Opcode = RISCV::PseudoRV32ZdinxSD;
IsScalableVector = false;
@@ -656,6 +676,9 @@ void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
RISCV::LW : RISCV::LD;
IsScalableVector = false;
+ } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
+ Opcode = RISCV::LH_INX;
+ IsScalableVector = false;
} else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
Opcode = RISCV::PseudoRV32ZdinxLD;
IsScalableVector = false;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 6d0952a42eda9f..deb7c8b8435b8b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -514,8 +514,8 @@ class BranchCC_rri<bits<3> funct3, string opcodestr>
}
let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
-class Load_ri<bits<3> funct3, string opcodestr>
- : RVInstI<funct3, OPC_LOAD, (outs GPR:$rd), (ins GPRMem:$rs1, simm12:$imm12),
+class Load_ri<bits<3> funct3, string opcodestr, DAGOperand rty = GPR>
+ : RVInstI<funct3, OPC_LOAD, (outs rty:$rd), (ins GPRMem:$rs1, simm12:$imm12),
opcodestr, "$rd, ${imm12}(${rs1})">;
class HLoad_r<bits<7> funct7, bits<5> funct5, string opcodestr>
@@ -529,9 +529,9 @@ class HLoad_r<bits<7> funct7, bits<5> funct5, string opcodestr>
// reflecting the order these fields are specified in the instruction
// encoding.
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
-class Store_rri<bits<3> funct3, string opcodestr>
+class Store_rri<bits<3> funct3, string opcodestr, DAGOperand rty = GPR>
: RVInstS<funct3, OPC_STORE, (outs),
- (ins GPR:$rs2, GPRMem:$rs1, simm12:$imm12),
+ (ins rty:$rs2, GPRMem:$rs1, simm12:$imm12),
opcodestr, "$rs2, ${imm12}(${rs1})">;
class HStore_rr<bits<7> funct7, string opcodestr>
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
index 792cb7fa6dbc2f..7f417d29fc6c2d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -33,9 +33,15 @@ def riscv_fmv_x_signexth
// Zhinxmin and Zhinx
+def GPRAsFPR16 : AsmOperandClass {
+ let Name = "GPRAsFPR16";
+ let ParserMethod = "parseGPRAsFPR";
+ let RenderMethod = "addRegOperands";
+}
+
def FPR16INX : RegisterOperand<GPRF16> {
- let ParserMatchClass = GPRAsFPR;
- let DecoderMethod = "DecodeGPRRegisterClass";
+ let ParserMatchClass = GPRAsFPR16;
+ let DecoderMethod = "DecodeGPRF16RegisterClass";
}
def ZfhExt : ExtInfo<"", "", [HasStdExtZfh],
@@ -84,6 +90,12 @@ def FLH : FPLoad_r<0b001, "flh", FPR16, WriteFLD16>;
def FSH : FPStore_r<0b001, "fsh", FPR16, WriteFST16>;
} // Predicates = [HasHalfFPLoadStoreMove]
+let Predicates = [HasStdExtZhinxmin], isCodeGenOnly = 1 in {
+def LH_INX : Load_ri<0b001, "lh", GPRF16>, Sched<[WriteLDH, ReadMemBase]>;
+def SH_INX : Store_rri<0b001, "sh", GPRF16>,
+ Sched<[WriteSTH, ReadStoreData, ReadMemBase]>;
+}
+
foreach Ext = ZfhExts in {
let SchedRW = [WriteFMA16, ReadFMA16, ReadFMA16, ReadFMA16Addend] in {
defm FMADD_H : FPFMA_rrr_frm_m<OPC_MADD, 0b10, "fmadd.h", Ext>;
@@ -426,13 +438,10 @@ let Predicates = [HasStdExtZhinxmin] in {
defm Select_FPR16INX : SelectCC_GPR_rrirr<FPR16INX, f16>;
/// Loads
-def : Pat<(f16 (load (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12))),
- (COPY_TO_REGCLASS (LH GPR:$rs1, simm12:$imm12), GPRF16)>;
+def : LdPat<load, LH_INX, f16>;
/// Stores
-def : Pat<(store (f16 FPR16INX:$rs2),
- (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12)),
- (SH (COPY_TO_REGCLASS FPR16INX:$rs2, GPR), GPR:$rs1, simm12:$imm12)>;
+def : StPat<store, SH_INX, GPRF16, f16>;
} // Predicates = [HasStdExtZhinxmin]
let Predicates = [HasStdExtZfhmin] in {
@@ -458,8 +467,8 @@ def : Pat<(any_fpround FPR32INX:$rs1), (FCVT_H_S_INX FPR32INX:$rs1, FRM_DYN)>;
def : Pat<(any_fpextend FPR16INX:$rs1), (FCVT_S_H_INX FPR16INX:$rs1, FRM_RNE)>;
// Moves (no conversion)
-def : Pat<(f16 (riscv_fmv_h_x GPR:$src)), (COPY_TO_REGCLASS GPR:$src, GPR)>;
-def : Pat<(riscv_fmv_x_anyexth FPR16INX:$src), (COPY_TO_REGCLASS FPR16INX:$src, GPR)>;
+def : Pat<(f16 (riscv_fmv_h_x GPR:$src)), (EXTRACT_SUBREG GPR:$src, sub_16)>;
+def : Pat<(riscv_fmv_x_anyexth FPR16INX:$src), (INSERT_SUBREG (XLenVT (IMPLICIT_DEF)), FPR16INX:$src, sub_16)>;
def : Pat<(fcopysign FPR32INX:$rs1, FPR16INX:$rs2), (FSGNJ_S_INX $rs1, (FCVT_S_H_INX $rs2, FRM_RNE))>;
} // Predicates = [HasStdExtZhinxmin]
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 701594c0fb05dc..2bd41386e2dfb9 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -115,11 +115,11 @@ BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
}
// Use markSuperRegs to ensure any register aliases are also reserved
- markSuperRegs(Reserved, RISCV::X2); // sp
- markSuperRegs(Reserved, RISCV::X3); // gp
- markSuperRegs(Reserved, RISCV::X4); // tp
+ markSuperRegs(Reserved, RISCV::X2_H); // sp
+ markSuperRegs(Reserved, RISCV::X3_H); // gp
+ markSuperRegs(Reserved, RISCV::X4_H); // tp
if (TFI->hasFP(MF))
- markSuperRegs(Reserved, RISCV::X8); // fp
+ markSuperRegs(Reserved, RISCV::X8_H); // fp
// Reserve the base register if we need to realign the stack and allocate
// variable-sized objects at runtime.
if (TFI->hasBP(MF))
@@ -131,7 +131,7 @@ BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// There are only 16 GPRs for RVE.
if (Subtarget.hasStdExtE())
- for (MCPhysReg Reg = RISCV::X16; Reg <= RISCV::X31; Reg++)
+ for (MCPhysReg Reg = RISCV::X16_H; Reg <= RISCV::X31_H; Reg++)
markSuperRegs(Reserved, Reg);
// V registers for code generation. We handle them manually.
@@ -150,8 +150,8 @@ BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
if (Subtarget.hasStdExtE())
report_fatal_error("Graal reserved registers do not exist in RVE");
- markSuperRegs(Reserved, RISCV::X23);
- markSuperRegs(Reserved, RISCV::X27);
+ markSuperRegs(Reserved, RISCV::X23_H);
+ markSuperRegs(Reserved, RISCV::X27_H);
}
// Shadow stack pointer.
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 5725d8eda88ced..37a1643ef5236f 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -83,41 +83,81 @@ def sub_gpr_odd : SubRegIndex<32, 32> {
let RegAltNameIndices = [ABIRegAltName] in {
let isConstant = true in
- def X0 : RISCVReg<0, "x0", ["zero"]>, DwarfRegNum<[0]>;
+ def X0_H : RISCVReg<0, "x0", ["zero"]>;
let CostPerUse = [0, 1] in {
- def X1 : RISCVReg<1, "x1", ["ra"]>, DwarfRegNum<[1]>;
- def X2 : RISCVReg<2, "x2", ["sp"]>, DwarfRegNum<[2]>;
- def X3 : RISCVReg<3, "x3", ["gp"]>, DwarfRegNum<[3]>;
- def X4 : RISCVReg<4, "x4", ["tp"]>, DwarfRegNum<[4]>;
- def X5 : RISCVReg<5, "x5", ["t0"]>, DwarfRegNum<[5]>;
- def X6 : RISCVReg<6, "x6", ["t1"]>, DwarfRegNum<[6]>;
- def X7 : RISCVReg<7, "x7", ["t2"]>, DwarfRegNum<[7]>;
+ def X1_H : RISCVReg<1, "x1", ["ra"]>;
+ def X2_H : RISCVReg<2, "x2", ["sp"]>;
+ def X3_H : RISCVReg<3, "x3", ["gp"]>;
+ def X4_H : RISCVReg<4, "x4", ["tp"]>;
+ def X5_H : RISCVReg<5, "x5", ["t0"]>;
+ def X6_H : RISCVReg<6, "x6", ["t1"]>;
+ def X7_H : RISCVReg<7, "x7", ["t2"]>;
}
- def X8 : RISCVReg<8, "x8", ["s0", "fp"]>, DwarfRegNum<[8]>;
- def X9 : RISCVReg<9, "x9", ["s1"]>, DwarfRegNum<[9]>;
- def X10 : RISCVReg<10,"x10", ["a0"]>, DwarfRegNum<[10]>;
- def X11 : RISCVReg<11,"x11", ["a1"]>, DwarfRegNum<[11]>;
- def X12 : RISCVReg<12,"x12", ["a2"]>, DwarfRegNum<[12]>;
- def X13 : RISCVReg<13,"x13", ["a3"]>, DwarfRegNum<[13]>;
- def X14 : RISCVReg<14,"x14", ["a4"]>, DwarfRegNum<[14]>;
- def X15 : RISCVReg<15,"x15", ["a5"]>, DwarfRegNum<[15]>;
+ def X8_H : RISCVReg<8, "x8", ["s0", "fp"]>;
+ def X9_H : RISCVReg<9, "x9", ["s1"]>;
+ def X10_H : RISCVReg<10,"x10", ["a0"]>;
+ def X11_H : RISCVReg<11,"x11", ["a1"]>;
+ def X12_H : RISCVReg<12,"x12", ["a2"]>;
+ def X13_H : RISCVReg<13,"x13", ["a3"]>;
+ def X14_H : RISCVReg<14,"x14", ["a4"]>;
+ def X15_H : RISCVReg<15,"x15", ["a5"]>;
let CostPerUse = [0, 1] in {
- def X16 : RISCVReg<16,"x16", ["a6"]>, DwarfRegNum<[16]>;
- def X17 : RISCVReg<17,"x17", ["a7"]>, DwarfRegNum<[17]>;
- def X18 : RISCVReg<18,"x18", ["s2"]>, DwarfRegNum<[18]>;
- def X19 : RISCVReg<19,"x19", ["s3"]>, DwarfRegNum<[19]>;
- def X20 : RISCVReg<20,"x20", ["s4"]>, DwarfRegNum<[20]>;
- def X21 : RISCVReg<21,"x21", ["s5"]>, DwarfRegNum<[21]>;
- def X22 : RISCVReg<22,"x22", ["s6"]>, DwarfRegNum<[22]>;
- def X23 : RISCVReg<23,"x23", ["s7"]>, DwarfRegNum<[23]>;
- def X24 : RISCVReg<24,"x24", ["s8"]>, DwarfRegNum<[24]>;
- def X25 : RISCVReg<25,"x25", ["s9"]>, DwarfRegNum<[25]>;
- def X26 : RISCVReg<26,"x26", ["s10"]>, DwarfRegNum<[26]>;
- def X27 : RISCVReg<27,"x27", ["s11"]>, DwarfRegNum<[27]>;
- def X28 : RISCVReg<28,"x28", ["t3"]>, DwarfRegNum<[28]>;
- def X29 : RISCVReg<29,"x29", ["t4"]>, DwarfRegNum<[29]>;
- def X30 : RISCVReg<30,"x30", ["t5"]>, DwarfRegNum<[30]>;
- def X31 : RISCVReg<31,"x31", ["t6"]>, DwarfRegNum<[31]>;
+ def X16_H : RISCVReg<16,"x16", ["a6"]>;
+ def X17_H : RISCVReg<17,"x17", ["a7"]>;
+ def X18_H : RISCVReg<18,"x18", ["s2"]>;
+ def X19_H : RISCVReg<19,"x19", ["s3"]>;
+ def X20_H : RISCVReg<20,"x20", ["s4"]>;
+ def X21_H : RISCVReg<21,"x21", ["s5"]>;
+ def X22_H : RISCVReg<22,"x22", ["s6"]>;
+ def X23_H : RISCVReg<23,"x23", ["s7"]>;
+ def X24_H : RISCVReg<24,"x24", ["s8"]>;
+ def X25_H : RISCVReg<25,"x25", ["s9"]>;
+ def X26_H : RISCVReg<26,"x26", ["s10"]>;
+ def X27_H : RISCVReg<27,"x27", ["s11"]>;
+ def X28_H : RISCVReg<28,"x28", ["t3"]>;
+ def X29_H : RISCVReg<29,"x29", ["t4"]>;
+ def X30_H : RISCVReg<30,"x30", ["t5"]>;
+ def X31_H : RISCVReg<31,"x31", ["t6"]>;
+ }
+
+ let SubRegIndices = [sub_16] in {
+ let isConstant = true in
+ def X0 : RISCVRegWithSubRegs<0, "x0", [X0_H], ["zero"]>, DwarfRegNum<[0]>;
+ let CostPerUse = [0, 1] in {
+ def X1 : RISCVRegWithSubRegs<1, "x1", [X1_H], ["ra"]>, DwarfRegNum<[1]>;
+ def X2 : RISCVRegWithSubRegs<2, "x2", [X2_H], ["sp"]>, DwarfRegNum<[2]>;
+ def X3 : RISCVRegWithSubRegs<3, "x3", [X3_H], ["gp"]>, DwarfRegNum<[3]>;
+ def X4 : RISCVRegWithSubRegs<4, "x4", [X4_H], ["tp"]>, DwarfRegNum<[4]>;
+ def X5 : RISCVRegWithSubRegs<5, "x5", [X5_H], ["t0"]>, DwarfRegNum<[5]>;
+ def X6 : RISCVRegWithSubRegs<6, "x6", [X6_H], ["t1"]>, DwarfRegNum<[6]>;
+ def X7 : RISCVRegWithSubRegs<7, "x7", [X7_H], ["t2"]>, DwarfRegNum<[7]>;
+ }
+ def X8 : RISCVRegWithSubRegs<8, "x8", [X8_H], ["s0", "fp"]>, DwarfRegNum<[8]>;
+ def X9 : RISCVRegWithSubRegs<9, "x9", [X9_H], ["s1"]>, DwarfRegNum<[9]>;
+ def X10 : RISCVRegWithSubRegs<10,"x10", [X10_H], ["a0"]>, DwarfRegNum<[10]>;
+ def X11 : RISCVRegWithSubRegs<11,"x11", [X11_H], ["a1"]>, DwarfRegNum<[11]>;
+ def X12 : RISCVRegWithSubRegs<12,"x12", [X12_H], ["a2"]>, DwarfRegNum<[12]>;
+ def X13 : RISCVRegWithSubRegs<13,"x13", [X13_H], ["a3"]>, DwarfRegNum<[13]>;
+ def X14 : RISCVRegWithSubRegs<14,"x14", [X14_H], ["a4"]>, DwarfRegNum<[14]>;
+ def X15 : RISCVRegWithSubRegs<15,"x15", [X15_H], ["a5"]>, DwarfRegNum<[15]>;
+ let CostPerUse = [0, 1] in {
+ def X16 : RISCVRegWithSubRegs<16,"x16", [X16_H], ["a6"]>, DwarfRegNum<[16]>;
+ def X17 : RISCVRegWithSubRegs<17,"x17", [X17_H], ["a7"]>, DwarfRegNum<[17]>;
+ def X18 : RISCVRegWithSubRegs<18,"x18", [X18_H], ["s2"]>, DwarfRegNum<[18]>;
+ def X19 : RISCVRegWithSubRegs<19,"x19", [X19_H], ["s3"]>, DwarfRegNum<[19]>;
+ def X20 : RISCVRegWithSubRegs<20,"x20", [X20_H], ["s4"]>, DwarfRegNum<[20]>;
+ def X21 : RISCVRegWithSubRegs<21,"x21", [X21_H], ["s5"]>, DwarfRegNum<[21]>;
+ def X22 : RISCVRegWithSubRegs<22,"x22", [X22_H], ["s6"]>, DwarfRegNum<[22]>;
+ def X23 : RISCVRegWithSubRegs<23,"x23", [X23_H], ["s7"]>, DwarfRegNum<[23]>;
+ def X24 : RISCVRegWithSubRegs<24,"x24", [X24_H], ["s8"]>, DwarfRegNum<[24]>;
+ def X25 : RISCVRegWithSubRegs<25,"x25", [X25_H], ["s9"]>, DwarfRegNum<[25]>;
+ def X26 : RISCVRegWithSubRegs<26,"x26", [X26_H], ["s10"]>, DwarfRegNum<[26]>;
+ def X27 : RISCVRegWithSubRegs<27,"x27", [X27_H], ["s11"]>, DwarfRegNum<[27]>;
+ def X28 : RISCVRegWithSubRegs<28,"x28", [X28_H], ["t3"]>, DwarfRegNum<[28]>;
+ def X29 : RISCVRegWithSubRegs<29,"x29", [X29_H], ["t4"]>, DwarfRegNum<[29]>;
+ def X30 : RISCVRegWithSubRegs<30,"x30", [X30_H], ["t5"]>, DwarfRegNum<[30]>;
+ def X31 : RISCVRegWithSubRegs<31,"x31", [X31_H], ["t6"]>, DwarfRegNum<[31]>;
+ }
}
}
@@ -565,8 +605,14 @@ def VRM8NoV0 : VReg<VM8VTs, (sub VRM8, V0M8), 8>;
def VMV0 : VReg<VMaskVTs, (add V0), 1>;
+def GPRF16 : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 17),
+ (sequence "X%u_H", 5, 7),
+ (sequence "X%u_H", 28, 31),
+ (sequence "X%u_H", 8, 9),
+ (sequence "X%u_H", 18, 27),
+ (sequence "X%u_H", 0, 4))>;
+
let RegInfos = XLenRI in {
-def GPRF16 : RISCVRegisterClass<[f16], 16, (add GPR)>;
def GPRF32 : RISCVRegisterClass<[f32], 32, (add GPR)>;
} // RegInfos = XLenRI
diff --git a/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll b/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
index 2e7ca025314705..a5029fa1d76c5c 100644
--- a/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
+++ b/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
@@ -260,14 +260,14 @@ define half @caller_half_32(<32 x half> %A) nounwind {
; ZHINX32-NEXT: sw s9, 68(sp) # 4-byte Folded Spill
; ZHINX32-NEXT: sw s10, 64(sp) # 4-byte Folded Spill
; ZHINX32-NEXT: sw s11, 60(sp) # 4-byte Folded Spill
-; ZHINX32-NEXT: lh t0, 124(sp)
-; ZHINX32-NEXT: sw t0, 56(sp) # 4-byte Folded Spill
-; ZHINX32-NEXT: lh t0, 120(sp)
-; ZHINX32-NEXT: sw t0, 52(sp) # 4-byte Folded Spill
-; ZHINX32-NEXT: lh t0, 116(sp)
-; ZHINX32-NEXT: sw t0, 48(sp) # 4-byte Folded Spill
; ZHINX32-NEXT: lh t0, 112(sp)
-; ZHINX32-NEXT: sw t0, 44(sp) # 4-byte Folded Spill
+; ZHINX32-NEXT: sh t0, 58(sp) # 2-byte Folded Spill
+; ZHINX32-NEXT: lh t0, 116(sp)
+; ZHINX32-NEXT: sh t0, 56(sp) # 2-byte Folded Spill
+; ZHINX32-NEXT: lh t0, 120(sp)
+; ZHINX32-NEXT: sh t0, 54(sp) # 2-byte Folded Spill
+; ZHINX32-NEXT: lh t0, 124(sp)
+; ZHINX32-NEXT: sh t0, 52(sp) # 2-byte Folded Spill
; ZHINX32-NEXT: lh t6, 128(sp)
; ZHINX32-NEXT: lh t5, 132(sp)
; ZHINX32-NEXT: lh t4, 136(sp)
@@ -308,10 +308,10 @@ define half @caller_half_32(<32 x half> %A) nounwind {
; ZHINX32-NEXT: sh t4, 4(sp)
; ZHINX32-NEXT: sh t5, 2(sp)
; ZHINX32-NEXT: sh t6, 0(sp)
-; ZHINX32-NEXT: lw t3, 44(sp) # 4-byte Folded Reload
-; ZHINX32-NEXT: lw t4, 48(sp) # 4-byte Folded Reload
-; ZHINX32-NEXT: lw t5, 52(sp) # 4-byte Folded Reload
-; ZHINX32-NEXT: lw t6, 56(sp) # 4-byte Folded Reload
+; ZHINX32-NEXT: lh t3, 58(sp) # 2-byte Folded Reload
+; ZHINX32-NEXT: lh t4, 56(sp) # 2-byte Folded Reload
+; ZHINX32-NEXT: lh t5, 54(sp) # 2-byte Folded Reload
+; ZHINX32-NEXT: lh t6, 52(sp) # 2-byte Folded Reload
; ZHINX32-NEXT: call callee_half_32
; ZHINX32-NEXT: lw ra, 108(sp) # 4-byte Folded Reload
; ZHINX32-NEXT: lw s0, 104(sp) # 4-byte Folded Reload
@@ -331,48 +331,48 @@ define half @caller_half_32(<32 x half> %A) nounwind {
;
; ZHINX64-LABEL: caller_half_32:
; ZHINX64: # %bb.0:
-; ZHINX64-NEXT: addi sp, sp, -176
-; ZHINX64-NEXT: sd ra, 168(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s0, 160(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s1, 152(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s2, 144(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s3, 136(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s4, 128(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s5, 120(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s6, 112(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s7, 104(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s8, 96(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s9, 88(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s10, 80(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s11, 72(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: lh t0, 200(sp)
-; ZHINX64-NEXT: sd t0, 64(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: lh t0, 192(sp)
-; ZHINX64-NEXT: sd t0, 56(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: lh t0, 184(sp)
-; ZHINX64-NEXT: sd t0, 48(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: addi sp, sp, -160
+; ZHINX64-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s0, 144(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s1, 136(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s2, 128(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s3, 120(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s4, 112(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s5, 104(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s6, 96(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s7, 88(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s8, 80(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s9, 72(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s10, 64(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s11, 56(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: lh t0, 160(sp)
+; ZHINX64-NEXT: sh t0, 54(sp) # 2-byte Folded Spill
+; ZHINX64-NEXT: lh t0, 168(sp)
+; ZHINX64-NEXT: sh t0, 52(sp) # 2-byte Folded Spill
; ZHINX64-NEXT: lh t0, 176(sp)
-; ZHINX64-NEXT: sd t0, 40(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: lh t6, 208(sp)
-; ZHINX64-NEXT: lh t5, 216(sp)
-; ZHINX64-NEXT: lh t4, 224(sp)
-; ZHINX64-NEXT: lh s0, 232(sp)
-; ZHINX64-NEXT: lh s1, 240(sp)
-; ZHINX64-NEXT: lh s2, 248(sp)
-; ZHINX64-NEXT: lh s3, 256(sp)
-; ZHINX64-NEXT: lh s4, 264(sp)
-; ZHINX64-NEXT: lh s5, 272(sp)
-; ZHINX64-NEXT: lh s6, 280(sp)
-; ZHINX64-NEXT: lh s7, 288(sp)
-; ZHINX64-NEXT: lh s8, 296(sp)
-; ZHINX64-NEXT: lh s9, 304(sp)
-; ZHINX64-NEXT: lh s10, 312(sp)
-; ZHINX64-NEXT: lh s11, 320(sp)
-; ZHINX64-NEXT: lh ra, 328(sp)
-; ZHINX64-NEXT: lh t3, 336(sp)
-; ZHINX64-NEXT: lh t2, 344(sp)
-; ZHINX64-NEXT: lh t1, 352(sp)
-; ZHINX64-NEXT: lh t0, 360(sp)
+; ZHINX64-NEXT: sh t0, 50(sp) # 2-byte Folded Spill
+; ZHINX64-NEXT: lh t0, 184(sp)
+; ZHINX64-NEXT: sh t0, 48(sp) # 2-byte Folded Spill
+; ZHINX64-NEXT: lh t6, 192(sp)
+; ZHINX64-NEXT: lh t5, 200(sp)
+; ZHINX64-NEXT: lh t4, 208(sp)
+; ZHINX64-NEXT: lh s0, 216(sp)
+; ZHINX64-NEXT: lh s1, 224(sp)
+; ZHINX64-NEXT: lh s2, 232(sp)
+; ZHINX64-NEXT: lh s3, 240(sp)
+; ZHINX64-NEXT: lh s4, 248(sp)
+; ZHINX64-NEXT: lh s5, 256(sp)
+; ZHINX64-NEXT: lh s6, 264(sp)
+; ZHINX64-NEXT: lh s7, 272(sp)
+; ZHINX64-NEXT: lh s8, 280(sp)
+; ZHINX64-NEXT: lh s9, 288(sp)
+; ZHINX64-NEXT: lh s10, 296(sp)
+; ZHINX64-NEXT: lh s11, 304(sp)
+; ZHINX64-NEXT: lh ra, 312(sp)
+; ZHINX64-NEXT: lh t3, 320(sp)
+; ZHINX64-NEXT: lh t2, 328(sp)
+; ZHINX64-NEXT: lh t1, 336(sp)
+; ZHINX64-NEXT: lh t0, 344(sp)
; ZHINX64-NEXT: sh t0, 38(sp)
; ZHINX64-NEXT: sh t1, 36(sp)
; ZHINX64-NEXT: sh t2, 34(sp)
@@ -393,25 +393,25 @@ define half @caller_half_32(<32 x half> %A) nounwind {
; ZHINX64-NEXT: sh t4, 4(sp)
; ZHINX64-NEXT: sh t5, 2(sp)
; ZHINX64-NEXT: sh t6, 0(sp)
-; ZHINX64-NEXT: ld t3, 40(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld t4, 48(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld t5, 56(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld t6, 64(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: lh t3, 54(sp) # 2-byte Folded Reload
+; ZHINX64-NEXT: lh t4, 52(sp) # 2-byte Folded Reload
+; ZHINX64-NEXT: lh t5, 50(sp) # 2-byte Folded Reload
+; ZHINX64-NEXT: lh t6, 48(sp) # 2-byte Folded Reload
; ZHINX64-NEXT: call callee_half_32
-; ZHINX64-NEXT: ld ra, 168(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s0, 160(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s1, 152(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s2, 144(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s3, 136(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s4, 128(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s5, 120(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s6, 112(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s7, 104(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s8, 96(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s9, 88(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s10, 80(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s11, 72(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: addi sp, sp, 176
+; ZHINX64-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s2, 128(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s3, 120(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s4, 112(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s5, 104(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s6, 96(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s7, 88(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s8, 80(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s9, 72(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s10, 64(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s11, 56(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: addi sp, sp, 160
; ZHINX64-NEXT: ret
;
; ZFINX32-LABEL: caller_half_32:
diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll
index b033c75eeadd8b..78f1ff4169dee2 100644
--- a/llvm/test/CodeGen/RISCV/half-arith.ll
+++ b/llvm/test/CodeGen/RISCV/half-arith.ll
@@ -466,20 +466,26 @@ define half @fsgnj_h(half %a, half %b) nounwind {
;
; RV32IZHINXMIN-LABEL: fsgnj_h:
; RV32IZHINXMIN: # %bb.0:
+; RV32IZHINXMIN-NEXT: # kill: def $x11_h killed $x11_h def $x11
+; RV32IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RV32IZHINXMIN-NEXT: lui a2, 1048568
; RV32IZHINXMIN-NEXT: and a1, a1, a2
; RV32IZHINXMIN-NEXT: slli a0, a0, 17
; RV32IZHINXMIN-NEXT: srli a0, a0, 17
; RV32IZHINXMIN-NEXT: or a0, a0, a1
+; RV32IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: fsgnj_h:
; RV64IZHINXMIN: # %bb.0:
+; RV64IZHINXMIN-NEXT: # kill: def $x11_h killed $x11_h def $x11
+; RV64IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RV64IZHINXMIN-NEXT: lui a2, 1048568
; RV64IZHINXMIN-NEXT: and a1, a1, a2
; RV64IZHINXMIN-NEXT: slli a0, a0, 49
; RV64IZHINXMIN-NEXT: srli a0, a0, 49
; RV64IZHINXMIN-NEXT: or a0, a0, a1
+; RV64IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV64IZHINXMIN-NEXT: ret
%1 = call half @llvm.copysign.f16(half %a, half %b)
ret half %1
@@ -725,6 +731,7 @@ define half @fsgnjn_h(half %a, half %b) nounwind {
;
; RV32IZHINXMIN-LABEL: fsgnjn_h:
; RV32IZHINXMIN: # %bb.0:
+; RV32IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1
; RV32IZHINXMIN-NEXT: fcvt.s.h a2, a0
; RV32IZHINXMIN-NEXT: fadd.s a1, a2, a1
@@ -735,10 +742,12 @@ define half @fsgnjn_h(half %a, half %b) nounwind {
; RV32IZHINXMIN-NEXT: slli a0, a0, 17
; RV32IZHINXMIN-NEXT: srli a0, a0, 17
; RV32IZHINXMIN-NEXT: or a0, a0, a1
+; RV32IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: fsgnjn_h:
; RV64IZHINXMIN: # %bb.0:
+; RV64IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1
; RV64IZHINXMIN-NEXT: fcvt.s.h a2, a0
; RV64IZHINXMIN-NEXT: fadd.s a1, a2, a1
@@ -749,6 +758,7 @@ define half @fsgnjn_h(half %a, half %b) nounwind {
; RV64IZHINXMIN-NEXT: slli a0, a0, 49
; RV64IZHINXMIN-NEXT: srli a0, a0, 49
; RV64IZHINXMIN-NEXT: or a0, a0, a1
+; RV64IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV64IZHINXMIN-NEXT: ret
%1 = fadd half %a, %b
%2 = fneg half %1
@@ -1702,8 +1712,7 @@ define half @fnmadd_h_3(half %a, half %b, half %c) nounwind {
; CHECKIZHINX-LABEL: fnmadd_h_3:
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: fmadd.h a0, a0, a1, a2
-; CHECKIZHINX-NEXT: lui a1, 1048568
-; CHECKIZHINX-NEXT: xor a0, a0, a1
+; CHECKIZHINX-NEXT: fneg.h a0, a0
; CHECKIZHINX-NEXT: ret
;
; RV32I-LABEL: fnmadd_h_3:
@@ -1798,6 +1807,7 @@ define half @fnmadd_h_3(half %a, half %b, half %c) nounwind {
; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0
; CHECKIZHINXMIN-NEXT: lui a1, 1048568
; CHECKIZHINXMIN-NEXT: xor a0, a0, a1
+; CHECKIZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; CHECKIZHINXMIN-NEXT: ret
%1 = call half @llvm.fma.f16(half %a, half %b, half %c)
%neg = fneg half %1
@@ -1823,9 +1833,7 @@ define half @fnmadd_nsz(half %a, half %b, half %c) nounwind {
;
; CHECKIZHINX-LABEL: fnmadd_nsz:
; CHECKIZHINX: # %bb.0:
-; CHECKIZHINX-NEXT: fmadd.h a0, a0, a1, a2
-; CHECKIZHINX-NEXT: lui a1, 1048568
-; CHECKIZHINX-NEXT: xor a0, a0, a1
+; CHECKIZHINX-NEXT: fnmadd.h a0, a0, a1, a2
; CHECKIZHINX-NEXT: ret
;
; RV32I-LABEL: fnmadd_nsz:
@@ -1920,6 +1928,7 @@ define half @fnmadd_nsz(half %a, half %b, half %c) nounwind {
; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0
; CHECKIZHINXMIN-NEXT: lui a1, 1048568
; CHECKIZHINXMIN-NEXT: xor a0, a0, a1
+; CHECKIZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; CHECKIZHINXMIN-NEXT: ret
%1 = call nsz half @llvm.fma.f16(half %a, half %b, half %c)
%neg = fneg nsz half %1
@@ -2912,6 +2921,7 @@ define half @fsgnjx_f16(half %x, half %y) nounwind {
;
; CHECKIZHINXMIN-LABEL: fsgnjx_f16:
; CHECKIZHINXMIN: # %bb.0:
+; CHECKIZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; CHECKIZHINXMIN-NEXT: lui a2, 1048568
; CHECKIZHINXMIN-NEXT: and a0, a0, a2
; CHECKIZHINXMIN-NEXT: li a2, 15
diff --git a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
index 506b7027a8b35f..e0c47bfac6fec8 100644
--- a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll
@@ -55,14 +55,12 @@ define half @fneg(half %a) nounwind {
;
; RV32IZHINX-LABEL: fneg:
; RV32IZHINX: # %bb.0:
-; RV32IZHINX-NEXT: lui a1, 1048568
-; RV32IZHINX-NEXT: xor a0, a0, a1
+; RV32IZHINX-NEXT: fneg.h a0, a0
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: fneg:
; RV64IZHINX: # %bb.0:
-; RV64IZHINX-NEXT: lui a1, 1048568
-; RV64IZHINX-NEXT: xor a0, a0, a1
+; RV64IZHINX-NEXT: fneg.h a0, a0
; RV64IZHINX-NEXT: ret
;
; RV32IZFHMIN-LABEL: fneg:
@@ -79,8 +77,10 @@ define half @fneg(half %a) nounwind {
;
; RVIZHINXMIN-LABEL: fneg:
; RVIZHINXMIN: # %bb.0:
+; RVIZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RVIZHINXMIN-NEXT: lui a1, 1048568
; RVIZHINXMIN-NEXT: xor a0, a0, a1
+; RVIZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RVIZHINXMIN-NEXT: ret
%1 = fneg half %a
ret half %1
@@ -115,14 +115,12 @@ define half @fabs(half %a) nounwind {
;
; RV32IZHINX-LABEL: fabs:
; RV32IZHINX: # %bb.0:
-; RV32IZHINX-NEXT: slli a0, a0, 17
-; RV32IZHINX-NEXT: srli a0, a0, 17
+; RV32IZHINX-NEXT: fabs.h a0, a0
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: fabs:
; RV64IZHINX: # %bb.0:
-; RV64IZHINX-NEXT: slli a0, a0, 49
-; RV64IZHINX-NEXT: srli a0, a0, 49
+; RV64IZHINX-NEXT: fabs.h a0, a0
; RV64IZHINX-NEXT: ret
;
; RV32IZFHMIN-LABEL: fabs:
@@ -139,14 +137,18 @@ define half @fabs(half %a) nounwind {
;
; RV32IZHINXMIN-LABEL: fabs:
; RV32IZHINXMIN: # %bb.0:
+; RV32IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RV32IZHINXMIN-NEXT: slli a0, a0, 17
; RV32IZHINXMIN-NEXT: srli a0, a0, 17
+; RV32IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: fabs:
; RV64IZHINXMIN: # %bb.0:
+; RV64IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RV64IZHINXMIN-NEXT: slli a0, a0, 49
; RV64IZHINXMIN-NEXT: srli a0, a0, 49
+; RV64IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV64IZHINXMIN-NEXT: ret
%1 = call half @llvm.fabs.f16(half %a)
ret half %1
@@ -227,22 +229,28 @@ define half @fcopysign_fneg(half %a, half %b) nounwind {
;
; RV32IZHINXMIN-LABEL: fcopysign_fneg:
; RV32IZHINXMIN: # %bb.0:
+; RV32IZHINXMIN-NEXT: # kill: def $x11_h killed $x11_h def $x11
+; RV32IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RV32IZHINXMIN-NEXT: not a1, a1
; RV32IZHINXMIN-NEXT: lui a2, 1048568
; RV32IZHINXMIN-NEXT: and a1, a1, a2
; RV32IZHINXMIN-NEXT: slli a0, a0, 17
; RV32IZHINXMIN-NEXT: srli a0, a0, 17
; RV32IZHINXMIN-NEXT: or a0, a0, a1
+; RV32IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: fcopysign_fneg:
; RV64IZHINXMIN: # %bb.0:
+; RV64IZHINXMIN-NEXT: # kill: def $x11_h killed $x11_h def $x11
+; RV64IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RV64IZHINXMIN-NEXT: not a1, a1
; RV64IZHINXMIN-NEXT: lui a2, 1048568
; RV64IZHINXMIN-NEXT: and a1, a1, a2
; RV64IZHINXMIN-NEXT: slli a0, a0, 49
; RV64IZHINXMIN-NEXT: srli a0, a0, 49
; RV64IZHINXMIN-NEXT: or a0, a0, a1
+; RV64IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV64IZHINXMIN-NEXT: ret
%1 = fneg half %b
%2 = call half @llvm.copysign.f16(half %a, half %1)
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index 32f7dfaee8837c..16eced073d99f3 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -5564,10 +5564,12 @@ define half @bitcast_h_i16(i16 %a) nounwind {
;
; CHECKIZHINX-LABEL: bitcast_h_i16:
; CHECKIZHINX: # %bb.0:
+; CHECKIZHINX-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; CHECKIZHINX-NEXT: ret
;
; CHECKIZDINXZHINX-LABEL: bitcast_h_i16:
; CHECKIZDINXZHINX: # %bb.0:
+; CHECKIZDINXZHINX-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; CHECKIZDINXZHINX-NEXT: ret
;
; RV32I-LABEL: bitcast_h_i16:
@@ -5616,18 +5618,22 @@ define half @bitcast_h_i16(i16 %a) nounwind {
;
; CHECK32-IZHINXMIN-LABEL: bitcast_h_i16:
; CHECK32-IZHINXMIN: # %bb.0:
+; CHECK32-IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; CHECK32-IZHINXMIN-NEXT: ret
;
; CHECK64-IZHINXMIN-LABEL: bitcast_h_i16:
; CHECK64-IZHINXMIN: # %bb.0:
+; CHECK64-IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; CHECK64-IZHINXMIN-NEXT: ret
;
; CHECK32-IZDINXZHINXMIN-LABEL: bitcast_h_i16:
; CHECK32-IZDINXZHINXMIN: # %bb.0:
+; CHECK32-IZDINXZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; CHECK32-IZDINXZHINXMIN-NEXT: ret
;
; CHECK64-IZDINXZHINXMIN-LABEL: bitcast_h_i16:
; CHECK64-IZDINXZHINXMIN: # %bb.0:
+; CHECK64-IZDINXZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; CHECK64-IZDINXZHINXMIN-NEXT: ret
%1 = bitcast i16 %a to half
ret half %1
@@ -5651,10 +5657,12 @@ define i16 @bitcast_i16_h(half %a) nounwind {
;
; CHECKIZHINX-LABEL: bitcast_i16_h:
; CHECKIZHINX: # %bb.0:
+; CHECKIZHINX-NEXT: # kill: def $x10_h killed $x10_h def $x10
; CHECKIZHINX-NEXT: ret
;
; CHECKIZDINXZHINX-LABEL: bitcast_i16_h:
; CHECKIZDINXZHINX: # %bb.0:
+; CHECKIZDINXZHINX-NEXT: # kill: def $x10_h killed $x10_h def $x10
; CHECKIZDINXZHINX-NEXT: ret
;
; RV32I-LABEL: bitcast_i16_h:
@@ -5695,18 +5703,22 @@ define i16 @bitcast_i16_h(half %a) nounwind {
;
; CHECK32-IZHINXMIN-LABEL: bitcast_i16_h:
; CHECK32-IZHINXMIN: # %bb.0:
+; CHECK32-IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; CHECK32-IZHINXMIN-NEXT: ret
;
; CHECK64-IZHINXMIN-LABEL: bitcast_i16_h:
; CHECK64-IZHINXMIN: # %bb.0:
+; CHECK64-IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; CHECK64-IZHINXMIN-NEXT: ret
;
; CHECK32-IZDINXZHINXMIN-LABEL: bitcast_i16_h:
; CHECK32-IZDINXZHINXMIN: # %bb.0:
+; CHECK32-IZDINXZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; CHECK32-IZDINXZHINXMIN-NEXT: ret
;
; CHECK64-IZDINXZHINXMIN-LABEL: bitcast_i16_h:
; CHECK64-IZDINXZHINXMIN: # %bb.0:
+; CHECK64-IZDINXZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; CHECK64-IZDINXZHINXMIN-NEXT: ret
%1 = bitcast half %a to i16
ret i16 %1
diff --git a/llvm/test/CodeGen/RISCV/half-imm.ll b/llvm/test/CodeGen/RISCV/half-imm.ll
index 2ebc28c2ebd440..01e208b3122981 100644
--- a/llvm/test/CodeGen/RISCV/half-imm.ll
+++ b/llvm/test/CodeGen/RISCV/half-imm.ll
@@ -32,12 +32,14 @@ define half @half_imm() nounwind {
; RV32IZHINX: # %bb.0:
; RV32IZHINX-NEXT: lui a0, 4
; RV32IZHINX-NEXT: addi a0, a0, 512
+; RV32IZHINX-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: half_imm:
; RV64IZHINX: # %bb.0:
; RV64IZHINX-NEXT: lui a0, 4
; RV64IZHINX-NEXT: addiw a0, a0, 512
+; RV64IZHINX-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV64IZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: half_imm:
@@ -50,12 +52,14 @@ define half @half_imm() nounwind {
; RV32IZHINXMIN: # %bb.0:
; RV32IZHINXMIN-NEXT: lui a0, 4
; RV32IZHINXMIN-NEXT: addi a0, a0, 512
+; RV32IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: half_imm:
; RV64IZHINXMIN: # %bb.0:
; RV64IZHINXMIN-NEXT: lui a0, 4
; RV64IZHINXMIN-NEXT: addiw a0, a0, 512
+; RV64IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV64IZHINXMIN-NEXT: ret
ret half 3.0
}
@@ -110,12 +114,12 @@ define half @half_positive_zero(ptr %pf) nounwind {
;
; RV32IZHINX-LABEL: half_positive_zero:
; RV32IZHINX: # %bb.0:
-; RV32IZHINX-NEXT: li a0, 0
+; RV32IZHINX-NEXT: fmv.h a0, zero
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: half_positive_zero:
; RV64IZHINX: # %bb.0:
-; RV64IZHINX-NEXT: li a0, 0
+; RV64IZHINX-NEXT: fmv.h a0, zero
; RV64IZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: half_positive_zero:
diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
index 3e0f838270aa5d..81e29329e71817 100644
--- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
@@ -1797,17 +1797,10 @@ define half @fabs_f16(half %a) nounwind {
; CHECKIZFH-NEXT: fabs.h fa0, fa0
; CHECKIZFH-NEXT: ret
;
-; RV32IZHINX-LABEL: fabs_f16:
-; RV32IZHINX: # %bb.0:
-; RV32IZHINX-NEXT: slli a0, a0, 17
-; RV32IZHINX-NEXT: srli a0, a0, 17
-; RV32IZHINX-NEXT: ret
-;
-; RV64IZHINX-LABEL: fabs_f16:
-; RV64IZHINX: # %bb.0:
-; RV64IZHINX-NEXT: slli a0, a0, 49
-; RV64IZHINX-NEXT: srli a0, a0, 49
-; RV64IZHINX-NEXT: ret
+; CHECKIZHINX-LABEL: fabs_f16:
+; CHECKIZHINX: # %bb.0:
+; CHECKIZHINX-NEXT: fabs.h a0, a0
+; CHECKIZHINX-NEXT: ret
;
; RV32I-LABEL: fabs_f16:
; RV32I: # %bb.0:
@@ -1839,14 +1832,18 @@ define half @fabs_f16(half %a) nounwind {
;
; RV32IZHINXMIN-LABEL: fabs_f16:
; RV32IZHINXMIN: # %bb.0:
+; RV32IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RV32IZHINXMIN-NEXT: slli a0, a0, 17
; RV32IZHINXMIN-NEXT: srli a0, a0, 17
+; RV32IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: fabs_f16:
; RV64IZHINXMIN: # %bb.0:
+; RV64IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RV64IZHINXMIN-NEXT: slli a0, a0, 49
; RV64IZHINXMIN-NEXT: srli a0, a0, 49
+; RV64IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV64IZHINXMIN-NEXT: ret
%1 = call half @llvm.fabs.f16(half %a)
ret half %1
@@ -2094,20 +2091,26 @@ define half @copysign_f16(half %a, half %b) nounwind {
;
; RV32IZHINXMIN-LABEL: copysign_f16:
; RV32IZHINXMIN: # %bb.0:
+; RV32IZHINXMIN-NEXT: # kill: def $x11_h killed $x11_h def $x11
+; RV32IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RV32IZHINXMIN-NEXT: lui a2, 1048568
; RV32IZHINXMIN-NEXT: and a1, a1, a2
; RV32IZHINXMIN-NEXT: slli a0, a0, 17
; RV32IZHINXMIN-NEXT: srli a0, a0, 17
; RV32IZHINXMIN-NEXT: or a0, a0, a1
+; RV32IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV32IZHINXMIN-NEXT: ret
;
; RV64IZHINXMIN-LABEL: copysign_f16:
; RV64IZHINXMIN: # %bb.0:
+; RV64IZHINXMIN-NEXT: # kill: def $x11_h killed $x11_h def $x11
+; RV64IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RV64IZHINXMIN-NEXT: lui a2, 1048568
; RV64IZHINXMIN-NEXT: and a1, a1, a2
; RV64IZHINXMIN-NEXT: slli a0, a0, 49
; RV64IZHINXMIN-NEXT: srli a0, a0, 49
; RV64IZHINXMIN-NEXT: or a0, a0, a1
+; RV64IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h killed $x10
; RV64IZHINXMIN-NEXT: ret
%1 = call half @llvm.copysign.f16(half %a, half %b)
ret half %1
@@ -2835,6 +2838,7 @@ define i1 @isnan_d_fpclass(half %x) {
;
; RV32IZHINXMIN-LABEL: isnan_d_fpclass:
; RV32IZHINXMIN: # %bb.0:
+; RV32IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RV32IZHINXMIN-NEXT: slli a0, a0, 17
; RV32IZHINXMIN-NEXT: srli a0, a0, 17
; RV32IZHINXMIN-NEXT: li a1, 31
@@ -2844,6 +2848,7 @@ define i1 @isnan_d_fpclass(half %x) {
;
; RV64IZHINXMIN-LABEL: isnan_d_fpclass:
; RV64IZHINXMIN: # %bb.0:
+; RV64IZHINXMIN-NEXT: # kill: def $x10_h killed $x10_h def $x10
; RV64IZHINXMIN-NEXT: slli a0, a0, 49
; RV64IZHINXMIN-NEXT: srli a0, a0, 49
; RV64IZHINXMIN-NEXT: li a1, 31
diff --git a/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll b/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll
index bc3f44363fb955..220ec402ba0bc1 100644
--- a/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll
+++ b/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll
@@ -37,7 +37,7 @@ define half @fminimum_f16(half %a, half %b) nounwind {
; CHECKIZHINX-LABEL: fminimum_f16:
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: feq.h a3, a0, a0
-; CHECKIZHINX-NEXT: mv a2, a1
+; CHECKIZHINX-NEXT: fmv.h a2, a1
; CHECKIZHINX-NEXT: beqz a3, .LBB0_3
; CHECKIZHINX-NEXT: # %bb.1:
; CHECKIZHINX-NEXT: feq.h a3, a1, a1
@@ -46,7 +46,7 @@ define half @fminimum_f16(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: fmin.h a0, a0, a2
; CHECKIZHINX-NEXT: ret
; CHECKIZHINX-NEXT: .LBB0_3:
-; CHECKIZHINX-NEXT: mv a2, a0
+; CHECKIZHINX-NEXT: fmv.h a2, a0
; CHECKIZHINX-NEXT: feq.h a3, a1, a1
; CHECKIZHINX-NEXT: bnez a3, .LBB0_2
; CHECKIZHINX-NEXT: .LBB0_4:
@@ -81,7 +81,7 @@ define half @fmaximum_f16(half %a, half %b) nounwind {
; CHECKIZHINX-LABEL: fmaximum_f16:
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: feq.h a3, a0, a0
-; CHECKIZHINX-NEXT: mv a2, a1
+; CHECKIZHINX-NEXT: fmv.h a2, a1
; CHECKIZHINX-NEXT: beqz a3, .LBB1_3
; CHECKIZHINX-NEXT: # %bb.1:
; CHECKIZHINX-NEXT: feq.h a3, a1, a1
@@ -90,7 +90,7 @@ define half @fmaximum_f16(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: fmax.h a0, a0, a2
; CHECKIZHINX-NEXT: ret
; CHECKIZHINX-NEXT: .LBB1_3:
-; CHECKIZHINX-NEXT: mv a2, a0
+; CHECKIZHINX-NEXT: fmv.h a2, a0
; CHECKIZHINX-NEXT: feq.h a3, a1, a1
; CHECKIZHINX-NEXT: bnez a3, .LBB1_2
; CHECKIZHINX-NEXT: .LBB1_4:
diff --git a/llvm/test/CodeGen/RISCV/half-mem.ll b/llvm/test/CodeGen/RISCV/half-mem.ll
index 5b6a94a83f94bf..f33d2a98c72c26 100644
--- a/llvm/test/CodeGen/RISCV/half-mem.ll
+++ b/llvm/test/CodeGen/RISCV/half-mem.ll
@@ -292,7 +292,7 @@ define half @flh_stack(half %a) nounwind {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: mv s0, a0
+; RV32IZHINX-NEXT: fmv.h s0, a0
; RV32IZHINX-NEXT: addi a0, sp, 4
; RV32IZHINX-NEXT: call notdead
; RV32IZHINX-NEXT: lh a0, 4(sp)
@@ -307,7 +307,7 @@ define half @flh_stack(half %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, -32
; RV64IZHINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: mv s0, a0
+; RV64IZHINX-NEXT: fmv.h s0, a0
; RV64IZHINX-NEXT: addi a0, sp, 12
; RV64IZHINX-NEXT: call notdead
; RV64IZHINX-NEXT: lh a0, 12(sp)
diff --git a/llvm/test/CodeGen/RISCV/half-select-fcmp.ll b/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
index b793c500fc397b..4cbbdb4a7fd6b2 100644
--- a/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
@@ -24,7 +24,7 @@ define half @select_fcmp_false(half %a, half %b) nounwind {
;
; CHECKIZHINX-LABEL: select_fcmp_false:
; CHECKIZHINX: # %bb.0:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_fcmp_false:
@@ -56,7 +56,7 @@ define half @select_fcmp_oeq(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: feq.h a2, a0, a1
; CHECKIZHINX-NEXT: bnez a2, .LBB1_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: .LBB1_2:
; CHECKIZHINX-NEXT: ret
;
@@ -101,7 +101,7 @@ define half @select_fcmp_ogt(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: flt.h a2, a1, a0
; CHECKIZHINX-NEXT: bnez a2, .LBB2_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: .LBB2_2:
; CHECKIZHINX-NEXT: ret
;
@@ -146,7 +146,7 @@ define half @select_fcmp_oge(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: fle.h a2, a1, a0
; CHECKIZHINX-NEXT: bnez a2, .LBB3_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: .LBB3_2:
; CHECKIZHINX-NEXT: ret
;
@@ -191,7 +191,7 @@ define half @select_fcmp_olt(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: flt.h a2, a0, a1
; CHECKIZHINX-NEXT: bnez a2, .LBB4_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: .LBB4_2:
; CHECKIZHINX-NEXT: ret
;
@@ -236,7 +236,7 @@ define half @select_fcmp_ole(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: fle.h a2, a0, a1
; CHECKIZHINX-NEXT: bnez a2, .LBB5_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: .LBB5_2:
; CHECKIZHINX-NEXT: ret
;
@@ -285,7 +285,7 @@ define half @select_fcmp_one(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: or a2, a3, a2
; CHECKIZHINX-NEXT: bnez a2, .LBB6_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: .LBB6_2:
; CHECKIZHINX-NEXT: ret
;
@@ -338,7 +338,7 @@ define half @select_fcmp_ord(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: and a2, a3, a2
; CHECKIZHINX-NEXT: bnez a2, .LBB7_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: .LBB7_2:
; CHECKIZHINX-NEXT: ret
;
@@ -391,7 +391,7 @@ define half @select_fcmp_ueq(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: or a2, a3, a2
; CHECKIZHINX-NEXT: beqz a2, .LBB8_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: .LBB8_2:
; CHECKIZHINX-NEXT: ret
;
@@ -440,7 +440,7 @@ define half @select_fcmp_ugt(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: fle.h a2, a0, a1
; CHECKIZHINX-NEXT: beqz a2, .LBB9_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: .LBB9_2:
; CHECKIZHINX-NEXT: ret
;
@@ -485,7 +485,7 @@ define half @select_fcmp_uge(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: flt.h a2, a0, a1
; CHECKIZHINX-NEXT: beqz a2, .LBB10_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: .LBB10_2:
; CHECKIZHINX-NEXT: ret
;
@@ -530,7 +530,7 @@ define half @select_fcmp_ult(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: fle.h a2, a1, a0
; CHECKIZHINX-NEXT: beqz a2, .LBB11_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: .LBB11_2:
; CHECKIZHINX-NEXT: ret
;
@@ -575,7 +575,7 @@ define half @select_fcmp_ule(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: flt.h a2, a1, a0
; CHECKIZHINX-NEXT: beqz a2, .LBB12_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: .LBB12_2:
; CHECKIZHINX-NEXT: ret
;
@@ -620,7 +620,7 @@ define half @select_fcmp_une(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: feq.h a2, a0, a1
; CHECKIZHINX-NEXT: beqz a2, .LBB13_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: .LBB13_2:
; CHECKIZHINX-NEXT: ret
;
@@ -669,7 +669,7 @@ define half @select_fcmp_uno(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: and a2, a3, a2
; CHECKIZHINX-NEXT: beqz a2, .LBB14_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a0, a1
+; CHECKIZHINX-NEXT: fmv.h a0, a1
; CHECKIZHINX-NEXT: .LBB14_2:
; CHECKIZHINX-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/half-select-icmp.ll b/llvm/test/CodeGen/RISCV/half-select-icmp.ll
index 33766a847f7887..2adf59ae3ada69 100644
--- a/llvm/test/CodeGen/RISCV/half-select-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-select-icmp.ll
@@ -29,9 +29,9 @@ define half @select_icmp_eq(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: beq a0, a1, .LBB0_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a2, a3
+; CHECKIZHINX-NEXT: fmv.h a2, a3
; CHECKIZHINX-NEXT: .LBB0_2:
-; CHECKIZHINX-NEXT: mv a0, a2
+; CHECKIZHINX-NEXT: fmv.h a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_eq:
@@ -68,9 +68,9 @@ define half @select_icmp_ne(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: bne a0, a1, .LBB1_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a2, a3
+; CHECKIZHINX-NEXT: fmv.h a2, a3
; CHECKIZHINX-NEXT: .LBB1_2:
-; CHECKIZHINX-NEXT: mv a0, a2
+; CHECKIZHINX-NEXT: fmv.h a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_ne:
@@ -107,9 +107,9 @@ define half @select_icmp_ugt(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: bltu a1, a0, .LBB2_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a2, a3
+; CHECKIZHINX-NEXT: fmv.h a2, a3
; CHECKIZHINX-NEXT: .LBB2_2:
-; CHECKIZHINX-NEXT: mv a0, a2
+; CHECKIZHINX-NEXT: fmv.h a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_ugt:
@@ -146,9 +146,9 @@ define half @select_icmp_uge(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: bgeu a0, a1, .LBB3_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a2, a3
+; CHECKIZHINX-NEXT: fmv.h a2, a3
; CHECKIZHINX-NEXT: .LBB3_2:
-; CHECKIZHINX-NEXT: mv a0, a2
+; CHECKIZHINX-NEXT: fmv.h a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_uge:
@@ -185,9 +185,9 @@ define half @select_icmp_ult(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: bltu a0, a1, .LBB4_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a2, a3
+; CHECKIZHINX-NEXT: fmv.h a2, a3
; CHECKIZHINX-NEXT: .LBB4_2:
-; CHECKIZHINX-NEXT: mv a0, a2
+; CHECKIZHINX-NEXT: fmv.h a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_ult:
@@ -224,9 +224,9 @@ define half @select_icmp_ule(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: bgeu a1, a0, .LBB5_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a2, a3
+; CHECKIZHINX-NEXT: fmv.h a2, a3
; CHECKIZHINX-NEXT: .LBB5_2:
-; CHECKIZHINX-NEXT: mv a0, a2
+; CHECKIZHINX-NEXT: fmv.h a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_ule:
@@ -263,9 +263,9 @@ define half @select_icmp_sgt(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: blt a1, a0, .LBB6_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a2, a3
+; CHECKIZHINX-NEXT: fmv.h a2, a3
; CHECKIZHINX-NEXT: .LBB6_2:
-; CHECKIZHINX-NEXT: mv a0, a2
+; CHECKIZHINX-NEXT: fmv.h a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_sgt:
@@ -302,9 +302,9 @@ define half @select_icmp_sge(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: bge a0, a1, .LBB7_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a2, a3
+; CHECKIZHINX-NEXT: fmv.h a2, a3
; CHECKIZHINX-NEXT: .LBB7_2:
-; CHECKIZHINX-NEXT: mv a0, a2
+; CHECKIZHINX-NEXT: fmv.h a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_sge:
@@ -341,9 +341,9 @@ define half @select_icmp_slt(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: blt a0, a1, .LBB8_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a2, a3
+; CHECKIZHINX-NEXT: fmv.h a2, a3
; CHECKIZHINX-NEXT: .LBB8_2:
-; CHECKIZHINX-NEXT: mv a0, a2
+; CHECKIZHINX-NEXT: fmv.h a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_slt:
@@ -380,9 +380,9 @@ define half @select_icmp_sle(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: bge a1, a0, .LBB9_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: mv a2, a3
+; CHECKIZHINX-NEXT: fmv.h a2, a3
; CHECKIZHINX-NEXT: .LBB9_2:
-; CHECKIZHINX-NEXT: mv a0, a2
+; CHECKIZHINX-NEXT: fmv.h a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_sle:
diff --git a/llvm/test/CodeGen/RISCV/kcfi-mir.ll b/llvm/test/CodeGen/RISCV/kcfi-mir.ll
index 9d8475e2171eaa..e478930d59abc5 100644
--- a/llvm/test/CodeGen/RISCV/kcfi-mir.ll
+++ b/llvm/test/CodeGen/RISCV/kcfi-mir.ll
@@ -10,7 +10,7 @@ define void @f1(ptr noundef %x) !kcfi_type !1 {
; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
; CHECK-NEXT: SD killed $x1, $x2, 8 :: (store (s64) into %stack.0)
; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x1, -8
- ; CHECK-NEXT: BUNDLE implicit-def $x6, implicit-def $x7, implicit-def $x28, implicit-def $x29, implicit-def $x30, implicit-def $x31, implicit-def dead $x1, implicit-def $x2, implicit killed $x10 {
+ ; CHECK-NEXT: BUNDLE implicit-def $x6, implicit-def $x6_h, implicit-def $x7, implicit-def $x7_h, implicit-def $x28, implicit-def $x28_h, implicit-def $x29, implicit-def $x29_h, implicit-def $x30, implicit-def $x30_h, implicit-def $x31, implicit-def $x31_h, implicit-def dead $x1, implicit-def $x2, implicit-def $x2_h, implicit killed $x10 {
; CHECK-NEXT: KCFI_CHECK $x10, 12345678, implicit-def $x6, implicit-def $x7, implicit-def $x28, implicit-def $x29, implicit-def $x30, implicit-def $x31
; CHECK-NEXT: PseudoCALLIndirect killed $x10, csr_ilp32_lp64, implicit-def dead $x1, implicit-def $x2
; CHECK-NEXT: }
@@ -26,7 +26,7 @@ define void @f2(ptr noundef %x) #0 {
; CHECK: bb.0 (%ir-block.0):
; CHECK-NEXT: liveins: $x10
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: BUNDLE implicit-def $x6, implicit-def $x7, implicit-def $x28, implicit-def $x29, implicit-def $x30, implicit-def $x31, implicit killed $x10, implicit $x2 {
+ ; CHECK-NEXT: BUNDLE implicit-def $x6, implicit-def $x6_h, implicit-def $x7, implicit-def $x7_h, implicit-def $x28, implicit-def $x28_h, implicit-def $x29, implicit-def $x29_h, implicit-def $x30, implicit-def $x30_h, implicit-def $x31, implicit-def $x31_h, implicit killed $x10, implicit $x2 {
; CHECK-NEXT: KCFI_CHECK $x10, 12345678, implicit-def $x6, implicit-def $x7, implicit-def $x28, implicit-def $x29, implicit-def $x30, implicit-def $x31
; CHECK-NEXT: PseudoTAILIndirect killed $x10, implicit $x2
; CHECK-NEXT: }
>From 98dab0f716bd138b405b1d1ff7ae1f502404b53f Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 9 Sep 2024 14:37:34 -0700
Subject: [PATCH 02/13] fixup! Remove unneeded DecoderMethod =
---
llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td | 1 -
1 file changed, 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
index 7f417d29fc6c2d..0ba0035c7d48fe 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -41,7 +41,6 @@ def GPRAsFPR16 : AsmOperandClass {
def FPR16INX : RegisterOperand<GPRF16> {
let ParserMatchClass = GPRAsFPR16;
- let DecoderMethod = "DecodeGPRF16RegisterClass";
}
def ZfhExt : ExtInfo<"", "", [HasStdExtZfh],
>From ae9fcd68885135e39f0b90bb83b08ba133a15fa4 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 10 Sep 2024 11:59:30 -0700
Subject: [PATCH 03/13] fixup! Add comment
---
llvm/lib/Target/RISCV/RISCVRegisterInfo.td | 3 +++
1 file changed, 3 insertions(+)
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 37a1643ef5236f..81160b0ba5bba4 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -82,6 +82,8 @@ def sub_gpr_odd : SubRegIndex<32, 32> {
// instructions.
let RegAltNameIndices = [ABIRegAltName] in {
+ // 16-bit sub-registers for use by Zhinx. Having a 16-bit sub-register reduces
+ // the spill size for these operations.
let isConstant = true in
def X0_H : RISCVReg<0, "x0", ["zero"]>;
let CostPerUse = [0, 1] in {
@@ -605,6 +607,7 @@ def VRM8NoV0 : VReg<VM8VTs, (sub VRM8, V0M8), 8>;
def VMV0 : VReg<VMaskVTs, (add V0), 1>;
+// 16-bit GPR sub-register class used by Zhinx instructions.
def GPRF16 : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 17),
(sequence "X%u_H", 5, 7),
(sequence "X%u_H", 28, 31),
>From 404aa7b4ccd46f475f78eb0966aab355c435ddf8 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Thu, 12 Sep 2024 00:18:38 -0700
Subject: [PATCH 04/13] fixup! Add compressed load/store CodeGenOnly
instructions.
---
llvm/lib/Target/RISCV/RISCVInstrInfoZc.td | 25 ++++++++++++++++++----
llvm/lib/Target/RISCV/RISCVRegisterInfo.td | 2 ++
2 files changed, 23 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZc.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZc.td
index 11c2695a59854b..bff740a33c1c1c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZc.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZc.td
@@ -112,8 +112,9 @@ class CLoadB_ri<bits<6> funct6, string OpcodeStr>
}
let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
-class CLoadH_ri<bits<6> funct6, bit funct1, string OpcodeStr>
- : RVInst16CLH<funct6, funct1, 0b00, (outs GPRC:$rd),
+class CLoadH_ri<bits<6> funct6, bit funct1, string OpcodeStr,
+ DAGOperand rty = GPRC>
+ : RVInst16CLH<funct6, funct1, 0b00, (outs rty:$rd),
(ins GPRCMem:$rs1, uimm2_lsb0:$imm),
OpcodeStr, "$rd, ${imm}(${rs1})"> {
bits<2> imm;
@@ -132,9 +133,10 @@ class CStoreB_rri<bits<6> funct6, string OpcodeStr>
}
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
-class CStoreH_rri<bits<6> funct6, bit funct1, string OpcodeStr>
+class CStoreH_rri<bits<6> funct6, bit funct1, string OpcodeStr,
+ DAGOperand rty = GPRC>
: RVInst16CSH<funct6, funct1, 0b00, (outs),
- (ins GPRC:$rs2, GPRCMem:$rs1, uimm2_lsb0:$imm),
+ (ins rty:$rs2, GPRCMem:$rs1, uimm2_lsb0:$imm),
OpcodeStr, "$rs2, ${imm}(${rs1})"> {
bits<2> imm;
@@ -202,7 +204,15 @@ def C_SB : CStoreB_rri<0b100010, "c.sb">,
Sched<[WriteSTB, ReadStoreData, ReadMemBase]>;
def C_SH : CStoreH_rri<0b100011, 0b0, "c.sh">,
Sched<[WriteSTH, ReadStoreData, ReadMemBase]>;
+
+// Compressed versions of Zhinx load/store.
+let isCodeGenOnly = 1 in {
+def C_LH_INX : CLoadH_ri<0b100001, 0b1, "c.lh", GPRF16C>,
+ Sched<[WriteLDH, ReadMemBase]>;
+def C_SH_INX : CStoreH_rri<0b100011, 0b0, "c.sh", GPRF16C>,
+ Sched<[WriteSTH, ReadStoreData, ReadMemBase]>;
}
+} // Predicates = [HasStdExtZcb]
// Zcmp
let DecoderNamespace = "RVZcmp", Predicates = [HasStdExtZcmp],
@@ -318,6 +328,13 @@ def : CompressPat<(SB GPRC:$rs2, GPRCMem:$rs1, uimm2:$imm),
(C_SB GPRC:$rs2, GPRCMem:$rs1, uimm2:$imm)>;
def : CompressPat<(SH GPRC:$rs2, GPRCMem:$rs1, uimm2_lsb0:$imm),
(C_SH GPRC:$rs2, GPRCMem:$rs1, uimm2_lsb0:$imm)>;
+
+let isCompressOnly = true in {
+def : CompressPat<(LH_INX GPRF16C:$rd, GPRCMem:$rs1, uimm2_lsb0:$imm),
+ (C_LH_INX GPRF16C:$rd, GPRCMem:$rs1, uimm2_lsb0:$imm)>;
+def : CompressPat<(SH_INX GPRF16C:$rs2, GPRCMem:$rs1, uimm2_lsb0:$imm),
+ (C_SH_INX GPRF16C:$rs2, GPRCMem:$rs1, uimm2_lsb0:$imm)>;
+}
}// Predicates = [HasStdExtZcb]
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 81160b0ba5bba4..9cb589f2441a21 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -614,6 +614,8 @@ def GPRF16 : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 17),
(sequence "X%u_H", 8, 9),
(sequence "X%u_H", 18, 27),
(sequence "X%u_H", 0, 4))>;
+def GPRF16C : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 15),
+ (sequence "X%u_H", 8, 9))>;
let RegInfos = XLenRI in {
def GPRF32 : RISCVRegisterClass<[f32], 32, (add GPR)>;
>From 37bd470399f0ee044d0ed98a8b9d4bb5cb20a9bf Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Thu, 12 Sep 2024 00:18:38 -0700
Subject: [PATCH 05/13] fixup! Add Zhinx load/store to RISCVMakeCompressible
---
llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
index 3f423450618df2..96c0aa4a69f87b 100644
--- a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
@@ -103,8 +103,10 @@ static unsigned log2LdstWidth(unsigned Opcode) {
case RISCV::SB:
return 0;
case RISCV::LH:
+ case RISCV::LH_INX:
case RISCV::LHU:
case RISCV::SH:
+ case RISCV::SH_INX:
return 1;
case RISCV::LW:
case RISCV::SW:
@@ -128,8 +130,10 @@ static unsigned offsetMask(unsigned Opcode) {
case RISCV::SB:
return maskTrailingOnes<unsigned>(2U);
case RISCV::LH:
+ case RISCV::LH_INX:
case RISCV::LHU:
case RISCV::SH:
+ case RISCV::SH_INX:
return maskTrailingOnes<unsigned>(1U);
case RISCV::LW:
case RISCV::SW:
@@ -186,6 +190,7 @@ static bool isCompressibleLoad(const MachineInstr &MI) {
return false;
case RISCV::LBU:
case RISCV::LH:
+ case RISCV::LH_INX:
case RISCV::LHU:
return STI.hasStdExtZcb();
case RISCV::LW:
@@ -207,6 +212,7 @@ static bool isCompressibleStore(const MachineInstr &MI) {
return false;
case RISCV::SB:
case RISCV::SH:
+ case RISCV::SH_INX:
return STI.hasStdExtZcb();
case RISCV::SW:
case RISCV::SD:
>From 3f61dbbf6087c7ac3d3fdbb7a5e48ae1f58a61a4 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 13 Sep 2024 14:33:08 -0700
Subject: [PATCH 06/13] fixup! Add a pseudo MV instruction for copy so we can
compress it.
---
.../Target/RISCV/RISCVExpandPseudoInsts.cpp | 21 ++++++++++
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 19 +++------
llvm/lib/Target/RISCV/RISCVInstrInfo.td | 4 +-
llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td | 7 ++++
.../lib/Target/RISCV/RISCVMergeBaseOffset.cpp | 2 +
llvm/test/CodeGen/RISCV/half-imm.ll | 4 +-
.../CodeGen/RISCV/half-maximum-minimum.ll | 8 ++--
llvm/test/CodeGen/RISCV/half-mem.ll | 4 +-
llvm/test/CodeGen/RISCV/half-select-fcmp.ll | 30 +++++++-------
llvm/test/CodeGen/RISCV/half-select-icmp.ll | 40 +++++++++----------
10 files changed, 81 insertions(+), 58 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index 72f96965ae9857..2501256ca6adf0 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -48,6 +48,8 @@ class RISCVExpandPseudo : public MachineFunctionPass {
MachineBasicBlock::iterator &NextMBBI);
bool expandVMSET_VMCLR(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, unsigned Opcode);
+ bool expandMV_FPR16INX(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI);
bool expandRV32ZdinxStore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI);
bool expandRV32ZdinxLoad(MachineBasicBlock &MBB,
@@ -104,6 +106,8 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
// expanded instructions for each pseudo is correct in the Size field of the
// tablegen definition for the pseudo.
switch (MBBI->getOpcode()) {
+ case RISCV::PseudoMV_FPR16INX:
+ return expandMV_FPR16INX(MBB, MBBI);
case RISCV::PseudoRV32ZdinxSD:
return expandRV32ZdinxStore(MBB, MBBI);
case RISCV::PseudoRV32ZdinxLD:
@@ -266,6 +270,23 @@ bool RISCVExpandPseudo::expandVMSET_VMCLR(MachineBasicBlock &MBB,
return true;
}
+bool RISCVExpandPseudo::expandMV_FPR16INX(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) {
+ DebugLoc DL = MBBI->getDebugLoc();
+ const TargetRegisterInfo *TRI = STI->getRegisterInfo();
+ Register DstReg = TRI->getMatchingSuperReg(
+ MBBI->getOperand(0).getReg(), RISCV::sub_16, &RISCV::GPRRegClass);
+ Register SrcReg = TRI->getMatchingSuperReg(
+ MBBI->getOperand(1).getReg(), RISCV::sub_16, &RISCV::GPRRegClass);
+
+ BuildMI(MBB, MBBI, DL, TII->get(RISCV::ADDI), DstReg)
+ .addReg(SrcReg, getKillRegState(MBBI->getOperand(1).isKill()))
+ .addImm(0);
+
+ MBBI->eraseFromParent(); // The pseudo instruction is gone now.
+ return true;
+}
+
// This function expands the PseudoRV32ZdinxSD for storing a double-precision
// floating-point value into memory by generating an equivalent instruction
// sequence for RV32.
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index d2b56ea5aa5f8e..316a68378d8889 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -463,19 +463,9 @@ void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
}
if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {
- if (STI.hasStdExtZhinx()) {
- BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_H_INX), DstReg)
- .addReg(SrcReg, getKillRegState(KillSrc))
- .addReg(SrcReg, getKillRegState(KillSrc));
- return;
- }
- DstReg =
- TRI->getMatchingSuperReg(DstReg, RISCV::sub_16, &RISCV::GPRRegClass);
- SrcReg =
- TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16, &RISCV::GPRRegClass);
- BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
- .addReg(SrcReg, getKillRegState(KillSrc))
- .addImm(0);
+ BuildMI(MBB, MBBI, DL, get(RISCV::PseudoMV_FPR16INX), DstReg)
+ .addReg(SrcReg,
+ getKillRegState(KillSrc) | getRenamableRegState(RenamableSrc));
return;
}
@@ -1528,6 +1518,9 @@ unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
}
switch (Opcode) {
+ case RISCV::PseudoMV_FPR16INX:
+ // MV is always compressible.
+ return STI.hasStdExtCOrZca() ? 2 : 4;
case TargetOpcode::STACKMAP:
// The upper bound for a stackmap intrinsic is the full length of its shadow
return StackMapOpers(&MI).getNumPatchBytes();
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index d642d99a76f0ce..ac3cb9dc091e15 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -543,8 +543,8 @@ class HStore_rr<bits<7> funct7, string opcodestr>
}
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
-class ALU_ri<bits<3> funct3, string opcodestr>
- : RVInstI<funct3, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1, simm12:$imm12),
+class ALU_ri<bits<3> funct3, string opcodestr, DAGOperand rty = GPR>
+ : RVInstI<funct3, OPC_OP_IMM, (outs rty:$rd), (ins rty:$rs1, simm12:$imm12),
opcodestr, "$rd, $rs1, $imm12">,
Sched<[WriteIALU, ReadIALU]>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
index 0ba0035c7d48fe..51123180d47c69 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -93,6 +93,13 @@ let Predicates = [HasStdExtZhinxmin], isCodeGenOnly = 1 in {
def LH_INX : Load_ri<0b001, "lh", GPRF16>, Sched<[WriteLDH, ReadMemBase]>;
def SH_INX : Store_rri<0b001, "sh", GPRF16>,
Sched<[WriteSTH, ReadStoreData, ReadMemBase]>;
+
+// Pseudo for a GPRF16 register copy. It is expanded to an ADDI on the matching
+// GPR super-registers with an immediate of zero, so it must not be used as a
+// general ADDI.
+let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveReg = 1,
+ hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def PseudoMV_FPR16INX : Pseudo<(outs GPRF16:$rd), (ins GPRF16:$rs), []>,
+ Sched<[WriteIALU, ReadIALU]>;
}
foreach Ext = ZfhExts in {
diff --git a/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp b/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
index b6ac3384e7d3ee..b3a2877edde4e3 100644
--- a/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
@@ -385,6 +385,7 @@ bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi,
return false;
case RISCV::LB:
case RISCV::LH:
+ case RISCV::LH_INX:
case RISCV::LW:
case RISCV::LBU:
case RISCV::LHU:
@@ -395,6 +396,7 @@ bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi,
case RISCV::FLD:
case RISCV::SB:
case RISCV::SH:
+ case RISCV::SH_INX:
case RISCV::SW:
case RISCV::SD:
case RISCV::FSH:
diff --git a/llvm/test/CodeGen/RISCV/half-imm.ll b/llvm/test/CodeGen/RISCV/half-imm.ll
index 01e208b3122981..1045df1c3e7668 100644
--- a/llvm/test/CodeGen/RISCV/half-imm.ll
+++ b/llvm/test/CodeGen/RISCV/half-imm.ll
@@ -114,12 +114,12 @@ define half @half_positive_zero(ptr %pf) nounwind {
;
; RV32IZHINX-LABEL: half_positive_zero:
; RV32IZHINX: # %bb.0:
-; RV32IZHINX-NEXT: fmv.h a0, zero
+; RV32IZHINX-NEXT: li a0, 0
; RV32IZHINX-NEXT: ret
;
; RV64IZHINX-LABEL: half_positive_zero:
; RV64IZHINX: # %bb.0:
-; RV64IZHINX-NEXT: fmv.h a0, zero
+; RV64IZHINX-NEXT: li a0, 0
; RV64IZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: half_positive_zero:
diff --git a/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll b/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll
index 220ec402ba0bc1..bc3f44363fb955 100644
--- a/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll
+++ b/llvm/test/CodeGen/RISCV/half-maximum-minimum.ll
@@ -37,7 +37,7 @@ define half @fminimum_f16(half %a, half %b) nounwind {
; CHECKIZHINX-LABEL: fminimum_f16:
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: feq.h a3, a0, a0
-; CHECKIZHINX-NEXT: fmv.h a2, a1
+; CHECKIZHINX-NEXT: mv a2, a1
; CHECKIZHINX-NEXT: beqz a3, .LBB0_3
; CHECKIZHINX-NEXT: # %bb.1:
; CHECKIZHINX-NEXT: feq.h a3, a1, a1
@@ -46,7 +46,7 @@ define half @fminimum_f16(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: fmin.h a0, a0, a2
; CHECKIZHINX-NEXT: ret
; CHECKIZHINX-NEXT: .LBB0_3:
-; CHECKIZHINX-NEXT: fmv.h a2, a0
+; CHECKIZHINX-NEXT: mv a2, a0
; CHECKIZHINX-NEXT: feq.h a3, a1, a1
; CHECKIZHINX-NEXT: bnez a3, .LBB0_2
; CHECKIZHINX-NEXT: .LBB0_4:
@@ -81,7 +81,7 @@ define half @fmaximum_f16(half %a, half %b) nounwind {
; CHECKIZHINX-LABEL: fmaximum_f16:
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: feq.h a3, a0, a0
-; CHECKIZHINX-NEXT: fmv.h a2, a1
+; CHECKIZHINX-NEXT: mv a2, a1
; CHECKIZHINX-NEXT: beqz a3, .LBB1_3
; CHECKIZHINX-NEXT: # %bb.1:
; CHECKIZHINX-NEXT: feq.h a3, a1, a1
@@ -90,7 +90,7 @@ define half @fmaximum_f16(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: fmax.h a0, a0, a2
; CHECKIZHINX-NEXT: ret
; CHECKIZHINX-NEXT: .LBB1_3:
-; CHECKIZHINX-NEXT: fmv.h a2, a0
+; CHECKIZHINX-NEXT: mv a2, a0
; CHECKIZHINX-NEXT: feq.h a3, a1, a1
; CHECKIZHINX-NEXT: bnez a3, .LBB1_2
; CHECKIZHINX-NEXT: .LBB1_4:
diff --git a/llvm/test/CodeGen/RISCV/half-mem.ll b/llvm/test/CodeGen/RISCV/half-mem.ll
index f33d2a98c72c26..5b6a94a83f94bf 100644
--- a/llvm/test/CodeGen/RISCV/half-mem.ll
+++ b/llvm/test/CodeGen/RISCV/half-mem.ll
@@ -292,7 +292,7 @@ define half @flh_stack(half %a) nounwind {
; RV32IZHINX-NEXT: addi sp, sp, -16
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: fmv.h s0, a0
+; RV32IZHINX-NEXT: mv s0, a0
; RV32IZHINX-NEXT: addi a0, sp, 4
; RV32IZHINX-NEXT: call notdead
; RV32IZHINX-NEXT: lh a0, 4(sp)
@@ -307,7 +307,7 @@ define half @flh_stack(half %a) nounwind {
; RV64IZHINX-NEXT: addi sp, sp, -32
; RV64IZHINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: fmv.h s0, a0
+; RV64IZHINX-NEXT: mv s0, a0
; RV64IZHINX-NEXT: addi a0, sp, 12
; RV64IZHINX-NEXT: call notdead
; RV64IZHINX-NEXT: lh a0, 12(sp)
diff --git a/llvm/test/CodeGen/RISCV/half-select-fcmp.ll b/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
index 4cbbdb4a7fd6b2..b793c500fc397b 100644
--- a/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
@@ -24,7 +24,7 @@ define half @select_fcmp_false(half %a, half %b) nounwind {
;
; CHECKIZHINX-LABEL: select_fcmp_false:
; CHECKIZHINX: # %bb.0:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_fcmp_false:
@@ -56,7 +56,7 @@ define half @select_fcmp_oeq(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: feq.h a2, a0, a1
; CHECKIZHINX-NEXT: bnez a2, .LBB1_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: .LBB1_2:
; CHECKIZHINX-NEXT: ret
;
@@ -101,7 +101,7 @@ define half @select_fcmp_ogt(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: flt.h a2, a1, a0
; CHECKIZHINX-NEXT: bnez a2, .LBB2_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: .LBB2_2:
; CHECKIZHINX-NEXT: ret
;
@@ -146,7 +146,7 @@ define half @select_fcmp_oge(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: fle.h a2, a1, a0
; CHECKIZHINX-NEXT: bnez a2, .LBB3_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: .LBB3_2:
; CHECKIZHINX-NEXT: ret
;
@@ -191,7 +191,7 @@ define half @select_fcmp_olt(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: flt.h a2, a0, a1
; CHECKIZHINX-NEXT: bnez a2, .LBB4_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: .LBB4_2:
; CHECKIZHINX-NEXT: ret
;
@@ -236,7 +236,7 @@ define half @select_fcmp_ole(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: fle.h a2, a0, a1
; CHECKIZHINX-NEXT: bnez a2, .LBB5_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: .LBB5_2:
; CHECKIZHINX-NEXT: ret
;
@@ -285,7 +285,7 @@ define half @select_fcmp_one(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: or a2, a3, a2
; CHECKIZHINX-NEXT: bnez a2, .LBB6_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: .LBB6_2:
; CHECKIZHINX-NEXT: ret
;
@@ -338,7 +338,7 @@ define half @select_fcmp_ord(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: and a2, a3, a2
; CHECKIZHINX-NEXT: bnez a2, .LBB7_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: .LBB7_2:
; CHECKIZHINX-NEXT: ret
;
@@ -391,7 +391,7 @@ define half @select_fcmp_ueq(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: or a2, a3, a2
; CHECKIZHINX-NEXT: beqz a2, .LBB8_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: .LBB8_2:
; CHECKIZHINX-NEXT: ret
;
@@ -440,7 +440,7 @@ define half @select_fcmp_ugt(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: fle.h a2, a0, a1
; CHECKIZHINX-NEXT: beqz a2, .LBB9_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: .LBB9_2:
; CHECKIZHINX-NEXT: ret
;
@@ -485,7 +485,7 @@ define half @select_fcmp_uge(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: flt.h a2, a0, a1
; CHECKIZHINX-NEXT: beqz a2, .LBB10_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: .LBB10_2:
; CHECKIZHINX-NEXT: ret
;
@@ -530,7 +530,7 @@ define half @select_fcmp_ult(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: fle.h a2, a1, a0
; CHECKIZHINX-NEXT: beqz a2, .LBB11_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: .LBB11_2:
; CHECKIZHINX-NEXT: ret
;
@@ -575,7 +575,7 @@ define half @select_fcmp_ule(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: flt.h a2, a1, a0
; CHECKIZHINX-NEXT: beqz a2, .LBB12_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: .LBB12_2:
; CHECKIZHINX-NEXT: ret
;
@@ -620,7 +620,7 @@ define half @select_fcmp_une(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: feq.h a2, a0, a1
; CHECKIZHINX-NEXT: beqz a2, .LBB13_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: .LBB13_2:
; CHECKIZHINX-NEXT: ret
;
@@ -669,7 +669,7 @@ define half @select_fcmp_uno(half %a, half %b) nounwind {
; CHECKIZHINX-NEXT: and a2, a3, a2
; CHECKIZHINX-NEXT: beqz a2, .LBB14_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a0, a1
+; CHECKIZHINX-NEXT: mv a0, a1
; CHECKIZHINX-NEXT: .LBB14_2:
; CHECKIZHINX-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/half-select-icmp.ll b/llvm/test/CodeGen/RISCV/half-select-icmp.ll
index 2adf59ae3ada69..33766a847f7887 100644
--- a/llvm/test/CodeGen/RISCV/half-select-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-select-icmp.ll
@@ -29,9 +29,9 @@ define half @select_icmp_eq(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: beq a0, a1, .LBB0_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a2, a3
+; CHECKIZHINX-NEXT: mv a2, a3
; CHECKIZHINX-NEXT: .LBB0_2:
-; CHECKIZHINX-NEXT: fmv.h a0, a2
+; CHECKIZHINX-NEXT: mv a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_eq:
@@ -68,9 +68,9 @@ define half @select_icmp_ne(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: bne a0, a1, .LBB1_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a2, a3
+; CHECKIZHINX-NEXT: mv a2, a3
; CHECKIZHINX-NEXT: .LBB1_2:
-; CHECKIZHINX-NEXT: fmv.h a0, a2
+; CHECKIZHINX-NEXT: mv a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_ne:
@@ -107,9 +107,9 @@ define half @select_icmp_ugt(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: bltu a1, a0, .LBB2_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a2, a3
+; CHECKIZHINX-NEXT: mv a2, a3
; CHECKIZHINX-NEXT: .LBB2_2:
-; CHECKIZHINX-NEXT: fmv.h a0, a2
+; CHECKIZHINX-NEXT: mv a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_ugt:
@@ -146,9 +146,9 @@ define half @select_icmp_uge(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: bgeu a0, a1, .LBB3_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a2, a3
+; CHECKIZHINX-NEXT: mv a2, a3
; CHECKIZHINX-NEXT: .LBB3_2:
-; CHECKIZHINX-NEXT: fmv.h a0, a2
+; CHECKIZHINX-NEXT: mv a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_uge:
@@ -185,9 +185,9 @@ define half @select_icmp_ult(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: bltu a0, a1, .LBB4_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a2, a3
+; CHECKIZHINX-NEXT: mv a2, a3
; CHECKIZHINX-NEXT: .LBB4_2:
-; CHECKIZHINX-NEXT: fmv.h a0, a2
+; CHECKIZHINX-NEXT: mv a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_ult:
@@ -224,9 +224,9 @@ define half @select_icmp_ule(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: bgeu a1, a0, .LBB5_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a2, a3
+; CHECKIZHINX-NEXT: mv a2, a3
; CHECKIZHINX-NEXT: .LBB5_2:
-; CHECKIZHINX-NEXT: fmv.h a0, a2
+; CHECKIZHINX-NEXT: mv a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_ule:
@@ -263,9 +263,9 @@ define half @select_icmp_sgt(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: blt a1, a0, .LBB6_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a2, a3
+; CHECKIZHINX-NEXT: mv a2, a3
; CHECKIZHINX-NEXT: .LBB6_2:
-; CHECKIZHINX-NEXT: fmv.h a0, a2
+; CHECKIZHINX-NEXT: mv a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_sgt:
@@ -302,9 +302,9 @@ define half @select_icmp_sge(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: bge a0, a1, .LBB7_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a2, a3
+; CHECKIZHINX-NEXT: mv a2, a3
; CHECKIZHINX-NEXT: .LBB7_2:
-; CHECKIZHINX-NEXT: fmv.h a0, a2
+; CHECKIZHINX-NEXT: mv a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_sge:
@@ -341,9 +341,9 @@ define half @select_icmp_slt(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: blt a0, a1, .LBB8_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a2, a3
+; CHECKIZHINX-NEXT: mv a2, a3
; CHECKIZHINX-NEXT: .LBB8_2:
-; CHECKIZHINX-NEXT: fmv.h a0, a2
+; CHECKIZHINX-NEXT: mv a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_slt:
@@ -380,9 +380,9 @@ define half @select_icmp_sle(i32 signext %a, i32 signext %b, half %c, half %d) {
; CHECKIZHINX: # %bb.0:
; CHECKIZHINX-NEXT: bge a1, a0, .LBB9_2
; CHECKIZHINX-NEXT: # %bb.1:
-; CHECKIZHINX-NEXT: fmv.h a2, a3
+; CHECKIZHINX-NEXT: mv a2, a3
; CHECKIZHINX-NEXT: .LBB9_2:
-; CHECKIZHINX-NEXT: fmv.h a0, a2
+; CHECKIZHINX-NEXT: mv a0, a2
; CHECKIZHINX-NEXT: ret
;
; CHECKIZFHMIN-LABEL: select_icmp_sle:
>From 1d4f4b6a4c5381d267f5687d9cf9bfb92a9de79d Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 13 Sep 2024 15:20:39 -0700
Subject: [PATCH 07/13] fixup! Add load/store to other locations that reference
RISCV::LH/SH
---
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 6 +
llvm/test/CodeGen/RISCV/codemodel-lowering.ll | 282 ++++++++++++++----
2 files changed, 224 insertions(+), 64 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 316a68378d8889..593e499e565bca 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -104,6 +104,7 @@ Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
MemBytes = 1;
break;
case RISCV::LH:
+ case RISCV::LH_INX:
case RISCV::LHU:
case RISCV::FLH:
MemBytes = 2;
@@ -144,6 +145,7 @@ Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
MemBytes = 1;
break;
case RISCV::SH:
+ case RISCV::SH_INX:
case RISCV::FSH:
MemBytes = 2;
break;
@@ -2576,6 +2578,7 @@ bool RISCVInstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
case RISCV::LB:
case RISCV::LBU:
case RISCV::LH:
+ case RISCV::LH_INX:
case RISCV::LHU:
case RISCV::LW:
case RISCV::LWU:
@@ -2585,6 +2588,7 @@ bool RISCVInstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
case RISCV::FLD:
case RISCV::SB:
case RISCV::SH:
+ case RISCV::SH_INX:
case RISCV::SW:
case RISCV::SD:
case RISCV::FSH:
@@ -2648,9 +2652,11 @@ bool RISCVInstrInfo::getMemOperandsWithOffsetWidth(
case RISCV::LBU:
case RISCV::SB:
case RISCV::LH:
+ case RISCV::LH_INX:
case RISCV::LHU:
case RISCV::FLH:
case RISCV::SH:
+ case RISCV::SH_INX:
case RISCV::FSH:
case RISCV::LW:
case RISCV::LWU:
diff --git a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
index ad81db75f7bc97..4831f0b24c7fec 100644
--- a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
+++ b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
@@ -1,14 +1,24 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi=ilp32f -code-model=small -verify-machineinstrs < %s \
-; RUN: | FileCheck %s -check-prefix=RV32I-SMALL
-; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi=ilp32f -code-model=medium -verify-machineinstrs < %s \
-; RUN: | FileCheck %s -check-prefix=RV32I-MEDIUM
-; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64f -code-model=small -verify-machineinstrs < %s \
-; RUN: | FileCheck %s -check-prefix=RV64I-SMALL
-; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64f -code-model=medium -verify-machineinstrs < %s \
-; RUN: | FileCheck %s -check-prefix=RV64I-MEDIUM
-; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64f -code-model=large -verify-machineinstrs < %s \
-; RUN: | FileCheck %s -check-prefix=RV64I-LARGE
+; RUN: llc -mtriple=riscv32 -mattr=+f,+zfh -target-abi=ilp32f -code-model=small -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV32I-SMALL,RV32F-SMALL
+; RUN: llc -mtriple=riscv32 -mattr=+f,+zfh -target-abi=ilp32f -code-model=medium -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV32I-MEDIUM,RV32F-MEDIUM
+; RUN: llc -mtriple=riscv64 -mattr=+f,+zfh -target-abi=lp64f -code-model=small -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64I-SMALL,RV64F-SMALL
+; RUN: llc -mtriple=riscv64 -mattr=+f,+zfh -target-abi=lp64f -code-model=medium -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64I-MEDIUM,RV64F-MEDIUM
+; RUN: llc -mtriple=riscv64 -mattr=+f,+zfh -target-abi=lp64f -code-model=large -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64I-LARGE,RV64F-LARGE
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx,+zhinx -target-abi=ilp32 -code-model=small -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV32I-SMALL,RV32FINX-SMALL
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx,+zhinx -target-abi=ilp32 -code-model=medium -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV32I-MEDIUM,RV32FINX-MEDIUM
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx,+zhinx -target-abi=lp64 -code-model=small -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64I-SMALL,RV64FINX-SMALL
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx,+zhinx -target-abi=lp64 -code-model=medium -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64I-MEDIUM,RV64FINX-MEDIUM
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx,+zhinx -target-abi=lp64 -code-model=large -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64I-LARGE,RV64FINX-LARGE
; Check lowering of globals
@G = global i32 0
@@ -238,43 +248,78 @@ indirectgoto:
; Check lowering of constantpools
define float @lower_constantpool(float %a) nounwind {
-; RV32I-SMALL-LABEL: lower_constantpool:
-; RV32I-SMALL: # %bb.0:
-; RV32I-SMALL-NEXT: lui a0, %hi(.LCPI3_0)
-; RV32I-SMALL-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
-; RV32I-SMALL-NEXT: fadd.s fa0, fa0, fa5
-; RV32I-SMALL-NEXT: ret
+; RV32F-SMALL-LABEL: lower_constantpool:
+; RV32F-SMALL: # %bb.0:
+; RV32F-SMALL-NEXT: lui a0, %hi(.LCPI3_0)
+; RV32F-SMALL-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
+; RV32F-SMALL-NEXT: fadd.s fa0, fa0, fa5
+; RV32F-SMALL-NEXT: ret
;
-; RV32I-MEDIUM-LABEL: lower_constantpool:
-; RV32I-MEDIUM: # %bb.0:
-; RV32I-MEDIUM-NEXT: .Lpcrel_hi3:
-; RV32I-MEDIUM-NEXT: auipc a0, %pcrel_hi(.LCPI3_0)
-; RV32I-MEDIUM-NEXT: flw fa5, %pcrel_lo(.Lpcrel_hi3)(a0)
-; RV32I-MEDIUM-NEXT: fadd.s fa0, fa0, fa5
-; RV32I-MEDIUM-NEXT: ret
+; RV32F-MEDIUM-LABEL: lower_constantpool:
+; RV32F-MEDIUM: # %bb.0:
+; RV32F-MEDIUM-NEXT: .Lpcrel_hi3:
+; RV32F-MEDIUM-NEXT: auipc a0, %pcrel_hi(.LCPI3_0)
+; RV32F-MEDIUM-NEXT: flw fa5, %pcrel_lo(.Lpcrel_hi3)(a0)
+; RV32F-MEDIUM-NEXT: fadd.s fa0, fa0, fa5
+; RV32F-MEDIUM-NEXT: ret
;
-; RV64I-SMALL-LABEL: lower_constantpool:
-; RV64I-SMALL: # %bb.0:
-; RV64I-SMALL-NEXT: lui a0, %hi(.LCPI3_0)
-; RV64I-SMALL-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
-; RV64I-SMALL-NEXT: fadd.s fa0, fa0, fa5
-; RV64I-SMALL-NEXT: ret
+; RV64F-SMALL-LABEL: lower_constantpool:
+; RV64F-SMALL: # %bb.0:
+; RV64F-SMALL-NEXT: lui a0, %hi(.LCPI3_0)
+; RV64F-SMALL-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
+; RV64F-SMALL-NEXT: fadd.s fa0, fa0, fa5
+; RV64F-SMALL-NEXT: ret
;
-; RV64I-MEDIUM-LABEL: lower_constantpool:
-; RV64I-MEDIUM: # %bb.0:
-; RV64I-MEDIUM-NEXT: .Lpcrel_hi3:
-; RV64I-MEDIUM-NEXT: auipc a0, %pcrel_hi(.LCPI3_0)
-; RV64I-MEDIUM-NEXT: flw fa5, %pcrel_lo(.Lpcrel_hi3)(a0)
-; RV64I-MEDIUM-NEXT: fadd.s fa0, fa0, fa5
-; RV64I-MEDIUM-NEXT: ret
+; RV64F-MEDIUM-LABEL: lower_constantpool:
+; RV64F-MEDIUM: # %bb.0:
+; RV64F-MEDIUM-NEXT: .Lpcrel_hi3:
+; RV64F-MEDIUM-NEXT: auipc a0, %pcrel_hi(.LCPI3_0)
+; RV64F-MEDIUM-NEXT: flw fa5, %pcrel_lo(.Lpcrel_hi3)(a0)
+; RV64F-MEDIUM-NEXT: fadd.s fa0, fa0, fa5
+; RV64F-MEDIUM-NEXT: ret
;
-; RV64I-LARGE-LABEL: lower_constantpool:
-; RV64I-LARGE: # %bb.0:
-; RV64I-LARGE-NEXT: .Lpcrel_hi3:
-; RV64I-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI3_0)
-; RV64I-LARGE-NEXT: flw fa5, %pcrel_lo(.Lpcrel_hi3)(a0)
-; RV64I-LARGE-NEXT: fadd.s fa0, fa0, fa5
-; RV64I-LARGE-NEXT: ret
+; RV64F-LARGE-LABEL: lower_constantpool:
+; RV64F-LARGE: # %bb.0:
+; RV64F-LARGE-NEXT: .Lpcrel_hi3:
+; RV64F-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI3_0)
+; RV64F-LARGE-NEXT: flw fa5, %pcrel_lo(.Lpcrel_hi3)(a0)
+; RV64F-LARGE-NEXT: fadd.s fa0, fa0, fa5
+; RV64F-LARGE-NEXT: ret
+;
+; RV32FINX-SMALL-LABEL: lower_constantpool:
+; RV32FINX-SMALL: # %bb.0:
+; RV32FINX-SMALL-NEXT: lui a1, 260097
+; RV32FINX-SMALL-NEXT: addi a1, a1, -2048
+; RV32FINX-SMALL-NEXT: fadd.s a0, a0, a1
+; RV32FINX-SMALL-NEXT: ret
+;
+; RV32FINX-MEDIUM-LABEL: lower_constantpool:
+; RV32FINX-MEDIUM: # %bb.0:
+; RV32FINX-MEDIUM-NEXT: lui a1, 260097
+; RV32FINX-MEDIUM-NEXT: addi a1, a1, -2048
+; RV32FINX-MEDIUM-NEXT: fadd.s a0, a0, a1
+; RV32FINX-MEDIUM-NEXT: ret
+;
+; RV64FINX-SMALL-LABEL: lower_constantpool:
+; RV64FINX-SMALL: # %bb.0:
+; RV64FINX-SMALL-NEXT: lui a1, 260097
+; RV64FINX-SMALL-NEXT: addiw a1, a1, -2048
+; RV64FINX-SMALL-NEXT: fadd.s a0, a0, a1
+; RV64FINX-SMALL-NEXT: ret
+;
+; RV64FINX-MEDIUM-LABEL: lower_constantpool:
+; RV64FINX-MEDIUM: # %bb.0:
+; RV64FINX-MEDIUM-NEXT: lui a1, 260097
+; RV64FINX-MEDIUM-NEXT: addiw a1, a1, -2048
+; RV64FINX-MEDIUM-NEXT: fadd.s a0, a0, a1
+; RV64FINX-MEDIUM-NEXT: ret
+;
+; RV64FINX-LARGE-LABEL: lower_constantpool:
+; RV64FINX-LARGE: # %bb.0:
+; RV64FINX-LARGE-NEXT: lui a1, 260097
+; RV64FINX-LARGE-NEXT: addiw a1, a1, -2048
+; RV64FINX-LARGE-NEXT: fadd.s a0, a0, a1
+; RV64FINX-LARGE-NEXT: ret
%1 = fadd float %a, 1.000244140625
ret float %1
}
@@ -289,13 +334,13 @@ define i32 @lower_extern_weak(i32 %a) nounwind {
; RV32I-SMALL-NEXT: lw a0, %lo(W)(a0)
; RV32I-SMALL-NEXT: ret
;
-; RV32I-MEDIUM-LABEL: lower_extern_weak:
-; RV32I-MEDIUM: # %bb.0:
-; RV32I-MEDIUM-NEXT: .Lpcrel_hi4:
-; RV32I-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W)
-; RV32I-MEDIUM-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi4)(a0)
-; RV32I-MEDIUM-NEXT: lw a0, 0(a0)
-; RV32I-MEDIUM-NEXT: ret
+; RV32F-MEDIUM-LABEL: lower_extern_weak:
+; RV32F-MEDIUM: # %bb.0:
+; RV32F-MEDIUM-NEXT: .Lpcrel_hi4:
+; RV32F-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W)
+; RV32F-MEDIUM-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi4)(a0)
+; RV32F-MEDIUM-NEXT: lw a0, 0(a0)
+; RV32F-MEDIUM-NEXT: ret
;
; RV64I-SMALL-LABEL: lower_extern_weak:
; RV64I-SMALL: # %bb.0:
@@ -303,21 +348,130 @@ define i32 @lower_extern_weak(i32 %a) nounwind {
; RV64I-SMALL-NEXT: lw a0, %lo(W)(a0)
; RV64I-SMALL-NEXT: ret
;
-; RV64I-MEDIUM-LABEL: lower_extern_weak:
-; RV64I-MEDIUM: # %bb.0:
-; RV64I-MEDIUM-NEXT: .Lpcrel_hi4:
-; RV64I-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W)
-; RV64I-MEDIUM-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi4)(a0)
-; RV64I-MEDIUM-NEXT: lw a0, 0(a0)
-; RV64I-MEDIUM-NEXT: ret
+; RV64F-MEDIUM-LABEL: lower_extern_weak:
+; RV64F-MEDIUM: # %bb.0:
+; RV64F-MEDIUM-NEXT: .Lpcrel_hi4:
+; RV64F-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W)
+; RV64F-MEDIUM-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi4)(a0)
+; RV64F-MEDIUM-NEXT: lw a0, 0(a0)
+; RV64F-MEDIUM-NEXT: ret
;
-; RV64I-LARGE-LABEL: lower_extern_weak:
-; RV64I-LARGE: # %bb.0:
-; RV64I-LARGE-NEXT: .Lpcrel_hi4:
-; RV64I-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI4_0)
-; RV64I-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi4)(a0)
-; RV64I-LARGE-NEXT: lw a0, 0(a0)
-; RV64I-LARGE-NEXT: ret
+; RV64F-LARGE-LABEL: lower_extern_weak:
+; RV64F-LARGE: # %bb.0:
+; RV64F-LARGE-NEXT: .Lpcrel_hi4:
+; RV64F-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI4_0)
+; RV64F-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi4)(a0)
+; RV64F-LARGE-NEXT: lw a0, 0(a0)
+; RV64F-LARGE-NEXT: ret
+;
+; RV32FINX-MEDIUM-LABEL: lower_extern_weak:
+; RV32FINX-MEDIUM: # %bb.0:
+; RV32FINX-MEDIUM-NEXT: .Lpcrel_hi3:
+; RV32FINX-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W)
+; RV32FINX-MEDIUM-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi3)(a0)
+; RV32FINX-MEDIUM-NEXT: lw a0, 0(a0)
+; RV32FINX-MEDIUM-NEXT: ret
+;
+; RV64FINX-MEDIUM-LABEL: lower_extern_weak:
+; RV64FINX-MEDIUM: # %bb.0:
+; RV64FINX-MEDIUM-NEXT: .Lpcrel_hi3:
+; RV64FINX-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W)
+; RV64FINX-MEDIUM-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi3)(a0)
+; RV64FINX-MEDIUM-NEXT: lw a0, 0(a0)
+; RV64FINX-MEDIUM-NEXT: ret
+;
+; RV64FINX-LARGE-LABEL: lower_extern_weak:
+; RV64FINX-LARGE: # %bb.0:
+; RV64FINX-LARGE-NEXT: .Lpcrel_hi3:
+; RV64FINX-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI4_0)
+; RV64FINX-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi3)(a0)
+; RV64FINX-LARGE-NEXT: lw a0, 0(a0)
+; RV64FINX-LARGE-NEXT: ret
%1 = load volatile i32, ptr @W
ret i32 %1
}
+
+ at X = global half 1.5
+
+define half @lower_global_half(half %a) nounwind {
+; RV32F-SMALL-LABEL: lower_global_half:
+; RV32F-SMALL: # %bb.0:
+; RV32F-SMALL-NEXT: lui a0, %hi(X)
+; RV32F-SMALL-NEXT: flh fa5, %lo(X)(a0)
+; RV32F-SMALL-NEXT: fadd.h fa0, fa0, fa5
+; RV32F-SMALL-NEXT: ret
+;
+; RV32F-MEDIUM-LABEL: lower_global_half:
+; RV32F-MEDIUM: # %bb.0:
+; RV32F-MEDIUM-NEXT: .Lpcrel_hi5:
+; RV32F-MEDIUM-NEXT: auipc a0, %pcrel_hi(X)
+; RV32F-MEDIUM-NEXT: flh fa5, %pcrel_lo(.Lpcrel_hi5)(a0)
+; RV32F-MEDIUM-NEXT: fadd.h fa0, fa0, fa5
+; RV32F-MEDIUM-NEXT: ret
+;
+; RV64F-SMALL-LABEL: lower_global_half:
+; RV64F-SMALL: # %bb.0:
+; RV64F-SMALL-NEXT: lui a0, %hi(X)
+; RV64F-SMALL-NEXT: flh fa5, %lo(X)(a0)
+; RV64F-SMALL-NEXT: fadd.h fa0, fa0, fa5
+; RV64F-SMALL-NEXT: ret
+;
+; RV64F-MEDIUM-LABEL: lower_global_half:
+; RV64F-MEDIUM: # %bb.0:
+; RV64F-MEDIUM-NEXT: .Lpcrel_hi5:
+; RV64F-MEDIUM-NEXT: auipc a0, %pcrel_hi(X)
+; RV64F-MEDIUM-NEXT: flh fa5, %pcrel_lo(.Lpcrel_hi5)(a0)
+; RV64F-MEDIUM-NEXT: fadd.h fa0, fa0, fa5
+; RV64F-MEDIUM-NEXT: ret
+;
+; RV64F-LARGE-LABEL: lower_global_half:
+; RV64F-LARGE: # %bb.0:
+; RV64F-LARGE-NEXT: .Lpcrel_hi5:
+; RV64F-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI5_0)
+; RV64F-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi5)(a0)
+; RV64F-LARGE-NEXT: flh fa5, 0(a0)
+; RV64F-LARGE-NEXT: fadd.h fa0, fa0, fa5
+; RV64F-LARGE-NEXT: ret
+;
+; RV32FINX-SMALL-LABEL: lower_global_half:
+; RV32FINX-SMALL: # %bb.0:
+; RV32FINX-SMALL-NEXT: lui a1, %hi(X)
+; RV32FINX-SMALL-NEXT: lh a1, %lo(X)(a1)
+; RV32FINX-SMALL-NEXT: fadd.h a0, a0, a1
+; RV32FINX-SMALL-NEXT: ret
+;
+; RV32FINX-MEDIUM-LABEL: lower_global_half:
+; RV32FINX-MEDIUM: # %bb.0:
+; RV32FINX-MEDIUM-NEXT: .Lpcrel_hi4:
+; RV32FINX-MEDIUM-NEXT: auipc a1, %pcrel_hi(X)
+; RV32FINX-MEDIUM-NEXT: lh a1, %pcrel_lo(.Lpcrel_hi4)(a1)
+; RV32FINX-MEDIUM-NEXT: fadd.h a0, a0, a1
+; RV32FINX-MEDIUM-NEXT: ret
+;
+; RV64FINX-SMALL-LABEL: lower_global_half:
+; RV64FINX-SMALL: # %bb.0:
+; RV64FINX-SMALL-NEXT: lui a1, %hi(X)
+; RV64FINX-SMALL-NEXT: lh a1, %lo(X)(a1)
+; RV64FINX-SMALL-NEXT: fadd.h a0, a0, a1
+; RV64FINX-SMALL-NEXT: ret
+;
+; RV64FINX-MEDIUM-LABEL: lower_global_half:
+; RV64FINX-MEDIUM: # %bb.0:
+; RV64FINX-MEDIUM-NEXT: .Lpcrel_hi4:
+; RV64FINX-MEDIUM-NEXT: auipc a1, %pcrel_hi(X)
+; RV64FINX-MEDIUM-NEXT: lh a1, %pcrel_lo(.Lpcrel_hi4)(a1)
+; RV64FINX-MEDIUM-NEXT: fadd.h a0, a0, a1
+; RV64FINX-MEDIUM-NEXT: ret
+;
+; RV64FINX-LARGE-LABEL: lower_global_half:
+; RV64FINX-LARGE: # %bb.0:
+; RV64FINX-LARGE-NEXT: .Lpcrel_hi4:
+; RV64FINX-LARGE-NEXT: auipc a1, %pcrel_hi(.LCPI5_0)
+; RV64FINX-LARGE-NEXT: ld a1, %pcrel_lo(.Lpcrel_hi4)(a1)
+; RV64FINX-LARGE-NEXT: lh a1, 0(a1)
+; RV64FINX-LARGE-NEXT: fadd.h a0, a0, a1
+; RV64FINX-LARGE-NEXT: ret
+ %b = load half, ptr @X
+ %1 = fadd half %a, %b
+ ret half %1
+}
>From a209293f21be1c692b2a0692feb651bf96d72c4d Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Sat, 14 Sep 2024 19:38:47 -0700
Subject: [PATCH 08/13] fixup! More updates for RISCVMakeCompressible.
---
llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
index 96c0aa4a69f87b..20c9c1b7039a89 100644
--- a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
@@ -177,6 +177,7 @@ static int64_t getBaseAdjustForCompression(int64_t Offset, unsigned Opcode) {
// Return true if Reg is in a compressed register class.
static bool isCompressedReg(Register Reg) {
return RISCV::GPRCRegClass.contains(Reg) ||
+ RISCV::GPRF16CRegClass.contains(Reg) ||
RISCV::FPR32CRegClass.contains(Reg) ||
RISCV::FPR64CRegClass.contains(Reg);
}
@@ -326,6 +327,8 @@ static Register analyzeCompressibleUses(MachineInstr &FirstMI,
// Work out the compressed register class from which to scavenge.
if (RISCV::GPRRegClass.contains(RegImm.Reg))
RCToScavenge = &RISCV::GPRCRegClass;
+ else if (RISCV::GPRF16RegClass.contains(RegImm.Reg))
+ RCToScavenge = &RISCV::GPRF16CRegClass;
else if (RISCV::FPR32RegClass.contains(RegImm.Reg))
RCToScavenge = &RISCV::FPR32CRegClass;
else if (RISCV::FPR64RegClass.contains(RegImm.Reg))
@@ -416,6 +419,10 @@ bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) {
BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(RISCV::ADDI), NewReg)
.addReg(RegImm.Reg)
.addImm(RegImm.Imm);
+ } else if (RISCV::GPRRegClass.contains(RegImm.Reg)) {
+ assert(RegImm.Imm == 0);
+ BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(RISCV::PseudoMV_FPR16INX), NewReg)
+ .addReg(RegImm.Reg);
} else {
// If we are looking at replacing an FPR register we don't expect to
// have any offset. The only compressible FP instructions with an offset
>From 56c3b9436d3fe9fed07ce897d2c3246488414567 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 16 Sep 2024 12:31:45 -0700
Subject: [PATCH 09/13] fixup! testing and fixes for RISCVMakeCompressible
---
.../Target/RISCV/RISCVMakeCompressible.cpp | 2 +-
.../RISCV/make-compressible-zbc-zhinx.mir | 249 ++++++++++++++++++
2 files changed, 250 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/RISCV/make-compressible-zbc-zhinx.mir
diff --git a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
index 20c9c1b7039a89..d742f38bd120c3 100644
--- a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
@@ -419,7 +419,7 @@ bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) {
BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(RISCV::ADDI), NewReg)
.addReg(RegImm.Reg)
.addImm(RegImm.Imm);
- } else if (RISCV::GPRRegClass.contains(RegImm.Reg)) {
+ } else if (RISCV::GPRF16RegClass.contains(RegImm.Reg)) {
assert(RegImm.Imm == 0);
BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(RISCV::PseudoMV_FPR16INX), NewReg)
.addReg(RegImm.Reg);
diff --git a/llvm/test/CodeGen/RISCV/make-compressible-zbc-zhinx.mir b/llvm/test/CodeGen/RISCV/make-compressible-zbc-zhinx.mir
new file mode 100644
index 00000000000000..45fcc792d2fcae
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/make-compressible-zbc-zhinx.mir
@@ -0,0 +1,249 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -o - %s -mtriple=riscv32 -mattr=+zcb,+zhinx -simplify-mir \
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=CHECK %s
+# RUN: llc -o - %s -mtriple=riscv64 -mattr=+zcb,+zhinx -simplify-mir \
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=CHECK %s
+
+--- |
+ define void @store_common_value_half(ptr %a, ptr %b, ptr %c) #0 {
+ entry:
+ store half 0.0, ptr %a, align 2
+ store half 0.0, ptr %b, align 2
+ store half 0.0, ptr %c, align 2
+ ret void
+ }
+
+ define void @store_common_ptr_half(ptr %p) #0 {
+ entry:
+ store volatile half 2.0, ptr %p, align 2
+ store volatile half 32.0, ptr %p, align 2
+ store volatile half 512.0, ptr %p, align 2
+ ret void
+ }
+
+ define void @load_common_ptr_half(ptr %p) #0 {
+ entry:
+ %0 = load volatile half, ptr %p, align 2
+ %1 = load volatile half, ptr %p, align 2
+ %2 = load volatile half, ptr %p, align 2
+ ret void
+ }
+
+ define void @store_large_offset_half(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds half, ptr %p, i32 100
+ store volatile half 2.0, ptr %0, align 2
+ %1 = getelementptr inbounds half, ptr %p, i32 101
+ store volatile half 32.0, ptr %1, align 2
+ %2 = getelementptr inbounds half, ptr %p, i32 102
+ store volatile half 512.0, ptr %2, align 2
+ %3 = getelementptr inbounds half, ptr %p, i32 103
+ store volatile half 16384.0, ptr %3, align 2
+ ret void
+ }
+
+ define void @load_large_offset_half(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds half, ptr %p, i32 100
+ %a = load volatile half, ptr %0, align 2
+ %1 = getelementptr inbounds half, ptr %p, i32 100
+ %b = load volatile half, ptr %1, align 2
+ %2 = getelementptr inbounds half, ptr %p, i32 101
+ %c = load volatile half, ptr %2, align 2
+ %3 = getelementptr inbounds half, ptr %p, i32 101
+ %d = load volatile half, ptr %3, align 2
+ ret void
+ }
+
+ define void @store_large_offset_no_opt_half(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds i8, ptr %p, i8 100
+ store volatile half 2.0, ptr %0, align 2
+ %1 = getelementptr inbounds i8, ptr %p, i8 101
+ store volatile half 32.0, ptr %1, align 2
+ %2 = getelementptr inbounds i8, ptr %p, i8 104
+ store volatile half 512.0, ptr %2, align 2
+ ret void
+ }
+
+ define void @load_large_offset_no_opt_half(ptr %p) #0 {
+ entry:
+ %0 = getelementptr inbounds half, ptr %p, i32 100
+ %a = load volatile half, ptr %0, align 2
+ %1 = getelementptr inbounds half, ptr %p, i32 101
+ %c = load volatile half, ptr %1, align 2
+ %2 = getelementptr inbounds half, ptr %p, i32 102
+ %d = load volatile half, ptr %2, align 2
+ ret void
+ }
+
+ attributes #0 = { minsize }
+
+...
+---
+name: store_common_value_half
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11, $x12
+
+ ; CHECK-LABEL: name: store_common_value_half
+ ; CHECK: liveins: $x10, $x11, $x12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x13_h = PseudoMV_FPR16INX $x0_h
+ ; CHECK-NEXT: SH_INX $x13_h, killed renamable $x10, 0 :: (store (s16) into %ir.a)
+ ; CHECK-NEXT: SH_INX $x13_h, killed renamable $x11, 0 :: (store (s16) into %ir.b)
+ ; CHECK-NEXT: SH_INX $x13_h, killed renamable $x12, 0 :: (store (s16) into %ir.c)
+ ; CHECK-NEXT: PseudoRET
+ SH_INX $x0_h, killed renamable $x10, 0 :: (store (s16) into %ir.a)
+ SH_INX $x0_h, killed renamable $x11, 0 :: (store (s16) into %ir.b)
+ SH_INX $x0_h, killed renamable $x12, 0 :: (store (s16) into %ir.c)
+ PseudoRET
+
+...
+---
+name: store_common_ptr_half
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: store_common_ptr_half
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x10 = LUI 4
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: SH_INX killed renamable $x10_h, $x11, 0 :: (volatile store (s16) into %ir.p)
+ ; CHECK-NEXT: renamable $x10 = LUI 5
+ ; CHECK-NEXT: SH_INX killed renamable $x10_h, $x11, 0 :: (volatile store (s16) into %ir.p)
+ ; CHECK-NEXT: renamable $x10 = LUI 6
+ ; CHECK-NEXT: SH_INX killed renamable $x10_h, killed $x11, 0 :: (volatile store (s16) into %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x10 = LUI 4
+ SH_INX killed renamable $x10_h, renamable $x16, 0 :: (volatile store (s16) into %ir.p)
+ renamable $x10 = LUI 5
+ SH_INX killed renamable $x10_h, renamable $x16, 0 :: (volatile store (s16) into %ir.p)
+ renamable $x10 = LUI 6
+ SH_INX killed renamable $x10_h, killed renamable $x16, 0 :: (volatile store (s16) into %ir.p)
+ PseudoRET
+
+...
+---
+name: load_common_ptr_half
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_common_ptr_half
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 0
+ ; CHECK-NEXT: dead $x10_h = LH_INX $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: dead $x10_h = LH_INX $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: dead $x10_h = LH_INX killed $x11, 0 :: (volatile load (s16) from %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10_h = LH_INX renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ dead $x10_h = LH_INX renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ dead $x10_h = LH_INX killed renamable $x16, 0 :: (volatile load (s16) from %ir.p)
+ PseudoRET
+
+...
+---
+name: store_large_offset_half
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ ; CHECK-LABEL: name: store_large_offset_half
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11 = LUI 4
+ ; CHECK-NEXT: $x12 = ADDI $x10, 200
+ ; CHECK-NEXT: SH_INX killed renamable $x11_h, $x12, 0 :: (volatile store (s16) into %ir.0)
+ ; CHECK-NEXT: renamable $x11 = LUI 5
+ ; CHECK-NEXT: SH_INX killed renamable $x11_h, $x12, 0 :: (volatile store (s16) into %ir.1)
+ ; CHECK-NEXT: renamable $x11 = LUI 6
+ ; CHECK-NEXT: SH_INX killed renamable $x11_h, $x12, 2 :: (volatile store (s16) into %ir.2)
+ ; CHECK-NEXT: renamable $x11 = LUI 7
+ ; CHECK-NEXT: SH_INX killed renamable $x11_h, killed $x12, 2 :: (volatile store (s16) into %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x11 = LUI 4
+ SH_INX killed renamable $x11_h, renamable $x10, 200 :: (volatile store (s16) into %ir.0)
+ renamable $x11 = LUI 5
+ SH_INX killed renamable $x11_h, renamable $x10, 200 :: (volatile store (s16) into %ir.1)
+ renamable $x11 = LUI 6
+ SH_INX killed renamable $x11_h, renamable $x10, 202 :: (volatile store (s16) into %ir.2)
+ renamable $x11 = LUI 7
+ SH_INX killed renamable $x11_h, killed renamable $x10, 202 :: (volatile store (s16) into %ir.3)
+ PseudoRET
+
+...
+---
+name: load_large_offset_half
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_half
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x11 = ADDI $x16, 100
+ ; CHECK-NEXT: dead $x10_h = LH_INX $x11, 0 :: (volatile load (s16) from %ir.0)
+ ; CHECK-NEXT: dead $x10_h = LH_INX $x11, 0 :: (volatile load (s16) from %ir.1)
+ ; CHECK-NEXT: dead $x10_h = LH_INX $x11, 2 :: (volatile load (s16) from %ir.2)
+ ; CHECK-NEXT: dead $x10_h = LH_INX killed $x11, 2 :: (volatile load (s16) from %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10_h = LH_INX renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+ dead $x10_h = LH_INX renamable $x16, 100 :: (volatile load (s16) from %ir.1)
+ dead $x10_h = LH_INX renamable $x16, 102 :: (volatile load (s16) from %ir.2)
+ dead $x10_h = LH_INX killed renamable $x16, 102 :: (volatile load (s16) from %ir.3)
+ PseudoRET
+
+...
+---
+name: store_large_offset_no_opt_half
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: store_large_offset_no_opt_half
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11 = LUI 4
+ ; CHECK-NEXT: SH_INX killed renamable $x11_h, renamable $x16, 200 :: (volatile store (s16) into %ir.0)
+ ; CHECK-NEXT: renamable $x11 = LUI 5
+ ; CHECK-NEXT: SH_INX killed renamable $x11_h, renamable $x16, 202 :: (volatile store (s16) into %ir.1)
+ ; CHECK-NEXT: renamable $x11 = LUI 6
+ ; CHECK-NEXT: SH_INX killed renamable $x11_h, renamable $x16, 204 :: (volatile store (s16) into %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ renamable $x11 = LUI 4
+ SH_INX killed renamable $x11_h, renamable $x16, 200 :: (volatile store (s16) into %ir.0)
+ renamable $x11 = LUI 5
+ SH_INX killed renamable $x11_h, renamable $x16, 202 :: (volatile store (s16) into %ir.1)
+ renamable $x11 = LUI 6
+ SH_INX killed renamable $x11_h, renamable $x16, 204 :: (volatile store (s16) into %ir.2)
+ PseudoRET
+
+...
+---
+name: load_large_offset_no_opt_half
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_large_offset_no_opt_half
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead $x10_h = LH_INX renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+ ; CHECK-NEXT: dead $x10_h = LH_INX renamable $x16, 102 :: (volatile load (s16) from %ir.1)
+ ; CHECK-NEXT: dead $x10_h = LH_INX killed renamable $x16, 104 :: (volatile load (s16) from %ir.2)
+ ; CHECK-NEXT: PseudoRET
+ dead $x10_h = LH_INX renamable $x16, 100 :: (volatile load (s16) from %ir.0)
+ dead $x10_h = LH_INX renamable $x16, 102 :: (volatile load (s16) from %ir.1)
+ dead $x10_h = LH_INX killed renamable $x16, 104 :: (volatile load (s16) from %ir.2)
+ PseudoRET
+
+...
>From 025652cf0872c56c460d3e1ea3eacc82f19946e2 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 16 Sep 2024 15:03:19 -0700
Subject: [PATCH 10/13] fixup! clang-format
---
llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
index d742f38bd120c3..5973e5bf2e5252 100644
--- a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
@@ -421,7 +421,8 @@ bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) {
.addImm(RegImm.Imm);
} else if (RISCV::GPRF16RegClass.contains(RegImm.Reg)) {
assert(RegImm.Imm == 0);
- BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(RISCV::PseudoMV_FPR16INX), NewReg)
+ BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(RISCV::PseudoMV_FPR16INX),
+ NewReg)
.addReg(RegImm.Reg);
} else {
// If we are looking at replacing an FPR register we don't expect to
>From e7055732ffdd8c8a25c9081f789014dae08d20cd Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 9 Sep 2024 13:43:48 -0700
Subject: [PATCH 11/13] [RISCV] Add 32 bit GPR sub-register for Zfinx.
This patch adds a 32 bit register class for use with Zfinx instructions.
This makes them more similar to F instructions and allows us to
only spill 32 bits.
I've added CodeGenOnly instructions for load/store using GPRF32 as that
gave better results than insert_subreg/extract_subreg. I'm using FSGNJ for
GPRF32 copy with Zfinx as that gave better results from MachineCopyPropagation.
Function arguments use this new GPRF32 register class for f32 arguments
with Zfinx. Eliminating the need to use RISCVISD::FMV* nodes.
This is similar to #107446 which adds a 16 bit register class.
---
.../Target/RISCV/AsmParser/RISCVAsmParser.cpp | 10 +
.../RISCV/Disassembler/RISCVDisassembler.cpp | 13 +
llvm/lib/Target/RISCV/RISCVCallingConv.cpp | 72 +++-
.../RISCV/RISCVDeadRegisterDefinitions.cpp | 2 +
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 3 +
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 14 +
llvm/lib/Target/RISCV/RISCVInstrInfoF.td | 27 +-
llvm/lib/Target/RISCV/RISCVRegisterInfo.td | 113 ++++--
.../CodeGen/RISCV/fastcc-without-f-reg.ll | 382 ++++++++++--------
llvm/test/CodeGen/RISCV/float-arith.ll | 35 +-
.../RISCV/float-bitmanip-dagcombines.ll | 6 +-
llvm/test/CodeGen/RISCV/float-br-fcmp.ll | 8 +-
llvm/test/CodeGen/RISCV/float-convert.ll | 8 +-
llvm/test/CodeGen/RISCV/float-frem.ll | 7 +-
llvm/test/CodeGen/RISCV/float-imm.ll | 4 +-
.../CodeGen/RISCV/float-intrinsics-strict.ll | 12 +-
llvm/test/CodeGen/RISCV/float-intrinsics.ll | 78 +---
.../CodeGen/RISCV/float-maximum-minimum.ll | 16 +-
llvm/test/CodeGen/RISCV/float-mem.ll | 4 +-
.../CodeGen/RISCV/float-round-conv-sat.ll | 48 +--
llvm/test/CodeGen/RISCV/float-select-fcmp.ll | 30 +-
llvm/test/CodeGen/RISCV/float-select-icmp.ll | 40 +-
llvm/test/CodeGen/RISCV/half-convert.ll | 8 +-
llvm/test/CodeGen/RISCV/half-intrinsics.ll | 20 +-
.../test/CodeGen/RISCV/half-round-conv-sat.ll | 48 +--
llvm/test/CodeGen/RISCV/kcfi-mir.ll | 4 +-
llvm/test/CodeGen/RISCV/llvm.frexp.ll | 128 +++---
27 files changed, 631 insertions(+), 509 deletions(-)
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 3f51c49aef5986..0f1cc84be14558 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -485,8 +485,14 @@ struct RISCVOperand final : public MCParsedAsmOperand {
RISCVMCRegisterClasses[RISCV::GPRF16RegClassID].contains(Reg.RegNum);
}
+ bool isGPRF32() const {
+ return Kind == KindTy::Register &&
+ RISCVMCRegisterClasses[RISCV::GPRF32RegClassID].contains(Reg.RegNum);
+ }
+
bool isGPRAsFPR() const { return isGPR() && Reg.IsGPRAsFPR; }
bool isGPRAsFPR16() const { return isGPRF16() && Reg.IsGPRAsFPR; }
+ bool isGPRAsFPR32() const { return isGPRF32() && Reg.IsGPRAsFPR; }
bool isGPRPairAsFPR() const { return isGPRPair() && Reg.IsGPRAsFPR; }
bool isGPRPair() const {
@@ -1352,6 +1358,10 @@ unsigned RISCVAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
Op.Reg.RegNum = Reg - RISCV::X0 + RISCV::X0_H;
return Match_Success;
}
+ if (Kind == MCK_GPRAsFPR32 && Op.isGPRAsFPR()) {
+ Op.Reg.RegNum = Reg - RISCV::X0 + RISCV::X0_W;
+ return Match_Success;
+ }
// There are some GPRF64AsFPR instructions that have no RV32 equivalent. We
// reject them at parsing thinking we should match as GPRPairAsFPR for RV32.
diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
index c2659a51b02096..7c8206cb44dec2 100644
--- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -94,6 +94,19 @@ static DecodeStatus DecodeGPRF16RegisterClass(MCInst &Inst, uint32_t RegNo,
return MCDisassembler::Success;
}
+static DecodeStatus DecodeGPRF32RegisterClass(MCInst &Inst, uint32_t RegNo,
+ uint64_t Address,
+ const MCDisassembler *Decoder) {
+ bool IsRVE = Decoder->getSubtargetInfo().hasFeature(RISCV::FeatureStdExtE);
+
+ if (RegNo >= 32 || (IsRVE && RegNo >= 16))
+ return MCDisassembler::Fail;
+
+ MCRegister Reg = RISCV::X0_W + RegNo;
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
static DecodeStatus DecodeGPRX1X5RegisterClass(MCInst &Inst, uint32_t RegNo,
uint64_t Address,
const MCDisassembler *Decoder) {
diff --git a/llvm/lib/Target/RISCV/RISCVCallingConv.cpp b/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
index 767f6bed802b7e..17cab6af3b83ec 100644
--- a/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
+++ b/llvm/lib/Target/RISCV/RISCVCallingConv.cpp
@@ -156,6 +156,23 @@ static ArrayRef<MCPhysReg> getArgGPR16s(const RISCVABI::ABI ABI) {
return ArrayRef(ArgIGPRs);
}
+static ArrayRef<MCPhysReg> getArgGPR32s(const RISCVABI::ABI ABI) {
+ // The GPRs used for passing arguments in the ILP32* and LP64* ABIs, except
+ // the ILP32E ABI.
+ static const MCPhysReg ArgIGPRs[] = {RISCV::X10_W, RISCV::X11_W, RISCV::X12_W,
+ RISCV::X13_W, RISCV::X14_W, RISCV::X15_W,
+ RISCV::X16_W, RISCV::X17_W};
+ // The GPRs used for passing arguments in the ILP32E/ILP64E ABI.
+ static const MCPhysReg ArgEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
+ RISCV::X12_W, RISCV::X13_W,
+ RISCV::X14_W, RISCV::X15_W};
+
+ if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
+ return ArrayRef(ArgEGPRs);
+
+ return ArrayRef(ArgIGPRs);
+}
+
static ArrayRef<MCPhysReg> getFastCCArgGPRs(const RISCVABI::ABI ABI) {
// The GPRs used for passing arguments in the FastCC, X5 and X6 might be used
// for save-restore libcall, so we don't use them.
@@ -194,6 +211,26 @@ static ArrayRef<MCPhysReg> getFastCCArgGPRF16s(const RISCVABI::ABI ABI) {
return ArrayRef(FastCCIGPRs);
}
+static ArrayRef<MCPhysReg> getFastCCArgGPRF32s(const RISCVABI::ABI ABI) {
+ // The GPRs used for passing arguments in the FastCC, X5 and X6 might be used
+ // for save-restore libcall, so we don't use them.
+ // Don't use X7 for fastcc, since Zicfilp uses X7 as the label register.
+ static const MCPhysReg FastCCIGPRs[] = {
+ RISCV::X10_W, RISCV::X11_W, RISCV::X12_W, RISCV::X13_W,
+ RISCV::X14_W, RISCV::X15_W, RISCV::X16_W, RISCV::X17_W,
+ RISCV::X28_W, RISCV::X29_W, RISCV::X30_W, RISCV::X31_W};
+
+ // The GPRs used for passing arguments in the FastCC when using ILP32E/ILP64E.
+ static const MCPhysReg FastCCEGPRs[] = {RISCV::X10_W, RISCV::X11_W,
+ RISCV::X12_W, RISCV::X13_W,
+ RISCV::X14_W, RISCV::X15_W};
+
+ if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
+ return ArrayRef(FastCCEGPRs);
+
+ return ArrayRef(FastCCIGPRs);
+}
+
// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
@@ -364,11 +401,17 @@ bool llvm::CC_RISCV(unsigned ValNo, MVT ValVT, MVT LocVT,
}
}
+ if ((ValVT == MVT::f32 && Subtarget.hasStdExtZfinx())) {
+ if (MCRegister Reg = State.AllocateReg(getArgGPR32s(ABI))) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+
ArrayRef<MCPhysReg> ArgGPRs = RISCV::getArgGPRs(ABI);
- // Zfinx/Zdinx use GPR without a bitcast when possible.
- if ((LocVT == MVT::f32 && XLen == 32 && Subtarget.hasStdExtZfinx()) ||
- (LocVT == MVT::f64 && XLen == 64 && Subtarget.hasStdExtZdinx())) {
+ // Zdinx uses GPR without a bitcast when possible.
+ if (LocVT == MVT::f64 && XLen == 64 && Subtarget.hasStdExtZdinx()) {
if (MCRegister Reg = State.AllocateReg(ArgGPRs)) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
return false;
@@ -616,10 +659,16 @@ bool llvm::CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
}
}
+ // Check if there is an available GPRF32 before hitting the stack.
+ if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
+ if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRF32s(ABI))) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+
// Check if there is an available GPR before hitting the stack.
- if ((LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
- (LocVT == MVT::f64 && Subtarget.is64Bit() &&
- Subtarget.hasStdExtZdinx())) {
+ if (LocVT == MVT::f64 && Subtarget.is64Bit() && Subtarget.hasStdExtZdinx()) {
if (MCRegister Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
if (LocVT.getSizeInBits() != Subtarget.getXLen()) {
LocVT = XLenVT;
@@ -723,6 +772,17 @@ bool llvm::CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
}
}
+ if (LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) {
+ static const MCPhysReg GPR32List[] = {
+ RISCV::X9_W, RISCV::X18_W, RISCV::X19_W, RISCV::X20_W,
+ RISCV::X21_W, RISCV::X22_W, RISCV::X23_W, RISCV::X24_W,
+ RISCV::X25_W, RISCV::X26_W, RISCV::X27_W};
+ if (MCRegister Reg = State.AllocateReg(GPR32List)) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return false;
+ }
+ }
+
if ((LocVT == MVT::f32 && Subtarget.hasStdExtZfinx()) ||
(LocVT == MVT::f64 && Subtarget.hasStdExtZdinx() &&
Subtarget.is64Bit())) {
diff --git a/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp b/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp
index 713c7a0661defe..d913c0b201a20c 100644
--- a/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp
+++ b/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp
@@ -97,6 +97,8 @@ bool RISCVDeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
const TargetRegisterClass *RC = TII->getRegClass(Desc, I, TRI, MF);
if (RC && RC->contains(RISCV::X0)) {
X0Reg = RISCV::X0;
+ } else if (RC && RC->contains(RISCV::X0_W)) {
+ X0Reg = RISCV::X0_W;
} else if (RC && RC->contains(RISCV::X0_H)) {
X0Reg = RISCV::X0_H;
} else {
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 3c5368238def58..76b2d61354d854 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -954,6 +954,9 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
if (VT.SimpleTy == MVT::f16 && Opc == RISCV::COPY) {
Res =
CurDAG->getTargetExtractSubreg(RISCV::sub_16, DL, VT, Imm).getNode();
+ } else if (VT.SimpleTy == MVT::f32 && Opc == RISCV::COPY) {
+ Res =
+ CurDAG->getTargetExtractSubreg(RISCV::sub_32, DL, VT, Imm).getNode();
} else if (Opc == RISCV::FCVT_D_W_IN32X || Opc == RISCV::FCVT_D_W)
Res = CurDAG->getMachineNode(
Opc, DL, VT, Imm,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 593e499e565bca..5e4e8eefe6122b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -471,6 +471,14 @@ void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
return;
}
+ if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {
+ assert(STI.hasStdExtZfinx());
+ BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_S_INX), DstReg)
+ .addReg(SrcReg, getKillRegState(KillSrc))
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
+
if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
// Emit an ADDI for both parts of GPRPair.
BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
@@ -595,6 +603,9 @@ void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
} else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
Opcode = RISCV::SH_INX;
IsScalableVector = false;
+ } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
+ Opcode = RISCV::SW_INX;
+ IsScalableVector = false;
} else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
Opcode = RISCV::PseudoRV32ZdinxSD;
IsScalableVector = false;
@@ -681,6 +692,9 @@ void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
} else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
Opcode = RISCV::LH_INX;
IsScalableVector = false;
+ } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
+ Opcode = RISCV::LW_INX;
+ IsScalableVector = false;
} else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
Opcode = RISCV::PseudoRV32ZdinxLD;
IsScalableVector = false;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
index a00acb372dc2a2..7e9e10381f7e3c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -83,15 +83,14 @@ def any_fma_nsz : PatFrag<(ops node:$rs1, node:$rs2, node:$rs3),
// Zfinx
-def GPRAsFPR : AsmOperandClass {
- let Name = "GPRAsFPR";
+def GPRAsFPR32 : AsmOperandClass {
+ let Name = "GPRAsFPR32";
let ParserMethod = "parseGPRAsFPR";
let RenderMethod = "addRegOperands";
}
def FPR32INX : RegisterOperand<GPRF32> {
- let ParserMatchClass = GPRAsFPR;
- let DecoderMethod = "DecodeGPRRegisterClass";
+ let ParserMatchClass = GPRAsFPR32;
}
// Describes a combination of predicates from F/D/Zfh/Zfhmin or
@@ -306,6 +305,12 @@ def FLW : FPLoad_r<0b010, "flw", FPR32, WriteFLD32>;
def FSW : FPStore_r<0b010, "fsw", FPR32, WriteFST32>;
} // Predicates = [HasStdExtF]
+let Predicates = [HasStdExtZfinx], isCodeGenOnly = 1 in {
+def LW_INX : Load_ri<0b010, "lw", GPRF32>, Sched<[WriteLDW, ReadMemBase]>;
+def SW_INX : Store_rri<0b010, "sw", GPRF32>,
+ Sched<[WriteSTW, ReadStoreData, ReadMemBase]>;
+}
+
foreach Ext = FExts in {
let SchedRW = [WriteFMA32, ReadFMA32, ReadFMA32, ReadFMA32Addend] in {
defm FMADD_S : FPFMA_rrr_frm_m<OPC_MADD, 0b00, "fmadd.s", Ext>;
@@ -685,12 +690,10 @@ defm Select_FPR32INX : SelectCC_GPR_rrirr<FPR32INX, f32>;
def PseudoFROUND_S_INX : PseudoFROUND<FPR32INX, f32>;
/// Loads
-def : Pat<(f32 (load (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12))),
- (COPY_TO_REGCLASS (LW GPR:$rs1, simm12:$imm12), GPRF32)>;
+def : LdPat<load, LW_INX, f32>;
/// Stores
-def : Pat<(store (f32 FPR32INX:$rs2), (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12)),
- (SW (COPY_TO_REGCLASS FPR32INX:$rs2, GPR), GPR:$rs1, simm12:$imm12)>;
+def : StPat<store, SW_INX, GPRF32, f32>;
} // Predicates = [HasStdExtZfinx]
let Predicates = [HasStdExtF] in {
@@ -701,8 +704,8 @@ def : Pat<(i32 (bitconvert FPR32:$rs1)), (FMV_X_W FPR32:$rs1)>;
let Predicates = [HasStdExtZfinx] in {
// Moves (no conversion)
-def : Pat<(f32 (bitconvert (i32 GPR:$rs1))), (COPY_TO_REGCLASS GPR:$rs1, GPRF32)>;
-def : Pat<(i32 (bitconvert FPR32INX:$rs1)), (COPY_TO_REGCLASS FPR32INX:$rs1, GPR)>;
+def : Pat<(f32 (bitconvert (i32 GPR:$rs1))), (EXTRACT_SUBREG GPR:$rs1, sub_32)>;
+def : Pat<(i32 (bitconvert FPR32INX:$rs1)), (INSERT_SUBREG (XLenVT (IMPLICIT_DEF)), FPR32INX:$rs1, sub_32)>;
} // Predicates = [HasStdExtZfinx]
let Predicates = [HasStdExtF] in {
@@ -781,8 +784,8 @@ def : Pat<(any_uint_to_fp (i64 GPR:$rs1)), (FCVT_S_LU $rs1, FRM_DYN)>;
let Predicates = [HasStdExtZfinx, IsRV64] in {
// Moves (no conversion)
-def : Pat<(riscv_fmv_w_x_rv64 GPR:$src), (COPY_TO_REGCLASS GPR:$src, GPRF32)>;
-def : Pat<(riscv_fmv_x_anyextw_rv64 GPRF32:$src), (COPY_TO_REGCLASS GPRF32:$src, GPR)>;
+def : Pat<(riscv_fmv_w_x_rv64 GPR:$src), (EXTRACT_SUBREG GPR:$src, sub_32)>;
+def : Pat<(riscv_fmv_x_anyextw_rv64 GPRF32:$src), (INSERT_SUBREG (XLenVT (IMPLICIT_DEF)), FPR32INX:$src, sub_32)>;
// Use target specific isd nodes to help us remember the result is sign
// extended. Matching sext_inreg+fptoui/fptosi may cause the conversion to be
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 9cb589f2441a21..7c7796ae17fb79 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -124,41 +124,81 @@ let RegAltNameIndices = [ABIRegAltName] in {
let SubRegIndices = [sub_16] in {
let isConstant = true in
- def X0 : RISCVRegWithSubRegs<0, "x0", [X0_H], ["zero"]>, DwarfRegNum<[0]>;
+ def X0_W : RISCVRegWithSubRegs<0, "x0", [X0_H], ["zero"]>;
let CostPerUse = [0, 1] in {
- def X1 : RISCVRegWithSubRegs<1, "x1", [X1_H], ["ra"]>, DwarfRegNum<[1]>;
- def X2 : RISCVRegWithSubRegs<2, "x2", [X2_H], ["sp"]>, DwarfRegNum<[2]>;
- def X3 : RISCVRegWithSubRegs<3, "x3", [X3_H], ["gp"]>, DwarfRegNum<[3]>;
- def X4 : RISCVRegWithSubRegs<4, "x4", [X4_H], ["tp"]>, DwarfRegNum<[4]>;
- def X5 : RISCVRegWithSubRegs<5, "x5", [X5_H], ["t0"]>, DwarfRegNum<[5]>;
- def X6 : RISCVRegWithSubRegs<6, "x6", [X6_H], ["t1"]>, DwarfRegNum<[6]>;
- def X7 : RISCVRegWithSubRegs<7, "x7", [X7_H], ["t2"]>, DwarfRegNum<[7]>;
+ def X1_W : RISCVRegWithSubRegs<1, "x1", [X1_H], ["ra"]>;
+ def X2_W : RISCVRegWithSubRegs<2, "x2", [X2_H], ["sp"]>;
+ def X3_W : RISCVRegWithSubRegs<3, "x3", [X3_H], ["gp"]>;
+ def X4_W : RISCVRegWithSubRegs<4, "x4", [X4_H], ["tp"]>;
+ def X5_W : RISCVRegWithSubRegs<5, "x5", [X5_H], ["t0"]>;
+ def X6_W : RISCVRegWithSubRegs<6, "x6", [X6_H], ["t1"]>;
+ def X7_W : RISCVRegWithSubRegs<7, "x7", [X7_H], ["t2"]>;
}
- def X8 : RISCVRegWithSubRegs<8, "x8", [X8_H], ["s0", "fp"]>, DwarfRegNum<[8]>;
- def X9 : RISCVRegWithSubRegs<9, "x9", [X9_H], ["s1"]>, DwarfRegNum<[9]>;
- def X10 : RISCVRegWithSubRegs<10,"x10", [X10_H], ["a0"]>, DwarfRegNum<[10]>;
- def X11 : RISCVRegWithSubRegs<11,"x11", [X11_H], ["a1"]>, DwarfRegNum<[11]>;
- def X12 : RISCVRegWithSubRegs<12,"x12", [X12_H], ["a2"]>, DwarfRegNum<[12]>;
- def X13 : RISCVRegWithSubRegs<13,"x13", [X13_H], ["a3"]>, DwarfRegNum<[13]>;
- def X14 : RISCVRegWithSubRegs<14,"x14", [X14_H], ["a4"]>, DwarfRegNum<[14]>;
- def X15 : RISCVRegWithSubRegs<15,"x15", [X15_H], ["a5"]>, DwarfRegNum<[15]>;
+ def X8_W : RISCVRegWithSubRegs<8, "x8", [X8_H], ["s0", "fp"]>;
+ def X9_W : RISCVRegWithSubRegs<9, "x9", [X9_H], ["s1"]>;
+ def X10_W : RISCVRegWithSubRegs<10,"x10", [X10_H], ["a0"]>;
+ def X11_W : RISCVRegWithSubRegs<11,"x11", [X11_H], ["a1"]>;
+ def X12_W : RISCVRegWithSubRegs<12,"x12", [X12_H], ["a2"]>;
+ def X13_W : RISCVRegWithSubRegs<13,"x13", [X13_H], ["a3"]>;
+ def X14_W : RISCVRegWithSubRegs<14,"x14", [X14_H], ["a4"]>;
+ def X15_W : RISCVRegWithSubRegs<15,"x15", [X15_H], ["a5"]>;
let CostPerUse = [0, 1] in {
- def X16 : RISCVRegWithSubRegs<16,"x16", [X16_H], ["a6"]>, DwarfRegNum<[16]>;
- def X17 : RISCVRegWithSubRegs<17,"x17", [X17_H], ["a7"]>, DwarfRegNum<[17]>;
- def X18 : RISCVRegWithSubRegs<18,"x18", [X18_H], ["s2"]>, DwarfRegNum<[18]>;
- def X19 : RISCVRegWithSubRegs<19,"x19", [X19_H], ["s3"]>, DwarfRegNum<[19]>;
- def X20 : RISCVRegWithSubRegs<20,"x20", [X20_H], ["s4"]>, DwarfRegNum<[20]>;
- def X21 : RISCVRegWithSubRegs<21,"x21", [X21_H], ["s5"]>, DwarfRegNum<[21]>;
- def X22 : RISCVRegWithSubRegs<22,"x22", [X22_H], ["s6"]>, DwarfRegNum<[22]>;
- def X23 : RISCVRegWithSubRegs<23,"x23", [X23_H], ["s7"]>, DwarfRegNum<[23]>;
- def X24 : RISCVRegWithSubRegs<24,"x24", [X24_H], ["s8"]>, DwarfRegNum<[24]>;
- def X25 : RISCVRegWithSubRegs<25,"x25", [X25_H], ["s9"]>, DwarfRegNum<[25]>;
- def X26 : RISCVRegWithSubRegs<26,"x26", [X26_H], ["s10"]>, DwarfRegNum<[26]>;
- def X27 : RISCVRegWithSubRegs<27,"x27", [X27_H], ["s11"]>, DwarfRegNum<[27]>;
- def X28 : RISCVRegWithSubRegs<28,"x28", [X28_H], ["t3"]>, DwarfRegNum<[28]>;
- def X29 : RISCVRegWithSubRegs<29,"x29", [X29_H], ["t4"]>, DwarfRegNum<[29]>;
- def X30 : RISCVRegWithSubRegs<30,"x30", [X30_H], ["t5"]>, DwarfRegNum<[30]>;
- def X31 : RISCVRegWithSubRegs<31,"x31", [X31_H], ["t6"]>, DwarfRegNum<[31]>;
+ def X16_W : RISCVRegWithSubRegs<16,"x16", [X16_H], ["a6"]>;
+ def X17_W : RISCVRegWithSubRegs<17,"x17", [X17_H], ["a7"]>;
+ def X18_W : RISCVRegWithSubRegs<18,"x18", [X18_H], ["s2"]>;
+ def X19_W : RISCVRegWithSubRegs<19,"x19", [X19_H], ["s3"]>;
+ def X20_W : RISCVRegWithSubRegs<20,"x20", [X20_H], ["s4"]>;
+ def X21_W : RISCVRegWithSubRegs<21,"x21", [X21_H], ["s5"]>;
+ def X22_W : RISCVRegWithSubRegs<22,"x22", [X22_H], ["s6"]>;
+ def X23_W : RISCVRegWithSubRegs<23,"x23", [X23_H], ["s7"]>;
+ def X24_W : RISCVRegWithSubRegs<24,"x24", [X24_H], ["s8"]>;
+ def X25_W : RISCVRegWithSubRegs<25,"x25", [X25_H], ["s9"]>;
+ def X26_W : RISCVRegWithSubRegs<26,"x26", [X26_H], ["s10"]>;
+ def X27_W : RISCVRegWithSubRegs<27,"x27", [X27_H], ["s11"]>;
+ def X28_W : RISCVRegWithSubRegs<28,"x28", [X28_H], ["t3"]>;
+ def X29_W : RISCVRegWithSubRegs<29,"x29", [X29_H], ["t4"]>;
+ def X30_W : RISCVRegWithSubRegs<30,"x30", [X30_H], ["t5"]>;
+ def X31_W : RISCVRegWithSubRegs<31,"x31", [X31_H], ["t6"]>;
+ }
+ }
+
+ let SubRegIndices = [sub_32] in {
+ let isConstant = true in
+ def X0 : RISCVRegWithSubRegs<0, "x0", [X0_W], ["zero"]>, DwarfRegNum<[0]>;
+ let CostPerUse = [0, 1] in {
+ def X1 : RISCVRegWithSubRegs<1, "x1", [X1_W], ["ra"]>, DwarfRegNum<[1]>;
+ def X2 : RISCVRegWithSubRegs<2, "x2", [X2_W], ["sp"]>, DwarfRegNum<[2]>;
+ def X3 : RISCVRegWithSubRegs<3, "x3", [X3_W], ["gp"]>, DwarfRegNum<[3]>;
+ def X4 : RISCVRegWithSubRegs<4, "x4", [X4_W], ["tp"]>, DwarfRegNum<[4]>;
+ def X5 : RISCVRegWithSubRegs<5, "x5", [X5_W], ["t0"]>, DwarfRegNum<[5]>;
+ def X6 : RISCVRegWithSubRegs<6, "x6", [X6_W], ["t1"]>, DwarfRegNum<[6]>;
+ def X7 : RISCVRegWithSubRegs<7, "x7", [X7_W], ["t2"]>, DwarfRegNum<[7]>;
+ }
+ def X8 : RISCVRegWithSubRegs<8, "x8", [X8_W], ["s0", "fp"]>, DwarfRegNum<[8]>;
+ def X9 : RISCVRegWithSubRegs<9, "x9", [X9_W], ["s1"]>, DwarfRegNum<[9]>;
+ def X10 : RISCVRegWithSubRegs<10,"x10", [X10_W], ["a0"]>, DwarfRegNum<[10]>;
+ def X11 : RISCVRegWithSubRegs<11,"x11", [X11_W], ["a1"]>, DwarfRegNum<[11]>;
+ def X12 : RISCVRegWithSubRegs<12,"x12", [X12_W], ["a2"]>, DwarfRegNum<[12]>;
+ def X13 : RISCVRegWithSubRegs<13,"x13", [X13_W], ["a3"]>, DwarfRegNum<[13]>;
+ def X14 : RISCVRegWithSubRegs<14,"x14", [X14_W], ["a4"]>, DwarfRegNum<[14]>;
+ def X15 : RISCVRegWithSubRegs<15,"x15", [X15_W], ["a5"]>, DwarfRegNum<[15]>;
+ let CostPerUse = [0, 1] in {
+ def X16 : RISCVRegWithSubRegs<16,"x16", [X16_W], ["a6"]>, DwarfRegNum<[16]>;
+ def X17 : RISCVRegWithSubRegs<17,"x17", [X17_W], ["a7"]>, DwarfRegNum<[17]>;
+ def X18 : RISCVRegWithSubRegs<18,"x18", [X18_W], ["s2"]>, DwarfRegNum<[18]>;
+ def X19 : RISCVRegWithSubRegs<19,"x19", [X19_W], ["s3"]>, DwarfRegNum<[19]>;
+ def X20 : RISCVRegWithSubRegs<20,"x20", [X20_W], ["s4"]>, DwarfRegNum<[20]>;
+ def X21 : RISCVRegWithSubRegs<21,"x21", [X21_W], ["s5"]>, DwarfRegNum<[21]>;
+ def X22 : RISCVRegWithSubRegs<22,"x22", [X22_W], ["s6"]>, DwarfRegNum<[22]>;
+ def X23 : RISCVRegWithSubRegs<23,"x23", [X23_W], ["s7"]>, DwarfRegNum<[23]>;
+ def X24 : RISCVRegWithSubRegs<24,"x24", [X24_W], ["s8"]>, DwarfRegNum<[24]>;
+ def X25 : RISCVRegWithSubRegs<25,"x25", [X25_W], ["s9"]>, DwarfRegNum<[25]>;
+ def X26 : RISCVRegWithSubRegs<26,"x26", [X26_W], ["s10"]>, DwarfRegNum<[26]>;
+ def X27 : RISCVRegWithSubRegs<27,"x27", [X27_W], ["s11"]>, DwarfRegNum<[27]>;
+ def X28 : RISCVRegWithSubRegs<28,"x28", [X28_W], ["t3"]>, DwarfRegNum<[28]>;
+ def X29 : RISCVRegWithSubRegs<29,"x29", [X29_W], ["t4"]>, DwarfRegNum<[29]>;
+ def X30 : RISCVRegWithSubRegs<30,"x30", [X30_W], ["t5"]>, DwarfRegNum<[30]>;
+ def X31 : RISCVRegWithSubRegs<31,"x31", [X31_W], ["t6"]>, DwarfRegNum<[31]>;
}
}
}
@@ -617,9 +657,12 @@ def GPRF16 : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 17),
def GPRF16C : RISCVRegisterClass<[f16], 16, (add (sequence "X%u_H", 10, 15),
(sequence "X%u_H", 8, 9))>;
-let RegInfos = XLenRI in {
-def GPRF32 : RISCVRegisterClass<[f32], 32, (add GPR)>;
-} // RegInfos = XLenRI
+def GPRF32 : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 17),
+ (sequence "X%u_W", 5, 7),
+ (sequence "X%u_W", 28, 31),
+ (sequence "X%u_W", 8, 9),
+ (sequence "X%u_W", 18, 27),
+ (sequence "X%u_W", 0, 4))>;
// Dummy zero register for use in the register pair containing X0 (as X1 is
// not read to or written when the X0 register pair is used).
diff --git a/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll b/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
index 08da52df9e70cd..0eefc34ad552a9 100644
--- a/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
+++ b/llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll
@@ -23,26 +23,34 @@ define half @caller_half(half %x) nounwind {
;
; ZFINX32-LABEL: caller_half:
; ZFINX32: # %bb.0: # %entry
+; ZFINX32-NEXT: # kill: def $x10_w killed $x10_w def $x10
; ZFINX32-NEXT: lui a1, 1048560
; ZFINX32-NEXT: or a0, a0, a1
+; ZFINX32-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZFINX32-NEXT: tail h
;
; ZFINX64-LABEL: caller_half:
; ZFINX64: # %bb.0: # %entry
+; ZFINX64-NEXT: # kill: def $x10_w killed $x10_w def $x10
; ZFINX64-NEXT: lui a1, 1048560
; ZFINX64-NEXT: or a0, a0, a1
+; ZFINX64-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZFINX64-NEXT: tail h
;
; ZDINX32-LABEL: caller_half:
; ZDINX32: # %bb.0: # %entry
+; ZDINX32-NEXT: # kill: def $x10_w killed $x10_w def $x10
; ZDINX32-NEXT: lui a1, 1048560
; ZDINX32-NEXT: or a0, a0, a1
+; ZDINX32-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZDINX32-NEXT: tail h
;
; ZDINX64-LABEL: caller_half:
; ZDINX64: # %bb.0: # %entry
+; ZDINX64-NEXT: # kill: def $x10_w killed $x10_w def $x10
; ZDINX64-NEXT: lui a1, 1048560
; ZDINX64-NEXT: or a0, a0, a1
+; ZDINX64-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZDINX64-NEXT: tail h
entry:
%0 = tail call fastcc half @h(half %x)
@@ -60,26 +68,34 @@ define internal fastcc half @h(half %x) nounwind {
;
; ZFINX32-LABEL: h:
; ZFINX32: # %bb.0:
+; ZFINX32-NEXT: # kill: def $x10_w killed $x10_w def $x10
; ZFINX32-NEXT: lui a1, 1048560
; ZFINX32-NEXT: or a0, a0, a1
+; ZFINX32-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZFINX32-NEXT: ret
;
; ZFINX64-LABEL: h:
; ZFINX64: # %bb.0:
+; ZFINX64-NEXT: # kill: def $x10_w killed $x10_w def $x10
; ZFINX64-NEXT: lui a1, 1048560
; ZFINX64-NEXT: or a0, a0, a1
+; ZFINX64-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZFINX64-NEXT: ret
;
; ZDINX32-LABEL: h:
; ZDINX32: # %bb.0:
+; ZDINX32-NEXT: # kill: def $x10_w killed $x10_w def $x10
; ZDINX32-NEXT: lui a1, 1048560
; ZDINX32-NEXT: or a0, a0, a1
+; ZDINX32-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZDINX32-NEXT: ret
;
; ZDINX64-LABEL: h:
; ZDINX64: # %bb.0:
+; ZDINX64-NEXT: # kill: def $x10_w killed $x10_w def $x10
; ZDINX64-NEXT: lui a1, 1048560
; ZDINX64-NEXT: or a0, a0, a1
+; ZDINX64-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZDINX64-NEXT: ret
ret half %x
}
@@ -220,24 +236,28 @@ define fastcc half @callee_half_32(<32 x half> %A) nounwind {
; ZFINX32: # %bb.0:
; ZFINX32-NEXT: lui a1, 1048560
; ZFINX32-NEXT: or a0, a0, a1
+; ZFINX32-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZFINX32-NEXT: ret
;
; ZFINX64-LABEL: callee_half_32:
; ZFINX64: # %bb.0:
; ZFINX64-NEXT: lui a1, 1048560
; ZFINX64-NEXT: or a0, a0, a1
+; ZFINX64-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZFINX64-NEXT: ret
;
; ZDINX32-LABEL: callee_half_32:
; ZDINX32: # %bb.0:
; ZDINX32-NEXT: lui a1, 1048560
; ZDINX32-NEXT: or a0, a0, a1
+; ZDINX32-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZDINX32-NEXT: ret
;
; ZDINX64-LABEL: callee_half_32:
; ZDINX64: # %bb.0:
; ZDINX64-NEXT: lui a1, 1048560
; ZDINX64-NEXT: or a0, a0, a1
+; ZDINX64-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZDINX64-NEXT: ret
%B = extractelement <32 x half> %A, i32 0
ret half %B
@@ -492,8 +512,10 @@ define half @caller_half_32(<32 x half> %A) nounwind {
; ZFINX32-NEXT: lw a3, 96(sp) # 4-byte Folded Reload
; ZFINX32-NEXT: lw a4, 92(sp) # 4-byte Folded Reload
; ZFINX32-NEXT: call callee_half_32
+; ZFINX32-NEXT: # kill: def $x10_w killed $x10_w def $x10
; ZFINX32-NEXT: lui a1, 1048560
; ZFINX32-NEXT: or a0, a0, a1
+; ZFINX32-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZFINX32-NEXT: lw ra, 156(sp) # 4-byte Folded Reload
; ZFINX32-NEXT: lw s0, 152(sp) # 4-byte Folded Reload
; ZFINX32-NEXT: lw s1, 148(sp) # 4-byte Folded Reload
@@ -588,8 +610,10 @@ define half @caller_half_32(<32 x half> %A) nounwind {
; ZFINX64-NEXT: ld a3, 176(sp) # 8-byte Folded Reload
; ZFINX64-NEXT: ld a4, 168(sp) # 8-byte Folded Reload
; ZFINX64-NEXT: call callee_half_32
+; ZFINX64-NEXT: # kill: def $x10_w killed $x10_w def $x10
; ZFINX64-NEXT: lui a1, 1048560
; ZFINX64-NEXT: or a0, a0, a1
+; ZFINX64-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZFINX64-NEXT: ld ra, 296(sp) # 8-byte Folded Reload
; ZFINX64-NEXT: ld s0, 288(sp) # 8-byte Folded Reload
; ZFINX64-NEXT: ld s1, 280(sp) # 8-byte Folded Reload
@@ -684,8 +708,10 @@ define half @caller_half_32(<32 x half> %A) nounwind {
; ZDINX32-NEXT: lw a3, 96(sp) # 4-byte Folded Reload
; ZDINX32-NEXT: lw a4, 92(sp) # 4-byte Folded Reload
; ZDINX32-NEXT: call callee_half_32
+; ZDINX32-NEXT: # kill: def $x10_w killed $x10_w def $x10
; ZDINX32-NEXT: lui a1, 1048560
; ZDINX32-NEXT: or a0, a0, a1
+; ZDINX32-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZDINX32-NEXT: lw ra, 156(sp) # 4-byte Folded Reload
; ZDINX32-NEXT: lw s0, 152(sp) # 4-byte Folded Reload
; ZDINX32-NEXT: lw s1, 148(sp) # 4-byte Folded Reload
@@ -780,8 +806,10 @@ define half @caller_half_32(<32 x half> %A) nounwind {
; ZDINX64-NEXT: ld a3, 176(sp) # 8-byte Folded Reload
; ZDINX64-NEXT: ld a4, 168(sp) # 8-byte Folded Reload
; ZDINX64-NEXT: call callee_half_32
+; ZDINX64-NEXT: # kill: def $x10_w killed $x10_w def $x10
; ZDINX64-NEXT: lui a1, 1048560
; ZDINX64-NEXT: or a0, a0, a1
+; ZDINX64-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; ZDINX64-NEXT: ld ra, 296(sp) # 8-byte Folded Reload
; ZDINX64-NEXT: ld s0, 288(sp) # 8-byte Folded Reload
; ZDINX64-NEXT: ld s1, 280(sp) # 8-byte Folded Reload
@@ -917,48 +945,48 @@ define float @caller_float_32(<32 x float> %A) nounwind {
;
; ZHINX64-LABEL: caller_float_32:
; ZHINX64: # %bb.0:
-; ZHINX64-NEXT: addi sp, sp, -224
-; ZHINX64-NEXT: sd ra, 216(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s0, 208(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s1, 200(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s2, 192(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s3, 184(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s4, 176(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s5, 168(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s6, 160(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s7, 152(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s8, 144(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s9, 136(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s10, 128(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: sd s11, 120(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: lw t0, 248(sp)
-; ZHINX64-NEXT: sd t0, 112(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: lw t0, 240(sp)
-; ZHINX64-NEXT: sd t0, 104(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: lw t0, 232(sp)
-; ZHINX64-NEXT: sd t0, 96(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: addi sp, sp, -208
+; ZHINX64-NEXT: sd ra, 200(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s0, 192(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s1, 184(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s2, 176(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s3, 168(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s4, 160(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s5, 152(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s6, 144(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s7, 136(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s8, 128(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s9, 120(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s10, 112(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: sd s11, 104(sp) # 8-byte Folded Spill
+; ZHINX64-NEXT: lw t0, 208(sp)
+; ZHINX64-NEXT: sw t0, 100(sp) # 4-byte Folded Spill
+; ZHINX64-NEXT: lw t0, 216(sp)
+; ZHINX64-NEXT: sw t0, 96(sp) # 4-byte Folded Spill
; ZHINX64-NEXT: lw t0, 224(sp)
-; ZHINX64-NEXT: sd t0, 88(sp) # 8-byte Folded Spill
-; ZHINX64-NEXT: lw t6, 256(sp)
-; ZHINX64-NEXT: lw t5, 264(sp)
-; ZHINX64-NEXT: lw t4, 272(sp)
-; ZHINX64-NEXT: lw s0, 280(sp)
-; ZHINX64-NEXT: lw s1, 288(sp)
-; ZHINX64-NEXT: lw s2, 296(sp)
-; ZHINX64-NEXT: lw s3, 304(sp)
-; ZHINX64-NEXT: lw s4, 312(sp)
-; ZHINX64-NEXT: lw s5, 320(sp)
-; ZHINX64-NEXT: lw s6, 328(sp)
-; ZHINX64-NEXT: lw s7, 336(sp)
-; ZHINX64-NEXT: lw s8, 344(sp)
-; ZHINX64-NEXT: lw s9, 352(sp)
-; ZHINX64-NEXT: lw s10, 360(sp)
-; ZHINX64-NEXT: lw s11, 368(sp)
-; ZHINX64-NEXT: lw ra, 376(sp)
-; ZHINX64-NEXT: lw t3, 384(sp)
-; ZHINX64-NEXT: lw t2, 392(sp)
-; ZHINX64-NEXT: lw t1, 400(sp)
-; ZHINX64-NEXT: lw t0, 408(sp)
+; ZHINX64-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
+; ZHINX64-NEXT: lw t0, 232(sp)
+; ZHINX64-NEXT: sw t0, 88(sp) # 4-byte Folded Spill
+; ZHINX64-NEXT: lw t6, 240(sp)
+; ZHINX64-NEXT: lw t5, 248(sp)
+; ZHINX64-NEXT: lw t4, 256(sp)
+; ZHINX64-NEXT: lw s0, 264(sp)
+; ZHINX64-NEXT: lw s1, 272(sp)
+; ZHINX64-NEXT: lw s2, 280(sp)
+; ZHINX64-NEXT: lw s3, 288(sp)
+; ZHINX64-NEXT: lw s4, 296(sp)
+; ZHINX64-NEXT: lw s5, 304(sp)
+; ZHINX64-NEXT: lw s6, 312(sp)
+; ZHINX64-NEXT: lw s7, 320(sp)
+; ZHINX64-NEXT: lw s8, 328(sp)
+; ZHINX64-NEXT: lw s9, 336(sp)
+; ZHINX64-NEXT: lw s10, 344(sp)
+; ZHINX64-NEXT: lw s11, 352(sp)
+; ZHINX64-NEXT: lw ra, 360(sp)
+; ZHINX64-NEXT: lw t3, 368(sp)
+; ZHINX64-NEXT: lw t2, 376(sp)
+; ZHINX64-NEXT: lw t1, 384(sp)
+; ZHINX64-NEXT: lw t0, 392(sp)
; ZHINX64-NEXT: sw t0, 76(sp)
; ZHINX64-NEXT: sw t1, 72(sp)
; ZHINX64-NEXT: sw t2, 68(sp)
@@ -979,25 +1007,25 @@ define float @caller_float_32(<32 x float> %A) nounwind {
; ZHINX64-NEXT: sw t4, 8(sp)
; ZHINX64-NEXT: sw t5, 4(sp)
; ZHINX64-NEXT: sw t6, 0(sp)
-; ZHINX64-NEXT: ld t3, 88(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld t4, 96(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld t5, 104(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld t6, 112(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: lw t3, 100(sp) # 4-byte Folded Reload
+; ZHINX64-NEXT: lw t4, 96(sp) # 4-byte Folded Reload
+; ZHINX64-NEXT: lw t5, 92(sp) # 4-byte Folded Reload
+; ZHINX64-NEXT: lw t6, 88(sp) # 4-byte Folded Reload
; ZHINX64-NEXT: call callee_float_32
-; ZHINX64-NEXT: ld ra, 216(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s0, 208(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s1, 200(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s2, 192(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s3, 184(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s4, 176(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s5, 168(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s6, 160(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s7, 152(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s8, 144(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s9, 136(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s10, 128(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: ld s11, 120(sp) # 8-byte Folded Reload
-; ZHINX64-NEXT: addi sp, sp, 224
+; ZHINX64-NEXT: ld ra, 200(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s0, 192(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s1, 184(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s2, 176(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s3, 168(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s4, 160(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s5, 152(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s6, 144(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s7, 136(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s8, 128(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s9, 120(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s10, 112(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: ld s11, 104(sp) # 8-byte Folded Reload
+; ZHINX64-NEXT: addi sp, sp, 208
; ZHINX64-NEXT: ret
;
; ZFINX32-LABEL: caller_float_32:
@@ -1087,48 +1115,48 @@ define float @caller_float_32(<32 x float> %A) nounwind {
;
; ZFINX64-LABEL: caller_float_32:
; ZFINX64: # %bb.0:
-; ZFINX64-NEXT: addi sp, sp, -224
-; ZFINX64-NEXT: sd ra, 216(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: sd s0, 208(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: sd s1, 200(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: sd s2, 192(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: sd s3, 184(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: sd s4, 176(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: sd s5, 168(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: sd s6, 160(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: sd s7, 152(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: sd s8, 144(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: sd s9, 136(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: sd s10, 128(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: sd s11, 120(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: lw t0, 248(sp)
-; ZFINX64-NEXT: sd t0, 112(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: lw t0, 240(sp)
-; ZFINX64-NEXT: sd t0, 104(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: lw t0, 232(sp)
-; ZFINX64-NEXT: sd t0, 96(sp) # 8-byte Folded Spill
+; ZFINX64-NEXT: addi sp, sp, -208
+; ZFINX64-NEXT: sd ra, 200(sp) # 8-byte Folded Spill
+; ZFINX64-NEXT: sd s0, 192(sp) # 8-byte Folded Spill
+; ZFINX64-NEXT: sd s1, 184(sp) # 8-byte Folded Spill
+; ZFINX64-NEXT: sd s2, 176(sp) # 8-byte Folded Spill
+; ZFINX64-NEXT: sd s3, 168(sp) # 8-byte Folded Spill
+; ZFINX64-NEXT: sd s4, 160(sp) # 8-byte Folded Spill
+; ZFINX64-NEXT: sd s5, 152(sp) # 8-byte Folded Spill
+; ZFINX64-NEXT: sd s6, 144(sp) # 8-byte Folded Spill
+; ZFINX64-NEXT: sd s7, 136(sp) # 8-byte Folded Spill
+; ZFINX64-NEXT: sd s8, 128(sp) # 8-byte Folded Spill
+; ZFINX64-NEXT: sd s9, 120(sp) # 8-byte Folded Spill
+; ZFINX64-NEXT: sd s10, 112(sp) # 8-byte Folded Spill
+; ZFINX64-NEXT: sd s11, 104(sp) # 8-byte Folded Spill
+; ZFINX64-NEXT: lw t0, 208(sp)
+; ZFINX64-NEXT: sw t0, 100(sp) # 4-byte Folded Spill
+; ZFINX64-NEXT: lw t0, 216(sp)
+; ZFINX64-NEXT: sw t0, 96(sp) # 4-byte Folded Spill
; ZFINX64-NEXT: lw t0, 224(sp)
-; ZFINX64-NEXT: sd t0, 88(sp) # 8-byte Folded Spill
-; ZFINX64-NEXT: lw t6, 256(sp)
-; ZFINX64-NEXT: lw t5, 264(sp)
-; ZFINX64-NEXT: lw t4, 272(sp)
-; ZFINX64-NEXT: lw s0, 280(sp)
-; ZFINX64-NEXT: lw s1, 288(sp)
-; ZFINX64-NEXT: lw s2, 296(sp)
-; ZFINX64-NEXT: lw s3, 304(sp)
-; ZFINX64-NEXT: lw s4, 312(sp)
-; ZFINX64-NEXT: lw s5, 320(sp)
-; ZFINX64-NEXT: lw s6, 328(sp)
-; ZFINX64-NEXT: lw s7, 336(sp)
-; ZFINX64-NEXT: lw s8, 344(sp)
-; ZFINX64-NEXT: lw s9, 352(sp)
-; ZFINX64-NEXT: lw s10, 360(sp)
-; ZFINX64-NEXT: lw s11, 368(sp)
-; ZFINX64-NEXT: lw ra, 376(sp)
-; ZFINX64-NEXT: lw t3, 384(sp)
-; ZFINX64-NEXT: lw t2, 392(sp)
-; ZFINX64-NEXT: lw t1, 400(sp)
-; ZFINX64-NEXT: lw t0, 408(sp)
+; ZFINX64-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
+; ZFINX64-NEXT: lw t0, 232(sp)
+; ZFINX64-NEXT: sw t0, 88(sp) # 4-byte Folded Spill
+; ZFINX64-NEXT: lw t6, 240(sp)
+; ZFINX64-NEXT: lw t5, 248(sp)
+; ZFINX64-NEXT: lw t4, 256(sp)
+; ZFINX64-NEXT: lw s0, 264(sp)
+; ZFINX64-NEXT: lw s1, 272(sp)
+; ZFINX64-NEXT: lw s2, 280(sp)
+; ZFINX64-NEXT: lw s3, 288(sp)
+; ZFINX64-NEXT: lw s4, 296(sp)
+; ZFINX64-NEXT: lw s5, 304(sp)
+; ZFINX64-NEXT: lw s6, 312(sp)
+; ZFINX64-NEXT: lw s7, 320(sp)
+; ZFINX64-NEXT: lw s8, 328(sp)
+; ZFINX64-NEXT: lw s9, 336(sp)
+; ZFINX64-NEXT: lw s10, 344(sp)
+; ZFINX64-NEXT: lw s11, 352(sp)
+; ZFINX64-NEXT: lw ra, 360(sp)
+; ZFINX64-NEXT: lw t3, 368(sp)
+; ZFINX64-NEXT: lw t2, 376(sp)
+; ZFINX64-NEXT: lw t1, 384(sp)
+; ZFINX64-NEXT: lw t0, 392(sp)
; ZFINX64-NEXT: sw t0, 76(sp)
; ZFINX64-NEXT: sw t1, 72(sp)
; ZFINX64-NEXT: sw t2, 68(sp)
@@ -1149,25 +1177,25 @@ define float @caller_float_32(<32 x float> %A) nounwind {
; ZFINX64-NEXT: sw t4, 8(sp)
; ZFINX64-NEXT: sw t5, 4(sp)
; ZFINX64-NEXT: sw t6, 0(sp)
-; ZFINX64-NEXT: ld t3, 88(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld t4, 96(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld t5, 104(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld t6, 112(sp) # 8-byte Folded Reload
+; ZFINX64-NEXT: lw t3, 100(sp) # 4-byte Folded Reload
+; ZFINX64-NEXT: lw t4, 96(sp) # 4-byte Folded Reload
+; ZFINX64-NEXT: lw t5, 92(sp) # 4-byte Folded Reload
+; ZFINX64-NEXT: lw t6, 88(sp) # 4-byte Folded Reload
; ZFINX64-NEXT: call callee_float_32
-; ZFINX64-NEXT: ld ra, 216(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld s0, 208(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld s1, 200(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld s2, 192(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld s3, 184(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld s4, 176(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld s5, 168(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld s6, 160(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld s7, 152(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld s8, 144(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld s9, 136(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld s10, 128(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: ld s11, 120(sp) # 8-byte Folded Reload
-; ZFINX64-NEXT: addi sp, sp, 224
+; ZFINX64-NEXT: ld ra, 200(sp) # 8-byte Folded Reload
+; ZFINX64-NEXT: ld s0, 192(sp) # 8-byte Folded Reload
+; ZFINX64-NEXT: ld s1, 184(sp) # 8-byte Folded Reload
+; ZFINX64-NEXT: ld s2, 176(sp) # 8-byte Folded Reload
+; ZFINX64-NEXT: ld s3, 168(sp) # 8-byte Folded Reload
+; ZFINX64-NEXT: ld s4, 160(sp) # 8-byte Folded Reload
+; ZFINX64-NEXT: ld s5, 152(sp) # 8-byte Folded Reload
+; ZFINX64-NEXT: ld s6, 144(sp) # 8-byte Folded Reload
+; ZFINX64-NEXT: ld s7, 136(sp) # 8-byte Folded Reload
+; ZFINX64-NEXT: ld s8, 128(sp) # 8-byte Folded Reload
+; ZFINX64-NEXT: ld s9, 120(sp) # 8-byte Folded Reload
+; ZFINX64-NEXT: ld s10, 112(sp) # 8-byte Folded Reload
+; ZFINX64-NEXT: ld s11, 104(sp) # 8-byte Folded Reload
+; ZFINX64-NEXT: addi sp, sp, 208
; ZFINX64-NEXT: ret
;
; ZDINX32-LABEL: caller_float_32:
@@ -1257,48 +1285,48 @@ define float @caller_float_32(<32 x float> %A) nounwind {
;
; ZDINX64-LABEL: caller_float_32:
; ZDINX64: # %bb.0:
-; ZDINX64-NEXT: addi sp, sp, -224
-; ZDINX64-NEXT: sd ra, 216(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: sd s0, 208(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: sd s1, 200(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: sd s2, 192(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: sd s3, 184(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: sd s4, 176(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: sd s5, 168(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: sd s6, 160(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: sd s7, 152(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: sd s8, 144(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: sd s9, 136(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: sd s10, 128(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: sd s11, 120(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: lw t0, 248(sp)
-; ZDINX64-NEXT: sd t0, 112(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: lw t0, 240(sp)
-; ZDINX64-NEXT: sd t0, 104(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: lw t0, 232(sp)
-; ZDINX64-NEXT: sd t0, 96(sp) # 8-byte Folded Spill
+; ZDINX64-NEXT: addi sp, sp, -208
+; ZDINX64-NEXT: sd ra, 200(sp) # 8-byte Folded Spill
+; ZDINX64-NEXT: sd s0, 192(sp) # 8-byte Folded Spill
+; ZDINX64-NEXT: sd s1, 184(sp) # 8-byte Folded Spill
+; ZDINX64-NEXT: sd s2, 176(sp) # 8-byte Folded Spill
+; ZDINX64-NEXT: sd s3, 168(sp) # 8-byte Folded Spill
+; ZDINX64-NEXT: sd s4, 160(sp) # 8-byte Folded Spill
+; ZDINX64-NEXT: sd s5, 152(sp) # 8-byte Folded Spill
+; ZDINX64-NEXT: sd s6, 144(sp) # 8-byte Folded Spill
+; ZDINX64-NEXT: sd s7, 136(sp) # 8-byte Folded Spill
+; ZDINX64-NEXT: sd s8, 128(sp) # 8-byte Folded Spill
+; ZDINX64-NEXT: sd s9, 120(sp) # 8-byte Folded Spill
+; ZDINX64-NEXT: sd s10, 112(sp) # 8-byte Folded Spill
+; ZDINX64-NEXT: sd s11, 104(sp) # 8-byte Folded Spill
+; ZDINX64-NEXT: lw t0, 208(sp)
+; ZDINX64-NEXT: sw t0, 100(sp) # 4-byte Folded Spill
+; ZDINX64-NEXT: lw t0, 216(sp)
+; ZDINX64-NEXT: sw t0, 96(sp) # 4-byte Folded Spill
; ZDINX64-NEXT: lw t0, 224(sp)
-; ZDINX64-NEXT: sd t0, 88(sp) # 8-byte Folded Spill
-; ZDINX64-NEXT: lw t6, 256(sp)
-; ZDINX64-NEXT: lw t5, 264(sp)
-; ZDINX64-NEXT: lw t4, 272(sp)
-; ZDINX64-NEXT: lw s0, 280(sp)
-; ZDINX64-NEXT: lw s1, 288(sp)
-; ZDINX64-NEXT: lw s2, 296(sp)
-; ZDINX64-NEXT: lw s3, 304(sp)
-; ZDINX64-NEXT: lw s4, 312(sp)
-; ZDINX64-NEXT: lw s5, 320(sp)
-; ZDINX64-NEXT: lw s6, 328(sp)
-; ZDINX64-NEXT: lw s7, 336(sp)
-; ZDINX64-NEXT: lw s8, 344(sp)
-; ZDINX64-NEXT: lw s9, 352(sp)
-; ZDINX64-NEXT: lw s10, 360(sp)
-; ZDINX64-NEXT: lw s11, 368(sp)
-; ZDINX64-NEXT: lw ra, 376(sp)
-; ZDINX64-NEXT: lw t3, 384(sp)
-; ZDINX64-NEXT: lw t2, 392(sp)
-; ZDINX64-NEXT: lw t1, 400(sp)
-; ZDINX64-NEXT: lw t0, 408(sp)
+; ZDINX64-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
+; ZDINX64-NEXT: lw t0, 232(sp)
+; ZDINX64-NEXT: sw t0, 88(sp) # 4-byte Folded Spill
+; ZDINX64-NEXT: lw t6, 240(sp)
+; ZDINX64-NEXT: lw t5, 248(sp)
+; ZDINX64-NEXT: lw t4, 256(sp)
+; ZDINX64-NEXT: lw s0, 264(sp)
+; ZDINX64-NEXT: lw s1, 272(sp)
+; ZDINX64-NEXT: lw s2, 280(sp)
+; ZDINX64-NEXT: lw s3, 288(sp)
+; ZDINX64-NEXT: lw s4, 296(sp)
+; ZDINX64-NEXT: lw s5, 304(sp)
+; ZDINX64-NEXT: lw s6, 312(sp)
+; ZDINX64-NEXT: lw s7, 320(sp)
+; ZDINX64-NEXT: lw s8, 328(sp)
+; ZDINX64-NEXT: lw s9, 336(sp)
+; ZDINX64-NEXT: lw s10, 344(sp)
+; ZDINX64-NEXT: lw s11, 352(sp)
+; ZDINX64-NEXT: lw ra, 360(sp)
+; ZDINX64-NEXT: lw t3, 368(sp)
+; ZDINX64-NEXT: lw t2, 376(sp)
+; ZDINX64-NEXT: lw t1, 384(sp)
+; ZDINX64-NEXT: lw t0, 392(sp)
; ZDINX64-NEXT: sw t0, 76(sp)
; ZDINX64-NEXT: sw t1, 72(sp)
; ZDINX64-NEXT: sw t2, 68(sp)
@@ -1319,25 +1347,25 @@ define float @caller_float_32(<32 x float> %A) nounwind {
; ZDINX64-NEXT: sw t4, 8(sp)
; ZDINX64-NEXT: sw t5, 4(sp)
; ZDINX64-NEXT: sw t6, 0(sp)
-; ZDINX64-NEXT: ld t3, 88(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld t4, 96(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld t5, 104(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld t6, 112(sp) # 8-byte Folded Reload
+; ZDINX64-NEXT: lw t3, 100(sp) # 4-byte Folded Reload
+; ZDINX64-NEXT: lw t4, 96(sp) # 4-byte Folded Reload
+; ZDINX64-NEXT: lw t5, 92(sp) # 4-byte Folded Reload
+; ZDINX64-NEXT: lw t6, 88(sp) # 4-byte Folded Reload
; ZDINX64-NEXT: call callee_float_32
-; ZDINX64-NEXT: ld ra, 216(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld s0, 208(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld s1, 200(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld s2, 192(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld s3, 184(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld s4, 176(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld s5, 168(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld s6, 160(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld s7, 152(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld s8, 144(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld s9, 136(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld s10, 128(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: ld s11, 120(sp) # 8-byte Folded Reload
-; ZDINX64-NEXT: addi sp, sp, 224
+; ZDINX64-NEXT: ld ra, 200(sp) # 8-byte Folded Reload
+; ZDINX64-NEXT: ld s0, 192(sp) # 8-byte Folded Reload
+; ZDINX64-NEXT: ld s1, 184(sp) # 8-byte Folded Reload
+; ZDINX64-NEXT: ld s2, 176(sp) # 8-byte Folded Reload
+; ZDINX64-NEXT: ld s3, 168(sp) # 8-byte Folded Reload
+; ZDINX64-NEXT: ld s4, 160(sp) # 8-byte Folded Reload
+; ZDINX64-NEXT: ld s5, 152(sp) # 8-byte Folded Reload
+; ZDINX64-NEXT: ld s6, 144(sp) # 8-byte Folded Reload
+; ZDINX64-NEXT: ld s7, 136(sp) # 8-byte Folded Reload
+; ZDINX64-NEXT: ld s8, 128(sp) # 8-byte Folded Reload
+; ZDINX64-NEXT: ld s9, 120(sp) # 8-byte Folded Reload
+; ZDINX64-NEXT: ld s10, 112(sp) # 8-byte Folded Reload
+; ZDINX64-NEXT: ld s11, 104(sp) # 8-byte Folded Reload
+; ZDINX64-NEXT: addi sp, sp, 208
; ZDINX64-NEXT: ret
%C = call fastcc float @callee_float_32(<32 x float> %A)
ret float %C
diff --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll
index 3f32734db0ba71..bf500d1a2adb39 100644
--- a/llvm/test/CodeGen/RISCV/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith.ll
@@ -706,18 +706,11 @@ define float @fnmadd_s_3(float %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fneg.s fa0, fa5
; CHECKIF-NEXT: ret
;
-; RV32IZFINX-LABEL: fnmadd_s_3:
-; RV32IZFINX: # %bb.0:
-; RV32IZFINX-NEXT: fmadd.s a0, a0, a1, a2
-; RV32IZFINX-NEXT: fneg.s a0, a0
-; RV32IZFINX-NEXT: ret
-;
-; RV64IZFINX-LABEL: fnmadd_s_3:
-; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: fmadd.s a0, a0, a1, a2
-; RV64IZFINX-NEXT: lui a1, 524288
-; RV64IZFINX-NEXT: xor a0, a0, a1
-; RV64IZFINX-NEXT: ret
+; CHECKIZFINX-LABEL: fnmadd_s_3:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fmadd.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: fneg.s a0, a0
+; CHECKIZFINX-NEXT: ret
;
; RV32I-LABEL: fnmadd_s_3:
; RV32I: # %bb.0:
@@ -761,17 +754,10 @@ define float @fnmadd_nsz(float %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fnmadd.s fa0, fa0, fa1, fa2
; CHECKIF-NEXT: ret
;
-; RV32IZFINX-LABEL: fnmadd_nsz:
-; RV32IZFINX: # %bb.0:
-; RV32IZFINX-NEXT: fnmadd.s a0, a0, a1, a2
-; RV32IZFINX-NEXT: ret
-;
-; RV64IZFINX-LABEL: fnmadd_nsz:
-; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: fmadd.s a0, a0, a1, a2
-; RV64IZFINX-NEXT: lui a1, 524288
-; RV64IZFINX-NEXT: xor a0, a0, a1
-; RV64IZFINX-NEXT: ret
+; CHECKIZFINX-LABEL: fnmadd_nsz:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fnmadd.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
;
; RV32I-LABEL: fnmadd_nsz:
; RV32I: # %bb.0:
@@ -1247,3 +1233,6 @@ define float @fsgnjx_f32(float %x, float %y) nounwind {
%mul = fmul float %z, %y
ret float %mul
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32IZFINX: {{.*}}
+; RV64IZFINX: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
index 2338219687ef75..86f6f079243c26 100644
--- a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
@@ -50,8 +50,7 @@ define float @fneg(float %a) nounwind {
;
; RV64IZFINX-LABEL: fneg:
; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: lui a1, 524288
-; RV64IZFINX-NEXT: xor a0, a0, a1
+; RV64IZFINX-NEXT: fneg.s a0, a0
; RV64IZFINX-NEXT: ret
%1 = fneg float %a
ret float %1
@@ -91,8 +90,7 @@ define float @fabs(float %a) nounwind {
;
; RV64IZFINX-LABEL: fabs:
; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: slli a0, a0, 33
-; RV64IZFINX-NEXT: srli a0, a0, 33
+; RV64IZFINX-NEXT: fabs.s a0, a0
; RV64IZFINX-NEXT: ret
%1 = call float @llvm.fabs.f32(float %a)
ret float %1
diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
index 35caa627b57bc3..20c8a2f4ceefe9 100644
--- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
@@ -1003,12 +1003,12 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
; RV32IZFINX: # %bb.0: # %entry
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: li a0, 0
+; RV32IZFINX-NEXT: fmv.s a0, zero
; RV32IZFINX-NEXT: call dummy
; RV32IZFINX-NEXT: feq.s a0, a0, zero
; RV32IZFINX-NEXT: beqz a0, .LBB17_3
; RV32IZFINX-NEXT: # %bb.1: # %if.end
-; RV32IZFINX-NEXT: li a0, 0
+; RV32IZFINX-NEXT: fmv.s a0, zero
; RV32IZFINX-NEXT: call dummy
; RV32IZFINX-NEXT: feq.s a0, a0, zero
; RV32IZFINX-NEXT: beqz a0, .LBB17_3
@@ -1024,12 +1024,12 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
; RV64IZFINX: # %bb.0: # %entry
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: li a0, 0
+; RV64IZFINX-NEXT: fmv.s a0, zero
; RV64IZFINX-NEXT: call dummy
; RV64IZFINX-NEXT: feq.s a0, a0, zero
; RV64IZFINX-NEXT: beqz a0, .LBB17_3
; RV64IZFINX-NEXT: # %bb.1: # %if.end
-; RV64IZFINX-NEXT: li a0, 0
+; RV64IZFINX-NEXT: fmv.s a0, zero
; RV64IZFINX-NEXT: call dummy
; RV64IZFINX-NEXT: feq.s a0, a0, zero
; RV64IZFINX-NEXT: beqz a0, .LBB17_3
diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index 031976b4fa2b21..0ffd5ff9c37a4f 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -671,10 +671,10 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
@@ -905,10 +905,10 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: fle.s a0, zero, a0
; RV32IZFINX-NEXT: neg s1, a0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: lui a2, 391168
diff --git a/llvm/test/CodeGen/RISCV/float-frem.ll b/llvm/test/CodeGen/RISCV/float-frem.ll
index 651b1b116adc76..31d39a5ab6d6ea 100644
--- a/llvm/test/CodeGen/RISCV/float-frem.ll
+++ b/llvm/test/CodeGen/RISCV/float-frem.ll
@@ -27,12 +27,7 @@ define float @frem_f32(float %a, float %b) nounwind {
;
; RV64IZFINX-LABEL: frem_f32:
; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: addi sp, sp, -16
-; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call fmodf
-; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IZFINX-NEXT: addi sp, sp, 16
-; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: tail fmodf
;
; RV32I-LABEL: frem_f32:
; RV32I: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/float-imm.ll b/llvm/test/CodeGen/RISCV/float-imm.ll
index 69a506cd850f2c..1f82dc17d7cff9 100644
--- a/llvm/test/CodeGen/RISCV/float-imm.ll
+++ b/llvm/test/CodeGen/RISCV/float-imm.ll
@@ -20,12 +20,14 @@ define float @float_imm() nounwind {
; RV32ZFINX: # %bb.0:
; RV32ZFINX-NEXT: lui a0, 263313
; RV32ZFINX-NEXT: addi a0, a0, -37
+; RV32ZFINX-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; RV32ZFINX-NEXT: ret
;
; RV64ZFINX-LABEL: float_imm:
; RV64ZFINX: # %bb.0:
; RV64ZFINX-NEXT: lui a0, 263313
; RV64ZFINX-NEXT: addiw a0, a0, -37
+; RV64ZFINX-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; RV64ZFINX-NEXT: ret
ret float 3.14159274101257324218750
}
@@ -55,7 +57,7 @@ define float @float_positive_zero(ptr %pf) nounwind {
;
; CHECKZFINX-LABEL: float_positive_zero:
; CHECKZFINX: # %bb.0:
-; CHECKZFINX-NEXT: li a0, 0
+; CHECKZFINX-NEXT: fmv.s a0, zero
; CHECKZFINX-NEXT: ret
ret float 0.0
}
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
index cbd84634de11c0..73f73a6ad99086 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
@@ -279,10 +279,10 @@ define float @sincos_f32(float %a) nounwind strictfp {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: call sinf
-; RV32IZFINX-NEXT: mv s1, a0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s s1, a0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call cosf
; RV32IZFINX-NEXT: fadd.s a0, s1, a0
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -297,10 +297,10 @@ define float @sincos_f32(float %a) nounwind strictfp {
; RV64IZFINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: mv s0, a0
+; RV64IZFINX-NEXT: fmv.s s0, a0
; RV64IZFINX-NEXT: call sinf
-; RV64IZFINX-NEXT: mv s1, a0
-; RV64IZFINX-NEXT: mv a0, s0
+; RV64IZFINX-NEXT: fmv.s s1, a0
+; RV64IZFINX-NEXT: fmv.s a0, s0
; RV64IZFINX-NEXT: call cosf
; RV64IZFINX-NEXT: fadd.s a0, s1, a0
; RV64IZFINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
index 52442026dab502..1121a37e5d7e9a 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -136,12 +136,7 @@ define float @sin_f32(float %a) nounwind {
;
; RV64IZFINX-LABEL: sin_f32:
; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: addi sp, sp, -16
-; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call sinf
-; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IZFINX-NEXT: addi sp, sp, 16
-; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: tail sinf
;
; RV32I-LABEL: sin_f32:
; RV32I: # %bb.0:
@@ -181,12 +176,7 @@ define float @cos_f32(float %a) nounwind {
;
; RV64IZFINX-LABEL: cos_f32:
; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: addi sp, sp, -16
-; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call cosf
-; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IZFINX-NEXT: addi sp, sp, 16
-; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: tail cosf
;
; RV32I-LABEL: cos_f32:
; RV32I: # %bb.0:
@@ -235,10 +225,10 @@ define float @sincos_f32(float %a) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: call sinf
-; RV32IZFINX-NEXT: mv s1, a0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s s1, a0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call cosf
; RV32IZFINX-NEXT: fadd.s a0, s1, a0
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -253,10 +243,10 @@ define float @sincos_f32(float %a) nounwind {
; RV64IZFINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: mv s0, a0
+; RV64IZFINX-NEXT: fmv.s s0, a0
; RV64IZFINX-NEXT: call sinf
-; RV64IZFINX-NEXT: mv s1, a0
-; RV64IZFINX-NEXT: mv a0, s0
+; RV64IZFINX-NEXT: fmv.s s1, a0
+; RV64IZFINX-NEXT: fmv.s a0, s0
; RV64IZFINX-NEXT: call cosf
; RV64IZFINX-NEXT: fadd.s a0, s1, a0
; RV64IZFINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -327,12 +317,7 @@ define float @pow_f32(float %a, float %b) nounwind {
;
; RV64IZFINX-LABEL: pow_f32:
; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: addi sp, sp, -16
-; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call powf
-; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IZFINX-NEXT: addi sp, sp, 16
-; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: tail powf
;
; RV32I-LABEL: pow_f32:
; RV32I: # %bb.0:
@@ -372,12 +357,7 @@ define float @exp_f32(float %a) nounwind {
;
; RV64IZFINX-LABEL: exp_f32:
; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: addi sp, sp, -16
-; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call expf
-; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IZFINX-NEXT: addi sp, sp, 16
-; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: tail expf
;
; RV32I-LABEL: exp_f32:
; RV32I: # %bb.0:
@@ -417,12 +397,7 @@ define float @exp2_f32(float %a) nounwind {
;
; RV64IZFINX-LABEL: exp2_f32:
; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: addi sp, sp, -16
-; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call exp2f
-; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IZFINX-NEXT: addi sp, sp, 16
-; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: tail exp2f
;
; RV32I-LABEL: exp2_f32:
; RV32I: # %bb.0:
@@ -462,12 +437,7 @@ define float @log_f32(float %a) nounwind {
;
; RV64IZFINX-LABEL: log_f32:
; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: addi sp, sp, -16
-; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call logf
-; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IZFINX-NEXT: addi sp, sp, 16
-; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: tail logf
;
; RV32I-LABEL: log_f32:
; RV32I: # %bb.0:
@@ -507,12 +477,7 @@ define float @log10_f32(float %a) nounwind {
;
; RV64IZFINX-LABEL: log10_f32:
; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: addi sp, sp, -16
-; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call log10f
-; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IZFINX-NEXT: addi sp, sp, 16
-; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: tail log10f
;
; RV32I-LABEL: log10_f32:
; RV32I: # %bb.0:
@@ -552,12 +517,7 @@ define float @log2_f32(float %a) nounwind {
;
; RV64IZFINX-LABEL: log2_f32:
; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: addi sp, sp, -16
-; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call log2f
-; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IZFINX-NEXT: addi sp, sp, 16
-; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: tail log2f
;
; RV32I-LABEL: log2_f32:
; RV32I: # %bb.0:
@@ -698,8 +658,7 @@ define float @fabs_f32(float %a) nounwind {
;
; RV64IZFINX-LABEL: fabs_f32:
; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: slli a0, a0, 33
-; RV64IZFINX-NEXT: srli a0, a0, 33
+; RV64IZFINX-NEXT: fabs.s a0, a0
; RV64IZFINX-NEXT: ret
;
; RV32I-LABEL: fabs_f32:
@@ -1195,12 +1154,7 @@ define float @nearbyint_f32(float %a) nounwind {
;
; RV64IZFINX-LABEL: nearbyint_f32:
; RV64IZFINX: # %bb.0:
-; RV64IZFINX-NEXT: addi sp, sp, -16
-; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: call nearbyintf
-; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64IZFINX-NEXT: addi sp, sp, 16
-; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: tail nearbyintf
;
; RV32I-LABEL: nearbyint_f32:
; RV32I: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll b/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll
index 0e00dff0b64245..14f422f6c7a02a 100644
--- a/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll
+++ b/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll
@@ -43,7 +43,7 @@ define float @fminimum_f32(float %a, float %b) nounwind {
; RV32IZFINX-LABEL: fminimum_f32:
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: feq.s a3, a0, a0
-; RV32IZFINX-NEXT: mv a2, a1
+; RV32IZFINX-NEXT: fmv.s a2, a1
; RV32IZFINX-NEXT: beqz a3, .LBB0_3
; RV32IZFINX-NEXT: # %bb.1:
; RV32IZFINX-NEXT: feq.s a3, a1, a1
@@ -52,7 +52,7 @@ define float @fminimum_f32(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: fmin.s a0, a0, a2
; RV32IZFINX-NEXT: ret
; RV32IZFINX-NEXT: .LBB0_3:
-; RV32IZFINX-NEXT: mv a2, a0
+; RV32IZFINX-NEXT: fmv.s a2, a0
; RV32IZFINX-NEXT: feq.s a3, a1, a1
; RV32IZFINX-NEXT: bnez a3, .LBB0_2
; RV32IZFINX-NEXT: .LBB0_4:
@@ -81,7 +81,7 @@ define float @fminimum_f32(float %a, float %b) nounwind {
; RV64IZFINX-LABEL: fminimum_f32:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: feq.s a3, a0, a0
-; RV64IZFINX-NEXT: mv a2, a1
+; RV64IZFINX-NEXT: fmv.s a2, a1
; RV64IZFINX-NEXT: beqz a3, .LBB0_3
; RV64IZFINX-NEXT: # %bb.1:
; RV64IZFINX-NEXT: feq.s a3, a1, a1
@@ -90,7 +90,7 @@ define float @fminimum_f32(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: fmin.s a0, a0, a2
; RV64IZFINX-NEXT: ret
; RV64IZFINX-NEXT: .LBB0_3:
-; RV64IZFINX-NEXT: mv a2, a0
+; RV64IZFINX-NEXT: fmv.s a2, a0
; RV64IZFINX-NEXT: feq.s a3, a1, a1
; RV64IZFINX-NEXT: bnez a3, .LBB0_2
; RV64IZFINX-NEXT: .LBB0_4:
@@ -125,7 +125,7 @@ define float @fmaximum_f32(float %a, float %b) nounwind {
; RV32IZFINX-LABEL: fmaximum_f32:
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: feq.s a3, a0, a0
-; RV32IZFINX-NEXT: mv a2, a1
+; RV32IZFINX-NEXT: fmv.s a2, a1
; RV32IZFINX-NEXT: beqz a3, .LBB1_3
; RV32IZFINX-NEXT: # %bb.1:
; RV32IZFINX-NEXT: feq.s a3, a1, a1
@@ -134,7 +134,7 @@ define float @fmaximum_f32(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: fmax.s a0, a0, a2
; RV32IZFINX-NEXT: ret
; RV32IZFINX-NEXT: .LBB1_3:
-; RV32IZFINX-NEXT: mv a2, a0
+; RV32IZFINX-NEXT: fmv.s a2, a0
; RV32IZFINX-NEXT: feq.s a3, a1, a1
; RV32IZFINX-NEXT: bnez a3, .LBB1_2
; RV32IZFINX-NEXT: .LBB1_4:
@@ -163,7 +163,7 @@ define float @fmaximum_f32(float %a, float %b) nounwind {
; RV64IZFINX-LABEL: fmaximum_f32:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: feq.s a3, a0, a0
-; RV64IZFINX-NEXT: mv a2, a1
+; RV64IZFINX-NEXT: fmv.s a2, a1
; RV64IZFINX-NEXT: beqz a3, .LBB1_3
; RV64IZFINX-NEXT: # %bb.1:
; RV64IZFINX-NEXT: feq.s a3, a1, a1
@@ -172,7 +172,7 @@ define float @fmaximum_f32(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: fmax.s a0, a0, a2
; RV64IZFINX-NEXT: ret
; RV64IZFINX-NEXT: .LBB1_3:
-; RV64IZFINX-NEXT: mv a2, a0
+; RV64IZFINX-NEXT: fmv.s a2, a0
; RV64IZFINX-NEXT: feq.s a3, a1, a1
; RV64IZFINX-NEXT: bnez a3, .LBB1_2
; RV64IZFINX-NEXT: .LBB1_4:
diff --git a/llvm/test/CodeGen/RISCV/float-mem.ll b/llvm/test/CodeGen/RISCV/float-mem.ll
index 3779d39a753e1e..3038a03d1ada00 100644
--- a/llvm/test/CodeGen/RISCV/float-mem.ll
+++ b/llvm/test/CodeGen/RISCV/float-mem.ll
@@ -170,7 +170,7 @@ define dso_local float @flw_stack(float %a) nounwind {
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: addi a0, sp, 4
; RV32IZFINX-NEXT: call notdead
; RV32IZFINX-NEXT: lw a0, 4(sp)
@@ -185,7 +185,7 @@ define dso_local float @flw_stack(float %a) nounwind {
; RV64IZFINX-NEXT: addi sp, sp, -32
; RV64IZFINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: mv s0, a0
+; RV64IZFINX-NEXT: fmv.s s0, a0
; RV64IZFINX-NEXT: addi a0, sp, 12
; RV64IZFINX-NEXT: call notdead
; RV64IZFINX-NEXT: lw a0, 12(sp)
diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
index 198b18c75272a9..dbfecd94779bed 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
@@ -96,7 +96,7 @@ define i64 @test_floor_si64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -108,7 +108,7 @@ define i64 @test_floor_si64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB1_2:
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
@@ -225,7 +225,7 @@ define i64 @test_floor_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -237,7 +237,7 @@ define i64 @test_floor_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB3_2:
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: lui a2, 391168
@@ -354,7 +354,7 @@ define i64 @test_ceil_si64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -366,7 +366,7 @@ define i64 @test_ceil_si64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB5_2:
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
@@ -483,7 +483,7 @@ define i64 @test_ceil_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -495,7 +495,7 @@ define i64 @test_ceil_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB7_2:
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: lui a2, 391168
@@ -612,7 +612,7 @@ define i64 @test_trunc_si64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -624,7 +624,7 @@ define i64 @test_trunc_si64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB9_2:
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
@@ -741,7 +741,7 @@ define i64 @test_trunc_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -753,7 +753,7 @@ define i64 @test_trunc_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB11_2:
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: lui a2, 391168
@@ -870,7 +870,7 @@ define i64 @test_round_si64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -882,7 +882,7 @@ define i64 @test_round_si64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB13_2:
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
@@ -999,7 +999,7 @@ define i64 @test_round_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -1011,7 +1011,7 @@ define i64 @test_round_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB15_2:
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: lui a2, 391168
@@ -1128,7 +1128,7 @@ define i64 @test_roundeven_si64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -1140,7 +1140,7 @@ define i64 @test_roundeven_si64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB17_2:
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
@@ -1257,7 +1257,7 @@ define i64 @test_roundeven_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -1269,7 +1269,7 @@ define i64 @test_roundeven_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB19_2:
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: lui a2, 391168
@@ -1386,7 +1386,7 @@ define i64 @test_rint_si64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -1398,7 +1398,7 @@ define i64 @test_rint_si64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB21_2:
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
@@ -1515,7 +1515,7 @@ define i64 @test_rint_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fmv.s s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -1527,7 +1527,7 @@ define i64 @test_rint_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB23_2:
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
-; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: fmv.s a0, s0
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: lui a2, 391168
diff --git a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
index a2ff0d33e2d31a..0e0d080373756f 100644
--- a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
@@ -16,7 +16,7 @@ define float @select_fcmp_false(float %a, float %b) nounwind {
;
; CHECKZFINX-LABEL: select_fcmp_false:
; CHECKZFINX: # %bb.0:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: ret
%1 = fcmp false float %a, %b
%2 = select i1 %1, float %a, float %b
@@ -38,7 +38,7 @@ define float @select_fcmp_oeq(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: feq.s a2, a0, a1
; CHECKZFINX-NEXT: bnez a2, .LBB1_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: .LBB1_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp oeq float %a, %b
@@ -61,7 +61,7 @@ define float @select_fcmp_ogt(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: flt.s a2, a1, a0
; CHECKZFINX-NEXT: bnez a2, .LBB2_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: .LBB2_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp ogt float %a, %b
@@ -84,7 +84,7 @@ define float @select_fcmp_oge(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: fle.s a2, a1, a0
; CHECKZFINX-NEXT: bnez a2, .LBB3_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: .LBB3_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp oge float %a, %b
@@ -107,7 +107,7 @@ define float @select_fcmp_olt(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: flt.s a2, a0, a1
; CHECKZFINX-NEXT: bnez a2, .LBB4_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: .LBB4_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp olt float %a, %b
@@ -130,7 +130,7 @@ define float @select_fcmp_ole(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: fle.s a2, a0, a1
; CHECKZFINX-NEXT: bnez a2, .LBB5_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: .LBB5_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp ole float %a, %b
@@ -157,7 +157,7 @@ define float @select_fcmp_one(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: or a2, a3, a2
; CHECKZFINX-NEXT: bnez a2, .LBB6_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: .LBB6_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp one float %a, %b
@@ -184,7 +184,7 @@ define float @select_fcmp_ord(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: and a2, a3, a2
; CHECKZFINX-NEXT: bnez a2, .LBB7_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: .LBB7_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp ord float %a, %b
@@ -211,7 +211,7 @@ define float @select_fcmp_ueq(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: or a2, a3, a2
; CHECKZFINX-NEXT: beqz a2, .LBB8_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: .LBB8_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp ueq float %a, %b
@@ -234,7 +234,7 @@ define float @select_fcmp_ugt(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: fle.s a2, a0, a1
; CHECKZFINX-NEXT: beqz a2, .LBB9_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: .LBB9_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp ugt float %a, %b
@@ -257,7 +257,7 @@ define float @select_fcmp_uge(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: flt.s a2, a0, a1
; CHECKZFINX-NEXT: beqz a2, .LBB10_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: .LBB10_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp uge float %a, %b
@@ -280,7 +280,7 @@ define float @select_fcmp_ult(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: fle.s a2, a1, a0
; CHECKZFINX-NEXT: beqz a2, .LBB11_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: .LBB11_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp ult float %a, %b
@@ -303,7 +303,7 @@ define float @select_fcmp_ule(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: flt.s a2, a1, a0
; CHECKZFINX-NEXT: beqz a2, .LBB12_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: .LBB12_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp ule float %a, %b
@@ -326,7 +326,7 @@ define float @select_fcmp_une(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: feq.s a2, a0, a1
; CHECKZFINX-NEXT: beqz a2, .LBB13_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: .LBB13_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp une float %a, %b
@@ -353,7 +353,7 @@ define float @select_fcmp_uno(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: and a2, a3, a2
; CHECKZFINX-NEXT: beqz a2, .LBB14_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: fmv.s a0, a1
; CHECKZFINX-NEXT: .LBB14_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp uno float %a, %b
diff --git a/llvm/test/CodeGen/RISCV/float-select-icmp.ll b/llvm/test/CodeGen/RISCV/float-select-icmp.ll
index e8f420f1b07254..fd899b4ad45f6c 100644
--- a/llvm/test/CodeGen/RISCV/float-select-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-select-icmp.ll
@@ -21,9 +21,9 @@ define float @select_icmp_eq(i32 signext %a, i32 signext %b, float %c, float %d)
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: beq a0, a1, .LBB0_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: fmv.s a2, a3
; CHECKZFINX-NEXT: .LBB0_2:
-; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: fmv.s a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp eq i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -43,9 +43,9 @@ define float @select_icmp_ne(i32 signext %a, i32 signext %b, float %c, float %d)
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: bne a0, a1, .LBB1_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: fmv.s a2, a3
; CHECKZFINX-NEXT: .LBB1_2:
-; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: fmv.s a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp ne i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -65,9 +65,9 @@ define float @select_icmp_ugt(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: bltu a1, a0, .LBB2_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: fmv.s a2, a3
; CHECKZFINX-NEXT: .LBB2_2:
-; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: fmv.s a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp ugt i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -87,9 +87,9 @@ define float @select_icmp_uge(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: bgeu a0, a1, .LBB3_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: fmv.s a2, a3
; CHECKZFINX-NEXT: .LBB3_2:
-; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: fmv.s a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp uge i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -109,9 +109,9 @@ define float @select_icmp_ult(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: bltu a0, a1, .LBB4_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: fmv.s a2, a3
; CHECKZFINX-NEXT: .LBB4_2:
-; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: fmv.s a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp ult i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -131,9 +131,9 @@ define float @select_icmp_ule(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: bgeu a1, a0, .LBB5_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: fmv.s a2, a3
; CHECKZFINX-NEXT: .LBB5_2:
-; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: fmv.s a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp ule i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -153,9 +153,9 @@ define float @select_icmp_sgt(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: blt a1, a0, .LBB6_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: fmv.s a2, a3
; CHECKZFINX-NEXT: .LBB6_2:
-; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: fmv.s a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp sgt i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -175,9 +175,9 @@ define float @select_icmp_sge(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: bge a0, a1, .LBB7_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: fmv.s a2, a3
; CHECKZFINX-NEXT: .LBB7_2:
-; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: fmv.s a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp sge i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -197,9 +197,9 @@ define float @select_icmp_slt(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: blt a0, a1, .LBB8_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: fmv.s a2, a3
; CHECKZFINX-NEXT: .LBB8_2:
-; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: fmv.s a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp slt i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -219,9 +219,9 @@ define float @select_icmp_sle(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: bge a1, a0, .LBB9_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: fmv.s a2, a3
; CHECKZFINX-NEXT: .LBB9_2:
-; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: fmv.s a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp sle i32 %a, %b
%2 = select i1 %1, float %c, float %d
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index 0c84a08f1fd451..e6395de7458f99 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -2246,7 +2246,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: lui a0, 913408
; RV32IZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: lui a2, 524288
@@ -2293,7 +2293,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IZDINXZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZDINXZHINX-NEXT: lui a0, 913408
; RV32IZDINXZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZDINXZHINX-NEXT: mv a0, s0
+; RV32IZDINXZHINX-NEXT: fmv.s a0, s0
; RV32IZDINXZHINX-NEXT: call __fixsfdi
; RV32IZDINXZHINX-NEXT: lui a4, 524288
; RV32IZDINXZHINX-NEXT: lui a2, 524288
@@ -2637,7 +2637,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; CHECK32-IZHINXMIN-NEXT: fcvt.s.h s0, a0
; CHECK32-IZHINXMIN-NEXT: lui a0, 913408
; CHECK32-IZHINXMIN-NEXT: fle.s s1, a0, s0
-; CHECK32-IZHINXMIN-NEXT: mv a0, s0
+; CHECK32-IZHINXMIN-NEXT: fmv.s a0, s0
; CHECK32-IZHINXMIN-NEXT: call __fixsfdi
; CHECK32-IZHINXMIN-NEXT: lui a4, 524288
; CHECK32-IZHINXMIN-NEXT: lui a2, 524288
@@ -2685,7 +2685,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h s0, a0
; CHECK32-IZDINXZHINXMIN-NEXT: lui a0, 913408
; CHECK32-IZDINXZHINXMIN-NEXT: fle.s s1, a0, s0
-; CHECK32-IZDINXZHINXMIN-NEXT: mv a0, s0
+; CHECK32-IZDINXZHINXMIN-NEXT: fmv.s a0, s0
; CHECK32-IZDINXZHINXMIN-NEXT: call __fixsfdi
; CHECK32-IZDINXZHINXMIN-NEXT: lui a4, 524288
; CHECK32-IZDINXZHINXMIN-NEXT: lui a2, 524288
diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
index 81e29329e71817..2d6d41bc6474f6 100644
--- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
@@ -153,8 +153,8 @@ define half @powi_f16(half %a, i32 %b) nounwind {
; RV64IZHINX: # %bb.0:
; RV64IZHINX-NEXT: addi sp, sp, -16
; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: sext.w a1, a1
; RV64IZHINX-NEXT: fcvt.s.h a0, a0
+; RV64IZHINX-NEXT: sext.w a1, a1
; RV64IZHINX-NEXT: call __powisf2
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -233,8 +233,8 @@ define half @powi_f16(half %a, i32 %b) nounwind {
; RV64IZHINXMIN: # %bb.0:
; RV64IZHINXMIN-NEXT: addi sp, sp, -16
; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZHINXMIN-NEXT: sext.w a1, a1
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
+; RV64IZHINXMIN-NEXT: sext.w a1, a1
; RV64IZHINXMIN-NEXT: call __powisf2
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -533,10 +533,10 @@ define half @sincos_f16(half %a) nounwind {
; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call sinf
; RV32IZHINX-NEXT: fcvt.h.s s1, a0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call cosf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: fadd.h a0, s1, a0
@@ -553,10 +553,10 @@ define half @sincos_f16(half %a) nounwind {
; RV64IZHINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h s0, a0
-; RV64IZHINX-NEXT: mv a0, s0
+; RV64IZHINX-NEXT: fmv.s a0, s0
; RV64IZHINX-NEXT: call sinf
; RV64IZHINX-NEXT: fcvt.h.s s1, a0
-; RV64IZHINX-NEXT: mv a0, s0
+; RV64IZHINX-NEXT: fmv.s a0, s0
; RV64IZHINX-NEXT: call cosf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: fadd.h a0, s1, a0
@@ -775,10 +775,10 @@ define half @sincos_f16(half %a) nounwind {
; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
-; RV32IZHINXMIN-NEXT: mv a0, s0
+; RV32IZHINXMIN-NEXT: fmv.s a0, s0
; RV32IZHINXMIN-NEXT: call sinf
; RV32IZHINXMIN-NEXT: fcvt.h.s s1, a0
-; RV32IZHINXMIN-NEXT: mv a0, s0
+; RV32IZHINXMIN-NEXT: fmv.s a0, s0
; RV32IZHINXMIN-NEXT: call cosf
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
@@ -798,10 +798,10 @@ define half @sincos_f16(half %a) nounwind {
; RV64IZHINXMIN-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: fcvt.s.h s0, a0
-; RV64IZHINXMIN-NEXT: mv a0, s0
+; RV64IZHINXMIN-NEXT: fmv.s a0, s0
; RV64IZHINXMIN-NEXT: call sinf
; RV64IZHINXMIN-NEXT: fcvt.h.s s1, a0
-; RV64IZHINXMIN-NEXT: mv a0, s0
+; RV64IZHINXMIN-NEXT: fmv.s a0, s0
; RV64IZHINXMIN-NEXT: call cosf
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
index 9e1a26e74d70b9..b83f121e07f6e7 100644
--- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
@@ -170,7 +170,7 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: lui a0, 913408
; RV32IZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: lui a2, 524288
@@ -311,7 +311,7 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: lui a0, 913408
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
-; RV32IZHINXMIN-NEXT: mv a0, s0
+; RV32IZHINXMIN-NEXT: fmv.s a0, s0
; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: lui a2, 524288
@@ -568,7 +568,7 @@ define i64 @test_floor_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
; RV32IZHINX-NEXT: lui a2, 391168
@@ -683,7 +683,7 @@ define i64 @test_floor_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
-; RV32IZHINXMIN-NEXT: mv a0, s0
+; RV32IZHINXMIN-NEXT: fmv.s a0, s0
; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: and a0, s1, a0
; RV32IZHINXMIN-NEXT: lui a2, 391168
@@ -878,7 +878,7 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: lui a0, 913408
; RV32IZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: lui a2, 524288
@@ -1019,7 +1019,7 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: lui a0, 913408
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
-; RV32IZHINXMIN-NEXT: mv a0, s0
+; RV32IZHINXMIN-NEXT: fmv.s a0, s0
; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: lui a2, 524288
@@ -1276,7 +1276,7 @@ define i64 @test_ceil_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
; RV32IZHINX-NEXT: lui a2, 391168
@@ -1391,7 +1391,7 @@ define i64 @test_ceil_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
-; RV32IZHINXMIN-NEXT: mv a0, s0
+; RV32IZHINXMIN-NEXT: fmv.s a0, s0
; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: and a0, s1, a0
; RV32IZHINXMIN-NEXT: lui a2, 391168
@@ -1586,7 +1586,7 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: lui a0, 913408
; RV32IZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: lui a2, 524288
@@ -1727,7 +1727,7 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: lui a0, 913408
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
-; RV32IZHINXMIN-NEXT: mv a0, s0
+; RV32IZHINXMIN-NEXT: fmv.s a0, s0
; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: lui a2, 524288
@@ -1984,7 +1984,7 @@ define i64 @test_trunc_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
; RV32IZHINX-NEXT: lui a2, 391168
@@ -2099,7 +2099,7 @@ define i64 @test_trunc_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
-; RV32IZHINXMIN-NEXT: mv a0, s0
+; RV32IZHINXMIN-NEXT: fmv.s a0, s0
; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: and a0, s1, a0
; RV32IZHINXMIN-NEXT: lui a2, 391168
@@ -2294,7 +2294,7 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: lui a0, 913408
; RV32IZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: lui a2, 524288
@@ -2435,7 +2435,7 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: lui a0, 913408
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
-; RV32IZHINXMIN-NEXT: mv a0, s0
+; RV32IZHINXMIN-NEXT: fmv.s a0, s0
; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: lui a2, 524288
@@ -2692,7 +2692,7 @@ define i64 @test_round_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
; RV32IZHINX-NEXT: lui a2, 391168
@@ -2807,7 +2807,7 @@ define i64 @test_round_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
-; RV32IZHINXMIN-NEXT: mv a0, s0
+; RV32IZHINXMIN-NEXT: fmv.s a0, s0
; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: and a0, s1, a0
; RV32IZHINXMIN-NEXT: lui a2, 391168
@@ -3002,7 +3002,7 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: lui a0, 913408
; RV32IZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: lui a2, 524288
@@ -3143,7 +3143,7 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: lui a0, 913408
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
-; RV32IZHINXMIN-NEXT: mv a0, s0
+; RV32IZHINXMIN-NEXT: fmv.s a0, s0
; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: lui a2, 524288
@@ -3400,7 +3400,7 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
; RV32IZHINX-NEXT: lui a2, 391168
@@ -3515,7 +3515,7 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
-; RV32IZHINXMIN-NEXT: mv a0, s0
+; RV32IZHINXMIN-NEXT: fmv.s a0, s0
; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: and a0, s1, a0
; RV32IZHINXMIN-NEXT: lui a2, 391168
@@ -3710,7 +3710,7 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: lui a0, 913408
; RV32IZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: lui a2, 524288
@@ -3851,7 +3851,7 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: lui a0, 913408
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
-; RV32IZHINXMIN-NEXT: mv a0, s0
+; RV32IZHINXMIN-NEXT: fmv.s a0, s0
; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: lui a2, 524288
@@ -4108,7 +4108,7 @@ define i64 @test_rint_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
-; RV32IZHINX-NEXT: mv a0, s0
+; RV32IZHINX-NEXT: fmv.s a0, s0
; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
; RV32IZHINX-NEXT: lui a2, 391168
@@ -4223,7 +4223,7 @@ define i64 @test_rint_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
-; RV32IZHINXMIN-NEXT: mv a0, s0
+; RV32IZHINXMIN-NEXT: fmv.s a0, s0
; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: and a0, s1, a0
; RV32IZHINXMIN-NEXT: lui a2, 391168
diff --git a/llvm/test/CodeGen/RISCV/kcfi-mir.ll b/llvm/test/CodeGen/RISCV/kcfi-mir.ll
index e478930d59abc5..f35be0564cb25f 100644
--- a/llvm/test/CodeGen/RISCV/kcfi-mir.ll
+++ b/llvm/test/CodeGen/RISCV/kcfi-mir.ll
@@ -10,7 +10,7 @@ define void @f1(ptr noundef %x) !kcfi_type !1 {
; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 16
; CHECK-NEXT: SD killed $x1, $x2, 8 :: (store (s64) into %stack.0)
; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $x1, -8
- ; CHECK-NEXT: BUNDLE implicit-def $x6, implicit-def $x6_h, implicit-def $x7, implicit-def $x7_h, implicit-def $x28, implicit-def $x28_h, implicit-def $x29, implicit-def $x29_h, implicit-def $x30, implicit-def $x30_h, implicit-def $x31, implicit-def $x31_h, implicit-def dead $x1, implicit-def $x2, implicit-def $x2_h, implicit killed $x10 {
+ ; CHECK-NEXT: BUNDLE implicit-def $x6, implicit-def $x6_w, implicit-def $x6_h, implicit-def $x7, implicit-def $x7_w, implicit-def $x7_h, implicit-def $x28, implicit-def $x28_w, implicit-def $x28_h, implicit-def $x29, implicit-def $x29_w, implicit-def $x29_h, implicit-def $x30, implicit-def $x30_w, implicit-def $x30_h, implicit-def $x31, implicit-def $x31_w, implicit-def $x31_h, implicit-def dead $x1, implicit-def $x2, implicit-def $x2_w, implicit-def $x2_h, implicit killed $x10 {
; CHECK-NEXT: KCFI_CHECK $x10, 12345678, implicit-def $x6, implicit-def $x7, implicit-def $x28, implicit-def $x29, implicit-def $x30, implicit-def $x31
; CHECK-NEXT: PseudoCALLIndirect killed $x10, csr_ilp32_lp64, implicit-def dead $x1, implicit-def $x2
; CHECK-NEXT: }
@@ -26,7 +26,7 @@ define void @f2(ptr noundef %x) #0 {
; CHECK: bb.0 (%ir-block.0):
; CHECK-NEXT: liveins: $x10
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: BUNDLE implicit-def $x6, implicit-def $x6_h, implicit-def $x7, implicit-def $x7_h, implicit-def $x28, implicit-def $x28_h, implicit-def $x29, implicit-def $x29_h, implicit-def $x30, implicit-def $x30_h, implicit-def $x31, implicit-def $x31_h, implicit killed $x10, implicit $x2 {
+ ; CHECK-NEXT: BUNDLE implicit-def $x6, implicit-def $x6_w, implicit-def $x6_h, implicit-def $x7, implicit-def $x7_w, implicit-def $x7_h, implicit-def $x28, implicit-def $x28_w, implicit-def $x28_h, implicit-def $x29, implicit-def $x29_w, implicit-def $x29_h, implicit-def $x30, implicit-def $x30_w, implicit-def $x30_h, implicit-def $x31, implicit-def $x31_w, implicit-def $x31_h, implicit killed $x10, implicit $x2 {
; CHECK-NEXT: KCFI_CHECK $x10, 12345678, implicit-def $x6, implicit-def $x7, implicit-def $x28, implicit-def $x29, implicit-def $x30, implicit-def $x31
; CHECK-NEXT: PseudoTAILIndirect killed $x10, implicit $x2
; CHECK-NEXT: }
diff --git a/llvm/test/CodeGen/RISCV/llvm.frexp.ll b/llvm/test/CodeGen/RISCV/llvm.frexp.ll
index 30f9dd1e516585..01719bf9f18872 100644
--- a/llvm/test/CodeGen/RISCV/llvm.frexp.ll
+++ b/llvm/test/CodeGen/RISCV/llvm.frexp.ll
@@ -62,8 +62,10 @@ define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: call __truncsfhf2
; RV32IZFINXZDINX-NEXT: lw a1, 8(sp)
+; RV32IZFINXZDINX-NEXT: # kill: def $x10_w killed $x10_w def $x10
; RV32IZFINXZDINX-NEXT: lui a2, 1048560
; RV32IZFINXZDINX-NEXT: or a0, a0, a2
+; RV32IZFINXZDINX-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -77,8 +79,10 @@ define { half, i32 } @test_frexp_f16_i32(half %a) nounwind {
; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: call __truncsfhf2
; RV64IZFINXZDINX-NEXT: ld a1, 0(sp)
+; RV64IZFINXZDINX-NEXT: # kill: def $x10_w killed $x10_w def $x10
; RV64IZFINXZDINX-NEXT: lui a2, 1048560
; RV64IZFINXZDINX-NEXT: or a0, a0, a2
+; RV64IZFINXZDINX-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -157,8 +161,10 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) nounwind {
; RV32IZFINXZDINX-NEXT: addi a1, sp, 8
; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: call __truncsfhf2
+; RV32IZFINXZDINX-NEXT: # kill: def $x10_w killed $x10_w def $x10
; RV32IZFINXZDINX-NEXT: lui a1, 1048560
; RV32IZFINXZDINX-NEXT: or a0, a0, a1
+; RV32IZFINXZDINX-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; RV32IZFINXZDINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT: addi sp, sp, 16
; RV32IZFINXZDINX-NEXT: ret
@@ -171,8 +177,10 @@ define half @test_frexp_f16_i32_only_use_fract(half %a) nounwind {
; RV64IZFINXZDINX-NEXT: mv a1, sp
; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: call __truncsfhf2
+; RV64IZFINXZDINX-NEXT: # kill: def $x10_w killed $x10_w def $x10
; RV64IZFINXZDINX-NEXT: lui a1, 1048560
; RV64IZFINXZDINX-NEXT: or a0, a0, a1
+; RV64IZFINXZDINX-NEXT: # kill: def $x10_w killed $x10_w killed $x10
; RV64IZFINXZDINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFINXZDINX-NEXT: addi sp, sp, 16
; RV64IZFINXZDINX-NEXT: ret
@@ -638,25 +646,25 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
; RV32IZFINXZDINX-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: mv s0, a4
-; RV32IZFINXZDINX-NEXT: mv s1, a3
-; RV32IZFINXZDINX-NEXT: mv s2, a2
-; RV32IZFINXZDINX-NEXT: mv a2, a1
+; RV32IZFINXZDINX-NEXT: fmv.s s0, a4
+; RV32IZFINXZDINX-NEXT: fmv.s s1, a3
+; RV32IZFINXZDINX-NEXT: fmv.s s2, a2
+; RV32IZFINXZDINX-NEXT: fmv.s a2, a1
; RV32IZFINXZDINX-NEXT: mv s3, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT: mv a0, a2
+; RV32IZFINXZDINX-NEXT: fmv.s a0, a2
; RV32IZFINXZDINX-NEXT: call frexpf
-; RV32IZFINXZDINX-NEXT: mv s4, a0
+; RV32IZFINXZDINX-NEXT: fmv.s s4, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 12
-; RV32IZFINXZDINX-NEXT: mv a0, s2
+; RV32IZFINXZDINX-NEXT: fmv.s a0, s2
; RV32IZFINXZDINX-NEXT: call frexpf
-; RV32IZFINXZDINX-NEXT: mv s2, a0
+; RV32IZFINXZDINX-NEXT: fmv.s s2, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 16
-; RV32IZFINXZDINX-NEXT: mv a0, s1
+; RV32IZFINXZDINX-NEXT: fmv.s a0, s1
; RV32IZFINXZDINX-NEXT: call frexpf
-; RV32IZFINXZDINX-NEXT: mv s1, a0
+; RV32IZFINXZDINX-NEXT: fmv.s s1, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 20
-; RV32IZFINXZDINX-NEXT: mv a0, s0
+; RV32IZFINXZDINX-NEXT: fmv.s a0, s0
; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: lw a1, 20(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 16(sp)
@@ -688,25 +696,25 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
; RV64IZFINXZDINX-NEXT: sd s2, 48(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s3, 40(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s4, 32(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: mv s0, a4
-; RV64IZFINXZDINX-NEXT: mv s1, a3
-; RV64IZFINXZDINX-NEXT: mv s2, a2
-; RV64IZFINXZDINX-NEXT: mv a2, a1
+; RV64IZFINXZDINX-NEXT: fmv.s s0, a4
+; RV64IZFINXZDINX-NEXT: fmv.s s1, a3
+; RV64IZFINXZDINX-NEXT: fmv.s s2, a2
+; RV64IZFINXZDINX-NEXT: fmv.s a2, a1
; RV64IZFINXZDINX-NEXT: mv s3, a0
; RV64IZFINXZDINX-NEXT: mv a1, sp
-; RV64IZFINXZDINX-NEXT: mv a0, a2
+; RV64IZFINXZDINX-NEXT: fmv.s a0, a2
; RV64IZFINXZDINX-NEXT: call frexpf
-; RV64IZFINXZDINX-NEXT: mv s4, a0
+; RV64IZFINXZDINX-NEXT: fmv.s s4, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV64IZFINXZDINX-NEXT: mv a0, s2
+; RV64IZFINXZDINX-NEXT: fmv.s a0, s2
; RV64IZFINXZDINX-NEXT: call frexpf
-; RV64IZFINXZDINX-NEXT: mv s2, a0
+; RV64IZFINXZDINX-NEXT: fmv.s s2, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 16
-; RV64IZFINXZDINX-NEXT: mv a0, s1
+; RV64IZFINXZDINX-NEXT: fmv.s a0, s1
; RV64IZFINXZDINX-NEXT: call frexpf
-; RV64IZFINXZDINX-NEXT: mv s1, a0
+; RV64IZFINXZDINX-NEXT: fmv.s s1, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 24
-; RV64IZFINXZDINX-NEXT: mv a0, s0
+; RV64IZFINXZDINX-NEXT: fmv.s a0, s0
; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: ld a1, 24(sp)
; RV64IZFINXZDINX-NEXT: ld a2, 16(sp)
@@ -922,25 +930,25 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
; RV32IZFINXZDINX-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: mv s0, a4
-; RV32IZFINXZDINX-NEXT: mv s1, a3
-; RV32IZFINXZDINX-NEXT: mv s2, a2
-; RV32IZFINXZDINX-NEXT: mv a2, a1
+; RV32IZFINXZDINX-NEXT: fmv.s s0, a4
+; RV32IZFINXZDINX-NEXT: fmv.s s1, a3
+; RV32IZFINXZDINX-NEXT: fmv.s s2, a2
+; RV32IZFINXZDINX-NEXT: fmv.s a2, a1
; RV32IZFINXZDINX-NEXT: mv s3, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT: mv a0, a2
+; RV32IZFINXZDINX-NEXT: fmv.s a0, a2
; RV32IZFINXZDINX-NEXT: call frexpf
-; RV32IZFINXZDINX-NEXT: mv s4, a0
+; RV32IZFINXZDINX-NEXT: fmv.s s4, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 12
-; RV32IZFINXZDINX-NEXT: mv a0, s2
+; RV32IZFINXZDINX-NEXT: fmv.s a0, s2
; RV32IZFINXZDINX-NEXT: call frexpf
-; RV32IZFINXZDINX-NEXT: mv s2, a0
+; RV32IZFINXZDINX-NEXT: fmv.s s2, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 16
-; RV32IZFINXZDINX-NEXT: mv a0, s1
+; RV32IZFINXZDINX-NEXT: fmv.s a0, s1
; RV32IZFINXZDINX-NEXT: call frexpf
-; RV32IZFINXZDINX-NEXT: mv s1, a0
+; RV32IZFINXZDINX-NEXT: fmv.s s1, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 20
-; RV32IZFINXZDINX-NEXT: mv a0, s0
+; RV32IZFINXZDINX-NEXT: fmv.s a0, s0
; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: sw a0, 12(s3)
; RV32IZFINXZDINX-NEXT: sw s1, 8(s3)
@@ -964,25 +972,25 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
; RV64IZFINXZDINX-NEXT: sd s2, 48(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s3, 40(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s4, 32(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: mv s0, a4
-; RV64IZFINXZDINX-NEXT: mv s1, a3
-; RV64IZFINXZDINX-NEXT: mv s2, a2
-; RV64IZFINXZDINX-NEXT: mv a2, a1
+; RV64IZFINXZDINX-NEXT: fmv.s s0, a4
+; RV64IZFINXZDINX-NEXT: fmv.s s1, a3
+; RV64IZFINXZDINX-NEXT: fmv.s s2, a2
+; RV64IZFINXZDINX-NEXT: fmv.s a2, a1
; RV64IZFINXZDINX-NEXT: mv s3, a0
; RV64IZFINXZDINX-NEXT: mv a1, sp
-; RV64IZFINXZDINX-NEXT: mv a0, a2
+; RV64IZFINXZDINX-NEXT: fmv.s a0, a2
; RV64IZFINXZDINX-NEXT: call frexpf
-; RV64IZFINXZDINX-NEXT: mv s4, a0
+; RV64IZFINXZDINX-NEXT: fmv.s s4, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV64IZFINXZDINX-NEXT: mv a0, s2
+; RV64IZFINXZDINX-NEXT: fmv.s a0, s2
; RV64IZFINXZDINX-NEXT: call frexpf
-; RV64IZFINXZDINX-NEXT: mv s2, a0
+; RV64IZFINXZDINX-NEXT: fmv.s s2, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 16
-; RV64IZFINXZDINX-NEXT: mv a0, s1
+; RV64IZFINXZDINX-NEXT: fmv.s a0, s1
; RV64IZFINXZDINX-NEXT: call frexpf
-; RV64IZFINXZDINX-NEXT: mv s1, a0
+; RV64IZFINXZDINX-NEXT: fmv.s s1, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 24
-; RV64IZFINXZDINX-NEXT: mv a0, s0
+; RV64IZFINXZDINX-NEXT: fmv.s a0, s0
; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: sw a0, 12(s3)
; RV64IZFINXZDINX-NEXT: sw s1, 8(s3)
@@ -1172,22 +1180,22 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
; RV32IZFINXZDINX-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: mv s0, a4
-; RV32IZFINXZDINX-NEXT: mv s1, a3
-; RV32IZFINXZDINX-NEXT: mv s2, a2
-; RV32IZFINXZDINX-NEXT: mv a2, a1
+; RV32IZFINXZDINX-NEXT: fmv.s s0, a4
+; RV32IZFINXZDINX-NEXT: fmv.s s1, a3
+; RV32IZFINXZDINX-NEXT: fmv.s s2, a2
+; RV32IZFINXZDINX-NEXT: fmv.s a2, a1
; RV32IZFINXZDINX-NEXT: mv s3, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 12
-; RV32IZFINXZDINX-NEXT: mv a0, a2
+; RV32IZFINXZDINX-NEXT: fmv.s a0, a2
; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: addi a1, sp, 16
-; RV32IZFINXZDINX-NEXT: mv a0, s2
+; RV32IZFINXZDINX-NEXT: fmv.s a0, s2
; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: addi a1, sp, 20
-; RV32IZFINXZDINX-NEXT: mv a0, s1
+; RV32IZFINXZDINX-NEXT: fmv.s a0, s1
; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: addi a1, sp, 24
-; RV32IZFINXZDINX-NEXT: mv a0, s0
+; RV32IZFINXZDINX-NEXT: fmv.s a0, s0
; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: lw a0, 24(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 20(sp)
@@ -1213,22 +1221,22 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
; RV64IZFINXZDINX-NEXT: sd s1, 56(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s2, 48(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s3, 40(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: mv s0, a4
-; RV64IZFINXZDINX-NEXT: mv s1, a3
-; RV64IZFINXZDINX-NEXT: mv s2, a2
-; RV64IZFINXZDINX-NEXT: mv a2, a1
+; RV64IZFINXZDINX-NEXT: fmv.s s0, a4
+; RV64IZFINXZDINX-NEXT: fmv.s s1, a3
+; RV64IZFINXZDINX-NEXT: fmv.s s2, a2
+; RV64IZFINXZDINX-NEXT: fmv.s a2, a1
; RV64IZFINXZDINX-NEXT: mv s3, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV64IZFINXZDINX-NEXT: mv a0, a2
+; RV64IZFINXZDINX-NEXT: fmv.s a0, a2
; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: addi a1, sp, 16
-; RV64IZFINXZDINX-NEXT: mv a0, s2
+; RV64IZFINXZDINX-NEXT: fmv.s a0, s2
; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: addi a1, sp, 24
-; RV64IZFINXZDINX-NEXT: mv a0, s1
+; RV64IZFINXZDINX-NEXT: fmv.s a0, s1
; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: addi a1, sp, 32
-; RV64IZFINXZDINX-NEXT: mv a0, s0
+; RV64IZFINXZDINX-NEXT: fmv.s a0, s0
; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: ld a0, 32(sp)
; RV64IZFINXZDINX-NEXT: ld a1, 24(sp)
>From 4aab31bc9c7b26f45cbcb0abafe85649c3b2db54 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Sat, 14 Sep 2024 18:25:35 -0700
Subject: [PATCH 12/13] fixup! Add PseudoMV. Add load/store to more places.
---
.../Target/RISCV/RISCVExpandPseudoInsts.cpp | 21 +++
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 14 +-
llvm/lib/Target/RISCV/RISCVInstrInfoC.td | 48 +++++++
llvm/lib/Target/RISCV/RISCVInstrInfoF.td | 7 +
.../Target/RISCV/RISCVMakeCompressible.cpp | 9 ++
.../lib/Target/RISCV/RISCVMergeBaseOffset.cpp | 2 +
llvm/lib/Target/RISCV/RISCVRegisterInfo.td | 3 +
llvm/test/CodeGen/RISCV/float-br-fcmp.ll | 8 +-
llvm/test/CodeGen/RISCV/float-convert.ll | 8 +-
llvm/test/CodeGen/RISCV/float-imm.ll | 2 +-
.../CodeGen/RISCV/float-intrinsics-strict.ll | 12 +-
llvm/test/CodeGen/RISCV/float-intrinsics.ll | 12 +-
.../CodeGen/RISCV/float-maximum-minimum.ll | 16 +--
llvm/test/CodeGen/RISCV/float-mem.ll | 4 +-
.../CodeGen/RISCV/float-round-conv-sat.ll | 48 +++----
llvm/test/CodeGen/RISCV/float-select-fcmp.ll | 30 ++---
llvm/test/CodeGen/RISCV/float-select-icmp.ll | 40 +++---
llvm/test/CodeGen/RISCV/half-convert.ll | 8 +-
llvm/test/CodeGen/RISCV/half-intrinsics.ll | 16 +--
.../test/CodeGen/RISCV/half-round-conv-sat.ll | 48 +++----
llvm/test/CodeGen/RISCV/llvm.frexp.ll | 120 +++++++++---------
21 files changed, 286 insertions(+), 190 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
index 2501256ca6adf0..5dcec078856ead 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -50,6 +50,8 @@ class RISCVExpandPseudo : public MachineFunctionPass {
MachineBasicBlock::iterator MBBI, unsigned Opcode);
bool expandMV_FPR16INX(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI);
+ bool expandMV_FPR32INX(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI);
bool expandRV32ZdinxStore(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI);
bool expandRV32ZdinxLoad(MachineBasicBlock &MBB,
@@ -108,6 +110,8 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
switch (MBBI->getOpcode()) {
case RISCV::PseudoMV_FPR16INX:
return expandMV_FPR16INX(MBB, MBBI);
+ case RISCV::PseudoMV_FPR32INX:
+ return expandMV_FPR32INX(MBB, MBBI);
case RISCV::PseudoRV32ZdinxSD:
return expandRV32ZdinxStore(MBB, MBBI);
case RISCV::PseudoRV32ZdinxLD:
@@ -287,6 +291,23 @@ bool RISCVExpandPseudo::expandMV_FPR16INX(MachineBasicBlock &MBB,
return true;
}
+bool RISCVExpandPseudo::expandMV_FPR32INX(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) {
+ DebugLoc DL = MBBI->getDebugLoc();
+ const TargetRegisterInfo *TRI = STI->getRegisterInfo();
+ Register DstReg = TRI->getMatchingSuperReg(
+ MBBI->getOperand(0).getReg(), RISCV::sub_32, &RISCV::GPRRegClass);
+ Register SrcReg = TRI->getMatchingSuperReg(
+ MBBI->getOperand(1).getReg(), RISCV::sub_32, &RISCV::GPRRegClass);
+
+ BuildMI(MBB, MBBI, DL, TII->get(RISCV::ADDI), DstReg)
+ .addReg(SrcReg, getKillRegState(MBBI->getOperand(1).isKill()))
+ .addImm(0);
+
+ MBBI->eraseFromParent(); // The pseudo instruction is gone now.
+ return true;
+}
+
// This function expands the PseudoRV32ZdinxSD for storing a double-precision
// floating-point value into memory by generating an equivalent instruction
// sequence for RV32.
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 5e4e8eefe6122b..b222029e9dd31c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -110,6 +110,7 @@ Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
MemBytes = 2;
break;
case RISCV::LW:
+ case RISCV::LW_INX:
case RISCV::FLW:
case RISCV::LWU:
MemBytes = 4;
@@ -150,6 +151,7 @@ Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
MemBytes = 2;
break;
case RISCV::SW:
+ case RISCV::SW_INX:
case RISCV::FSW:
MemBytes = 4;
break;
@@ -472,10 +474,9 @@ void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
}
if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {
- assert(STI.hasStdExtZfinx());
- BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_S_INX), DstReg)
- .addReg(SrcReg, getKillRegState(KillSrc))
- .addReg(SrcReg, getKillRegState(KillSrc));
+ BuildMI(MBB, MBBI, DL, get(RISCV::PseudoMV_FPR32INX), DstReg)
+ .addReg(SrcReg,
+ getKillRegState(KillSrc) | getRenamableRegState(RenamableSrc));
return;
}
@@ -1535,6 +1536,7 @@ unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
switch (Opcode) {
case RISCV::PseudoMV_FPR16INX:
+ case RISCV::PseudoMV_FPR32INX:
// MV is always compressible.
return STI.hasStdExtCOrZca() ? 2 : 4;
case TargetOpcode::STACKMAP:
@@ -2595,6 +2597,7 @@ bool RISCVInstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
case RISCV::LH_INX:
case RISCV::LHU:
case RISCV::LW:
+ case RISCV::LW_INX:
case RISCV::LWU:
case RISCV::LD:
case RISCV::FLH:
@@ -2604,6 +2607,7 @@ bool RISCVInstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
case RISCV::SH:
case RISCV::SH_INX:
case RISCV::SW:
+ case RISCV::SW_INX:
case RISCV::SD:
case RISCV::FSH:
case RISCV::FSW:
@@ -2673,9 +2677,11 @@ bool RISCVInstrInfo::getMemOperandsWithOffsetWidth(
case RISCV::SH_INX:
case RISCV::FSH:
case RISCV::LW:
+ case RISCV::LW_INX:
case RISCV::LWU:
case RISCV::FLW:
case RISCV::SW:
+ case RISCV::SW_INX:
case RISCV::FSW:
case RISCV::LD:
case RISCV::FLD:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoC.td b/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
index 3f279b7a58ca68..7d742322b42969 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoC.td
@@ -331,6 +331,15 @@ def C_LW : CLoad_ri<0b010, "c.lw", GPRC, uimm7_lsb00>,
let Inst{5} = imm{6};
}
+let isCodeGenOnly = 1 in
+def C_LW_INX : CLoad_ri<0b010, "c.lw", GPRF32C, uimm7_lsb00>,
+ Sched<[WriteLDW, ReadMemBase]> {
+ bits<7> imm;
+ let Inst{12-10} = imm{5-3};
+ let Inst{6} = imm{2};
+ let Inst{5} = imm{6};
+}
+
let DecoderNamespace = "RISCV32Only_",
Predicates = [HasStdExtCOrZcfOrZce, HasStdExtF, IsRV32] in
def C_FLW : CLoad_ri<0b011, "c.flw", FPR32C, uimm7_lsb00>,
@@ -365,6 +374,15 @@ def C_SW : CStore_rri<0b110, "c.sw", GPRC, uimm7_lsb00>,
let Inst{5} = imm{6};
}
+let isCodeGenOnly = 1 in
+def C_SW_INX : CStore_rri<0b110, "c.sw", GPRF32C, uimm7_lsb00>,
+ Sched<[WriteSTW, ReadStoreData, ReadMemBase]> {
+ bits<7> imm;
+ let Inst{12-10} = imm{5-3};
+ let Inst{6} = imm{2};
+ let Inst{5} = imm{6};
+}
+
let DecoderNamespace = "RISCV32Only_",
Predicates = [HasStdExtCOrZcfOrZce, HasStdExtF, IsRV32] in
def C_FSW : CStore_rri<0b111, "c.fsw", FPR32C, uimm7_lsb00>,
@@ -517,6 +535,13 @@ def C_LWSP : CStackLoad<0b010, "c.lwsp", GPRNoX0, uimm8_lsb00>,
let Inst{3-2} = imm{7-6};
}
+let isCodeGenOnly = 1 in
+def C_LWSP_INX : CStackLoad<0b010, "c.lwsp", GPRF32NoX0, uimm8_lsb00>,
+ Sched<[WriteLDW, ReadMemBase]> {
+ let Inst{6-4} = imm{4-2};
+ let Inst{3-2} = imm{7-6};
+}
+
let DecoderNamespace = "RISCV32Only_",
Predicates = [HasStdExtCOrZcfOrZce, HasStdExtF, IsRV32] in
def C_FLWSP : CStackLoad<0b011, "c.flwsp", FPR32, uimm8_lsb00>,
@@ -575,6 +600,13 @@ def C_SWSP : CStackStore<0b110, "c.swsp", GPR, uimm8_lsb00>,
let Inst{8-7} = imm{7-6};
}
+let isCodeGenOnly = 1 in
+def C_SWSP_INX : CStackStore<0b110, "c.swsp", GPRF32, uimm8_lsb00>,
+ Sched<[WriteSTW, ReadStoreData, ReadMemBase]> {
+ let Inst{12-9} = imm{5-2};
+ let Inst{8-7} = imm{7-6};
+}
+
let DecoderNamespace = "RISCV32Only_",
Predicates = [HasStdExtCOrZcfOrZce, HasStdExtF, IsRV32] in
def C_FSWSP : CStackStore<0b111, "c.fswsp", FPR32, uimm8_lsb00>,
@@ -869,6 +901,10 @@ def : CompressPat<(FLD FPR64C:$rd, GPRCMem:$rs1, uimm8_lsb000:$imm),
let Predicates = [HasStdExtCOrZca] in {
def : CompressPat<(LW GPRC:$rd, GPRCMem:$rs1, uimm7_lsb00:$imm),
(C_LW GPRC:$rd, GPRCMem:$rs1, uimm7_lsb00:$imm)>;
+
+let isCompressOnly = true in
+def : CompressPat<(LW_INX GPRF32C:$rd, GPRCMem:$rs1, uimm7_lsb00:$imm),
+ (C_LW_INX GPRF32C:$rd, GPRCMem:$rs1, uimm7_lsb00:$imm)>;
} // Predicates = [HasStdExtCOrZca]
let Predicates = [HasStdExtCOrZcfOrZce, HasStdExtF, IsRV32] in {
@@ -889,6 +925,10 @@ def : CompressPat<(FSD FPR64C:$rs2, GPRCMem:$rs1, uimm8_lsb000:$imm),
let Predicates = [HasStdExtCOrZca] in {
def : CompressPat<(SW GPRC:$rs2, GPRCMem:$rs1, uimm7_lsb00:$imm),
(C_SW GPRC:$rs2, GPRCMem:$rs1, uimm7_lsb00:$imm)>;
+
+let isCompressOnly = true in
+def : CompressPat<(SW_INX GPRF32C:$rs2, GPRCMem:$rs1, uimm7_lsb00:$imm),
+ (C_SW_INX GPRF32C:$rs2, GPRCMem:$rs1, uimm7_lsb00:$imm)>;
} // Predicates = [HasStdExtCOrZca]
let Predicates = [HasStdExtCOrZcfOrZce, HasStdExtF, IsRV32] in {
@@ -992,6 +1032,10 @@ def : CompressPat<(FLD FPR64:$rd, SPMem:$rs1, uimm9_lsb000:$imm),
let Predicates = [HasStdExtCOrZca] in {
def : CompressPat<(LW GPRNoX0:$rd, SPMem:$rs1, uimm8_lsb00:$imm),
(C_LWSP GPRNoX0:$rd, SPMem:$rs1, uimm8_lsb00:$imm)>;
+
+let isCompressOnly = true in
+def : CompressPat<(LW_INX GPRF32NoX0:$rd, SPMem:$rs1, uimm8_lsb00:$imm),
+ (C_LWSP_INX GPRF32NoX0:$rd, SPMem:$rs1, uimm8_lsb00:$imm)>;
} // Predicates = [HasStdExtCOrZca]
let Predicates = [HasStdExtCOrZcfOrZce, HasStdExtF, IsRV32] in {
@@ -1034,6 +1078,10 @@ def : CompressPat<(FSD FPR64:$rs2, SPMem:$rs1, uimm9_lsb000:$imm),
let Predicates = [HasStdExtCOrZca] in {
def : CompressPat<(SW GPR:$rs2, SPMem:$rs1, uimm8_lsb00:$imm),
(C_SWSP GPR:$rs2, SPMem:$rs1, uimm8_lsb00:$imm)>;
+
+let isCompressOnly = true in
+def : CompressPat<(SW_INX GPRF32:$rs2, SPMem:$rs1, uimm8_lsb00:$imm),
+ (C_SWSP_INX GPRF32:$rs2, SPMem:$rs1, uimm8_lsb00:$imm)>;
} // Predicates = [HasStdExtCOrZca]
let Predicates = [HasStdExtCOrZcfOrZce, HasStdExtF, IsRV32] in {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
index 7e9e10381f7e3c..1146637f106e7a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -309,6 +309,13 @@ let Predicates = [HasStdExtZfinx], isCodeGenOnly = 1 in {
def LW_INX : Load_ri<0b010, "lw", GPRF32>, Sched<[WriteLDW, ReadMemBase]>;
def SW_INX : Store_rri<0b010, "sw", GPRF32>,
Sched<[WriteSTW, ReadStoreData, ReadMemBase]>;
+
+// ADDI with GPRF32 register class to use for copy. This should not be used as
+// general ADDI, so the immediate should always be zero.
+let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveReg = 1,
+ hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
+def PseudoMV_FPR32INX : Pseudo<(outs GPRF32:$rd), (ins GPRF32:$rs), []>,
+ Sched<[WriteIALU, ReadIALU]>;
}
foreach Ext = FExts in {
diff --git a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
index 5973e5bf2e5252..81e03936bee424 100644
--- a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
@@ -109,7 +109,9 @@ static unsigned log2LdstWidth(unsigned Opcode) {
case RISCV::SH_INX:
return 1;
case RISCV::LW:
+ case RISCV::LW_INX:
case RISCV::SW:
+ case RISCV::SW_INX:
case RISCV::FLW:
case RISCV::FSW:
return 2;
@@ -136,7 +138,9 @@ static unsigned offsetMask(unsigned Opcode) {
case RISCV::SH_INX:
return maskTrailingOnes<unsigned>(1U);
case RISCV::LW:
+ case RISCV::LW_INX:
case RISCV::SW:
+ case RISCV::SW_INX:
case RISCV::FLW:
case RISCV::FSW:
case RISCV::LD:
@@ -178,6 +182,7 @@ static int64_t getBaseAdjustForCompression(int64_t Offset, unsigned Opcode) {
static bool isCompressedReg(Register Reg) {
return RISCV::GPRCRegClass.contains(Reg) ||
RISCV::GPRF16CRegClass.contains(Reg) ||
+ RISCV::GPRF32CRegClass.contains(Reg) ||
RISCV::FPR32CRegClass.contains(Reg) ||
RISCV::FPR64CRegClass.contains(Reg);
}
@@ -195,6 +200,7 @@ static bool isCompressibleLoad(const MachineInstr &MI) {
case RISCV::LHU:
return STI.hasStdExtZcb();
case RISCV::LW:
+ case RISCV::LW_INX:
case RISCV::LD:
return STI.hasStdExtCOrZca();
case RISCV::FLW:
@@ -216,6 +222,7 @@ static bool isCompressibleStore(const MachineInstr &MI) {
case RISCV::SH_INX:
return STI.hasStdExtZcb();
case RISCV::SW:
+ case RISCV::SW_INX:
case RISCV::SD:
return STI.hasStdExtCOrZca();
case RISCV::FSW:
@@ -329,6 +336,8 @@ static Register analyzeCompressibleUses(MachineInstr &FirstMI,
RCToScavenge = &RISCV::GPRCRegClass;
else if (RISCV::GPRF16RegClass.contains(RegImm.Reg))
RCToScavenge = &RISCV::GPRF16CRegClass;
+ else if (RISCV::GPRF32RegClass.contains(RegImm.Reg))
+ RCToScavenge = &RISCV::GPRF32CRegClass;
else if (RISCV::FPR32RegClass.contains(RegImm.Reg))
RCToScavenge = &RISCV::FPR32CRegClass;
else if (RISCV::FPR64RegClass.contains(RegImm.Reg))
diff --git a/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp b/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
index b3a2877edde4e3..a324deb4e48f5c 100644
--- a/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMergeBaseOffset.cpp
@@ -387,6 +387,7 @@ bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi,
case RISCV::LH:
case RISCV::LH_INX:
case RISCV::LW:
+ case RISCV::LW_INX:
case RISCV::LBU:
case RISCV::LHU:
case RISCV::LWU:
@@ -398,6 +399,7 @@ bool RISCVMergeBaseOffsetOpt::foldIntoMemoryOps(MachineInstr &Hi,
case RISCV::SH:
case RISCV::SH_INX:
case RISCV::SW:
+ case RISCV::SW_INX:
case RISCV::SD:
case RISCV::FSH:
case RISCV::FSW:
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 7c7796ae17fb79..1c5cdea1978efd 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -663,6 +663,9 @@ def GPRF32 : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 17),
(sequence "X%u_W", 8, 9),
(sequence "X%u_W", 18, 27),
(sequence "X%u_W", 0, 4))>;
+def GPRF32C : RISCVRegisterClass<[f32], 32, (add (sequence "X%u_W", 10, 15),
+ (sequence "X%u_W", 8, 9))>;
+def GPRF32NoX0 : RISCVRegisterClass<[f32], 32, (sub GPRF32, X0_W)>;
// Dummy zero register for use in the register pair containing X0 (as X1 is
// not read to or written when the X0 register pair is used).
diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
index 20c8a2f4ceefe9..35caa627b57bc3 100644
--- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
@@ -1003,12 +1003,12 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
; RV32IZFINX: # %bb.0: # %entry
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s a0, zero
+; RV32IZFINX-NEXT: li a0, 0
; RV32IZFINX-NEXT: call dummy
; RV32IZFINX-NEXT: feq.s a0, a0, zero
; RV32IZFINX-NEXT: beqz a0, .LBB17_3
; RV32IZFINX-NEXT: # %bb.1: # %if.end
-; RV32IZFINX-NEXT: fmv.s a0, zero
+; RV32IZFINX-NEXT: li a0, 0
; RV32IZFINX-NEXT: call dummy
; RV32IZFINX-NEXT: feq.s a0, a0, zero
; RV32IZFINX-NEXT: beqz a0, .LBB17_3
@@ -1024,12 +1024,12 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
; RV64IZFINX: # %bb.0: # %entry
; RV64IZFINX-NEXT: addi sp, sp, -16
; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: fmv.s a0, zero
+; RV64IZFINX-NEXT: li a0, 0
; RV64IZFINX-NEXT: call dummy
; RV64IZFINX-NEXT: feq.s a0, a0, zero
; RV64IZFINX-NEXT: beqz a0, .LBB17_3
; RV64IZFINX-NEXT: # %bb.1: # %if.end
-; RV64IZFINX-NEXT: fmv.s a0, zero
+; RV64IZFINX-NEXT: li a0, 0
; RV64IZFINX-NEXT: call dummy
; RV64IZFINX-NEXT: feq.s a0, a0, zero
; RV64IZFINX-NEXT: beqz a0, .LBB17_3
diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index 0ffd5ff9c37a4f..031976b4fa2b21 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -671,10 +671,10 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
@@ -905,10 +905,10 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: fle.s a0, zero, a0
; RV32IZFINX-NEXT: neg s1, a0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: lui a2, 391168
diff --git a/llvm/test/CodeGen/RISCV/float-imm.ll b/llvm/test/CodeGen/RISCV/float-imm.ll
index 1f82dc17d7cff9..58cbc72e2197c9 100644
--- a/llvm/test/CodeGen/RISCV/float-imm.ll
+++ b/llvm/test/CodeGen/RISCV/float-imm.ll
@@ -57,7 +57,7 @@ define float @float_positive_zero(ptr %pf) nounwind {
;
; CHECKZFINX-LABEL: float_positive_zero:
; CHECKZFINX: # %bb.0:
-; CHECKZFINX-NEXT: fmv.s a0, zero
+; CHECKZFINX-NEXT: li a0, 0
; CHECKZFINX-NEXT: ret
ret float 0.0
}
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
index 73f73a6ad99086..cbd84634de11c0 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
@@ -279,10 +279,10 @@ define float @sincos_f32(float %a) nounwind strictfp {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: call sinf
-; RV32IZFINX-NEXT: fmv.s s1, a0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv s1, a0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call cosf
; RV32IZFINX-NEXT: fadd.s a0, s1, a0
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -297,10 +297,10 @@ define float @sincos_f32(float %a) nounwind strictfp {
; RV64IZFINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: fmv.s s0, a0
+; RV64IZFINX-NEXT: mv s0, a0
; RV64IZFINX-NEXT: call sinf
-; RV64IZFINX-NEXT: fmv.s s1, a0
-; RV64IZFINX-NEXT: fmv.s a0, s0
+; RV64IZFINX-NEXT: mv s1, a0
+; RV64IZFINX-NEXT: mv a0, s0
; RV64IZFINX-NEXT: call cosf
; RV64IZFINX-NEXT: fadd.s a0, s1, a0
; RV64IZFINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
index 1121a37e5d7e9a..b05eac9c9dee26 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -225,10 +225,10 @@ define float @sincos_f32(float %a) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: call sinf
-; RV32IZFINX-NEXT: fmv.s s1, a0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv s1, a0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call cosf
; RV32IZFINX-NEXT: fadd.s a0, s1, a0
; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -243,10 +243,10 @@ define float @sincos_f32(float %a) nounwind {
; RV64IZFINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: fmv.s s0, a0
+; RV64IZFINX-NEXT: mv s0, a0
; RV64IZFINX-NEXT: call sinf
-; RV64IZFINX-NEXT: fmv.s s1, a0
-; RV64IZFINX-NEXT: fmv.s a0, s0
+; RV64IZFINX-NEXT: mv s1, a0
+; RV64IZFINX-NEXT: mv a0, s0
; RV64IZFINX-NEXT: call cosf
; RV64IZFINX-NEXT: fadd.s a0, s1, a0
; RV64IZFINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll b/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll
index 14f422f6c7a02a..0e00dff0b64245 100644
--- a/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll
+++ b/llvm/test/CodeGen/RISCV/float-maximum-minimum.ll
@@ -43,7 +43,7 @@ define float @fminimum_f32(float %a, float %b) nounwind {
; RV32IZFINX-LABEL: fminimum_f32:
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: feq.s a3, a0, a0
-; RV32IZFINX-NEXT: fmv.s a2, a1
+; RV32IZFINX-NEXT: mv a2, a1
; RV32IZFINX-NEXT: beqz a3, .LBB0_3
; RV32IZFINX-NEXT: # %bb.1:
; RV32IZFINX-NEXT: feq.s a3, a1, a1
@@ -52,7 +52,7 @@ define float @fminimum_f32(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: fmin.s a0, a0, a2
; RV32IZFINX-NEXT: ret
; RV32IZFINX-NEXT: .LBB0_3:
-; RV32IZFINX-NEXT: fmv.s a2, a0
+; RV32IZFINX-NEXT: mv a2, a0
; RV32IZFINX-NEXT: feq.s a3, a1, a1
; RV32IZFINX-NEXT: bnez a3, .LBB0_2
; RV32IZFINX-NEXT: .LBB0_4:
@@ -81,7 +81,7 @@ define float @fminimum_f32(float %a, float %b) nounwind {
; RV64IZFINX-LABEL: fminimum_f32:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: feq.s a3, a0, a0
-; RV64IZFINX-NEXT: fmv.s a2, a1
+; RV64IZFINX-NEXT: mv a2, a1
; RV64IZFINX-NEXT: beqz a3, .LBB0_3
; RV64IZFINX-NEXT: # %bb.1:
; RV64IZFINX-NEXT: feq.s a3, a1, a1
@@ -90,7 +90,7 @@ define float @fminimum_f32(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: fmin.s a0, a0, a2
; RV64IZFINX-NEXT: ret
; RV64IZFINX-NEXT: .LBB0_3:
-; RV64IZFINX-NEXT: fmv.s a2, a0
+; RV64IZFINX-NEXT: mv a2, a0
; RV64IZFINX-NEXT: feq.s a3, a1, a1
; RV64IZFINX-NEXT: bnez a3, .LBB0_2
; RV64IZFINX-NEXT: .LBB0_4:
@@ -125,7 +125,7 @@ define float @fmaximum_f32(float %a, float %b) nounwind {
; RV32IZFINX-LABEL: fmaximum_f32:
; RV32IZFINX: # %bb.0:
; RV32IZFINX-NEXT: feq.s a3, a0, a0
-; RV32IZFINX-NEXT: fmv.s a2, a1
+; RV32IZFINX-NEXT: mv a2, a1
; RV32IZFINX-NEXT: beqz a3, .LBB1_3
; RV32IZFINX-NEXT: # %bb.1:
; RV32IZFINX-NEXT: feq.s a3, a1, a1
@@ -134,7 +134,7 @@ define float @fmaximum_f32(float %a, float %b) nounwind {
; RV32IZFINX-NEXT: fmax.s a0, a0, a2
; RV32IZFINX-NEXT: ret
; RV32IZFINX-NEXT: .LBB1_3:
-; RV32IZFINX-NEXT: fmv.s a2, a0
+; RV32IZFINX-NEXT: mv a2, a0
; RV32IZFINX-NEXT: feq.s a3, a1, a1
; RV32IZFINX-NEXT: bnez a3, .LBB1_2
; RV32IZFINX-NEXT: .LBB1_4:
@@ -163,7 +163,7 @@ define float @fmaximum_f32(float %a, float %b) nounwind {
; RV64IZFINX-LABEL: fmaximum_f32:
; RV64IZFINX: # %bb.0:
; RV64IZFINX-NEXT: feq.s a3, a0, a0
-; RV64IZFINX-NEXT: fmv.s a2, a1
+; RV64IZFINX-NEXT: mv a2, a1
; RV64IZFINX-NEXT: beqz a3, .LBB1_3
; RV64IZFINX-NEXT: # %bb.1:
; RV64IZFINX-NEXT: feq.s a3, a1, a1
@@ -172,7 +172,7 @@ define float @fmaximum_f32(float %a, float %b) nounwind {
; RV64IZFINX-NEXT: fmax.s a0, a0, a2
; RV64IZFINX-NEXT: ret
; RV64IZFINX-NEXT: .LBB1_3:
-; RV64IZFINX-NEXT: fmv.s a2, a0
+; RV64IZFINX-NEXT: mv a2, a0
; RV64IZFINX-NEXT: feq.s a3, a1, a1
; RV64IZFINX-NEXT: bnez a3, .LBB1_2
; RV64IZFINX-NEXT: .LBB1_4:
diff --git a/llvm/test/CodeGen/RISCV/float-mem.ll b/llvm/test/CodeGen/RISCV/float-mem.ll
index 3038a03d1ada00..3779d39a753e1e 100644
--- a/llvm/test/CodeGen/RISCV/float-mem.ll
+++ b/llvm/test/CodeGen/RISCV/float-mem.ll
@@ -170,7 +170,7 @@ define dso_local float @flw_stack(float %a) nounwind {
; RV32IZFINX-NEXT: addi sp, sp, -16
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: addi a0, sp, 4
; RV32IZFINX-NEXT: call notdead
; RV32IZFINX-NEXT: lw a0, 4(sp)
@@ -185,7 +185,7 @@ define dso_local float @flw_stack(float %a) nounwind {
; RV64IZFINX-NEXT: addi sp, sp, -32
; RV64IZFINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZFINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64IZFINX-NEXT: fmv.s s0, a0
+; RV64IZFINX-NEXT: mv s0, a0
; RV64IZFINX-NEXT: addi a0, sp, 12
; RV64IZFINX-NEXT: call notdead
; RV64IZFINX-NEXT: lw a0, 12(sp)
diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
index dbfecd94779bed..198b18c75272a9 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
@@ -96,7 +96,7 @@ define i64 @test_floor_si64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -108,7 +108,7 @@ define i64 @test_floor_si64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB1_2:
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
@@ -225,7 +225,7 @@ define i64 @test_floor_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -237,7 +237,7 @@ define i64 @test_floor_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB3_2:
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: lui a2, 391168
@@ -354,7 +354,7 @@ define i64 @test_ceil_si64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -366,7 +366,7 @@ define i64 @test_ceil_si64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB5_2:
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
@@ -483,7 +483,7 @@ define i64 @test_ceil_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -495,7 +495,7 @@ define i64 @test_ceil_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB7_2:
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: lui a2, 391168
@@ -612,7 +612,7 @@ define i64 @test_trunc_si64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -624,7 +624,7 @@ define i64 @test_trunc_si64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB9_2:
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
@@ -741,7 +741,7 @@ define i64 @test_trunc_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -753,7 +753,7 @@ define i64 @test_trunc_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB11_2:
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: lui a2, 391168
@@ -870,7 +870,7 @@ define i64 @test_round_si64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -882,7 +882,7 @@ define i64 @test_round_si64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB13_2:
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
@@ -999,7 +999,7 @@ define i64 @test_round_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -1011,7 +1011,7 @@ define i64 @test_round_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB15_2:
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: lui a2, 391168
@@ -1128,7 +1128,7 @@ define i64 @test_roundeven_si64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -1140,7 +1140,7 @@ define i64 @test_roundeven_si64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB17_2:
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
@@ -1257,7 +1257,7 @@ define i64 @test_roundeven_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -1269,7 +1269,7 @@ define i64 @test_roundeven_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB19_2:
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: lui a2, 391168
@@ -1386,7 +1386,7 @@ define i64 @test_rint_si64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -1398,7 +1398,7 @@ define i64 @test_rint_si64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB21_2:
; RV32IZFINX-NEXT: lui a0, 913408
; RV32IZFINX-NEXT: fle.s s1, a0, s0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixsfdi
; RV32IZFINX-NEXT: lui a4, 524288
; RV32IZFINX-NEXT: lui a2, 524288
@@ -1515,7 +1515,7 @@ define i64 @test_rint_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFINX-NEXT: fmv.s s0, a0
+; RV32IZFINX-NEXT: mv s0, a0
; RV32IZFINX-NEXT: lui a0, 307200
; RV32IZFINX-NEXT: fabs.s a1, s0
; RV32IZFINX-NEXT: flt.s a0, a1, a0
@@ -1527,7 +1527,7 @@ define i64 @test_rint_ui64(float %x) nounwind {
; RV32IZFINX-NEXT: .LBB23_2:
; RV32IZFINX-NEXT: fle.s a0, zero, s0
; RV32IZFINX-NEXT: neg s1, a0
-; RV32IZFINX-NEXT: fmv.s a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
; RV32IZFINX-NEXT: call __fixunssfdi
; RV32IZFINX-NEXT: and a0, s1, a0
; RV32IZFINX-NEXT: lui a2, 391168
diff --git a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
index 0e0d080373756f..a2ff0d33e2d31a 100644
--- a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
@@ -16,7 +16,7 @@ define float @select_fcmp_false(float %a, float %b) nounwind {
;
; CHECKZFINX-LABEL: select_fcmp_false:
; CHECKZFINX: # %bb.0:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: ret
%1 = fcmp false float %a, %b
%2 = select i1 %1, float %a, float %b
@@ -38,7 +38,7 @@ define float @select_fcmp_oeq(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: feq.s a2, a0, a1
; CHECKZFINX-NEXT: bnez a2, .LBB1_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: .LBB1_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp oeq float %a, %b
@@ -61,7 +61,7 @@ define float @select_fcmp_ogt(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: flt.s a2, a1, a0
; CHECKZFINX-NEXT: bnez a2, .LBB2_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: .LBB2_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp ogt float %a, %b
@@ -84,7 +84,7 @@ define float @select_fcmp_oge(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: fle.s a2, a1, a0
; CHECKZFINX-NEXT: bnez a2, .LBB3_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: .LBB3_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp oge float %a, %b
@@ -107,7 +107,7 @@ define float @select_fcmp_olt(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: flt.s a2, a0, a1
; CHECKZFINX-NEXT: bnez a2, .LBB4_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: .LBB4_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp olt float %a, %b
@@ -130,7 +130,7 @@ define float @select_fcmp_ole(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: fle.s a2, a0, a1
; CHECKZFINX-NEXT: bnez a2, .LBB5_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: .LBB5_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp ole float %a, %b
@@ -157,7 +157,7 @@ define float @select_fcmp_one(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: or a2, a3, a2
; CHECKZFINX-NEXT: bnez a2, .LBB6_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: .LBB6_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp one float %a, %b
@@ -184,7 +184,7 @@ define float @select_fcmp_ord(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: and a2, a3, a2
; CHECKZFINX-NEXT: bnez a2, .LBB7_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: .LBB7_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp ord float %a, %b
@@ -211,7 +211,7 @@ define float @select_fcmp_ueq(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: or a2, a3, a2
; CHECKZFINX-NEXT: beqz a2, .LBB8_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: .LBB8_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp ueq float %a, %b
@@ -234,7 +234,7 @@ define float @select_fcmp_ugt(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: fle.s a2, a0, a1
; CHECKZFINX-NEXT: beqz a2, .LBB9_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: .LBB9_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp ugt float %a, %b
@@ -257,7 +257,7 @@ define float @select_fcmp_uge(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: flt.s a2, a0, a1
; CHECKZFINX-NEXT: beqz a2, .LBB10_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: .LBB10_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp uge float %a, %b
@@ -280,7 +280,7 @@ define float @select_fcmp_ult(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: fle.s a2, a1, a0
; CHECKZFINX-NEXT: beqz a2, .LBB11_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: .LBB11_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp ult float %a, %b
@@ -303,7 +303,7 @@ define float @select_fcmp_ule(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: flt.s a2, a1, a0
; CHECKZFINX-NEXT: beqz a2, .LBB12_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: .LBB12_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp ule float %a, %b
@@ -326,7 +326,7 @@ define float @select_fcmp_une(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: feq.s a2, a0, a1
; CHECKZFINX-NEXT: beqz a2, .LBB13_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: .LBB13_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp une float %a, %b
@@ -353,7 +353,7 @@ define float @select_fcmp_uno(float %a, float %b) nounwind {
; CHECKZFINX-NEXT: and a2, a3, a2
; CHECKZFINX-NEXT: beqz a2, .LBB14_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a0, a1
+; CHECKZFINX-NEXT: mv a0, a1
; CHECKZFINX-NEXT: .LBB14_2:
; CHECKZFINX-NEXT: ret
%1 = fcmp uno float %a, %b
diff --git a/llvm/test/CodeGen/RISCV/float-select-icmp.ll b/llvm/test/CodeGen/RISCV/float-select-icmp.ll
index fd899b4ad45f6c..e8f420f1b07254 100644
--- a/llvm/test/CodeGen/RISCV/float-select-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-select-icmp.ll
@@ -21,9 +21,9 @@ define float @select_icmp_eq(i32 signext %a, i32 signext %b, float %c, float %d)
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: beq a0, a1, .LBB0_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a2, a3
+; CHECKZFINX-NEXT: mv a2, a3
; CHECKZFINX-NEXT: .LBB0_2:
-; CHECKZFINX-NEXT: fmv.s a0, a2
+; CHECKZFINX-NEXT: mv a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp eq i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -43,9 +43,9 @@ define float @select_icmp_ne(i32 signext %a, i32 signext %b, float %c, float %d)
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: bne a0, a1, .LBB1_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a2, a3
+; CHECKZFINX-NEXT: mv a2, a3
; CHECKZFINX-NEXT: .LBB1_2:
-; CHECKZFINX-NEXT: fmv.s a0, a2
+; CHECKZFINX-NEXT: mv a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp ne i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -65,9 +65,9 @@ define float @select_icmp_ugt(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: bltu a1, a0, .LBB2_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a2, a3
+; CHECKZFINX-NEXT: mv a2, a3
; CHECKZFINX-NEXT: .LBB2_2:
-; CHECKZFINX-NEXT: fmv.s a0, a2
+; CHECKZFINX-NEXT: mv a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp ugt i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -87,9 +87,9 @@ define float @select_icmp_uge(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: bgeu a0, a1, .LBB3_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a2, a3
+; CHECKZFINX-NEXT: mv a2, a3
; CHECKZFINX-NEXT: .LBB3_2:
-; CHECKZFINX-NEXT: fmv.s a0, a2
+; CHECKZFINX-NEXT: mv a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp uge i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -109,9 +109,9 @@ define float @select_icmp_ult(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: bltu a0, a1, .LBB4_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a2, a3
+; CHECKZFINX-NEXT: mv a2, a3
; CHECKZFINX-NEXT: .LBB4_2:
-; CHECKZFINX-NEXT: fmv.s a0, a2
+; CHECKZFINX-NEXT: mv a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp ult i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -131,9 +131,9 @@ define float @select_icmp_ule(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: bgeu a1, a0, .LBB5_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a2, a3
+; CHECKZFINX-NEXT: mv a2, a3
; CHECKZFINX-NEXT: .LBB5_2:
-; CHECKZFINX-NEXT: fmv.s a0, a2
+; CHECKZFINX-NEXT: mv a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp ule i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -153,9 +153,9 @@ define float @select_icmp_sgt(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: blt a1, a0, .LBB6_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a2, a3
+; CHECKZFINX-NEXT: mv a2, a3
; CHECKZFINX-NEXT: .LBB6_2:
-; CHECKZFINX-NEXT: fmv.s a0, a2
+; CHECKZFINX-NEXT: mv a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp sgt i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -175,9 +175,9 @@ define float @select_icmp_sge(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: bge a0, a1, .LBB7_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a2, a3
+; CHECKZFINX-NEXT: mv a2, a3
; CHECKZFINX-NEXT: .LBB7_2:
-; CHECKZFINX-NEXT: fmv.s a0, a2
+; CHECKZFINX-NEXT: mv a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp sge i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -197,9 +197,9 @@ define float @select_icmp_slt(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: blt a0, a1, .LBB8_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a2, a3
+; CHECKZFINX-NEXT: mv a2, a3
; CHECKZFINX-NEXT: .LBB8_2:
-; CHECKZFINX-NEXT: fmv.s a0, a2
+; CHECKZFINX-NEXT: mv a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp slt i32 %a, %b
%2 = select i1 %1, float %c, float %d
@@ -219,9 +219,9 @@ define float @select_icmp_sle(i32 signext %a, i32 signext %b, float %c, float %d
; CHECKZFINX: # %bb.0:
; CHECKZFINX-NEXT: bge a1, a0, .LBB9_2
; CHECKZFINX-NEXT: # %bb.1:
-; CHECKZFINX-NEXT: fmv.s a2, a3
+; CHECKZFINX-NEXT: mv a2, a3
; CHECKZFINX-NEXT: .LBB9_2:
-; CHECKZFINX-NEXT: fmv.s a0, a2
+; CHECKZFINX-NEXT: mv a0, a2
; CHECKZFINX-NEXT: ret
%1 = icmp sle i32 %a, %b
%2 = select i1 %1, float %c, float %d
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index e6395de7458f99..0c84a08f1fd451 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -2246,7 +2246,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: lui a0, 913408
; RV32IZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: lui a2, 524288
@@ -2293,7 +2293,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IZDINXZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZDINXZHINX-NEXT: lui a0, 913408
; RV32IZDINXZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZDINXZHINX-NEXT: fmv.s a0, s0
+; RV32IZDINXZHINX-NEXT: mv a0, s0
; RV32IZDINXZHINX-NEXT: call __fixsfdi
; RV32IZDINXZHINX-NEXT: lui a4, 524288
; RV32IZDINXZHINX-NEXT: lui a2, 524288
@@ -2637,7 +2637,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; CHECK32-IZHINXMIN-NEXT: fcvt.s.h s0, a0
; CHECK32-IZHINXMIN-NEXT: lui a0, 913408
; CHECK32-IZHINXMIN-NEXT: fle.s s1, a0, s0
-; CHECK32-IZHINXMIN-NEXT: fmv.s a0, s0
+; CHECK32-IZHINXMIN-NEXT: mv a0, s0
; CHECK32-IZHINXMIN-NEXT: call __fixsfdi
; CHECK32-IZHINXMIN-NEXT: lui a4, 524288
; CHECK32-IZHINXMIN-NEXT: lui a2, 524288
@@ -2685,7 +2685,7 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h s0, a0
; CHECK32-IZDINXZHINXMIN-NEXT: lui a0, 913408
; CHECK32-IZDINXZHINXMIN-NEXT: fle.s s1, a0, s0
-; CHECK32-IZDINXZHINXMIN-NEXT: fmv.s a0, s0
+; CHECK32-IZDINXZHINXMIN-NEXT: mv a0, s0
; CHECK32-IZDINXZHINXMIN-NEXT: call __fixsfdi
; CHECK32-IZDINXZHINXMIN-NEXT: lui a4, 524288
; CHECK32-IZDINXZHINXMIN-NEXT: lui a2, 524288
diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
index 2d6d41bc6474f6..18cdb18106f343 100644
--- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
@@ -533,10 +533,10 @@ define half @sincos_f16(half %a) nounwind {
; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call sinf
; RV32IZHINX-NEXT: fcvt.h.s s1, a0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call cosf
; RV32IZHINX-NEXT: fcvt.h.s a0, a0
; RV32IZHINX-NEXT: fadd.h a0, s1, a0
@@ -553,10 +553,10 @@ define half @sincos_f16(half %a) nounwind {
; RV64IZHINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: fcvt.s.h s0, a0
-; RV64IZHINX-NEXT: fmv.s a0, s0
+; RV64IZHINX-NEXT: mv a0, s0
; RV64IZHINX-NEXT: call sinf
; RV64IZHINX-NEXT: fcvt.h.s s1, a0
-; RV64IZHINX-NEXT: fmv.s a0, s0
+; RV64IZHINX-NEXT: mv a0, s0
; RV64IZHINX-NEXT: call cosf
; RV64IZHINX-NEXT: fcvt.h.s a0, a0
; RV64IZHINX-NEXT: fadd.h a0, s1, a0
@@ -775,10 +775,10 @@ define half @sincos_f16(half %a) nounwind {
; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
-; RV32IZHINXMIN-NEXT: fmv.s a0, s0
+; RV32IZHINXMIN-NEXT: mv a0, s0
; RV32IZHINXMIN-NEXT: call sinf
; RV32IZHINXMIN-NEXT: fcvt.h.s s1, a0
-; RV32IZHINXMIN-NEXT: fmv.s a0, s0
+; RV32IZHINXMIN-NEXT: mv a0, s0
; RV32IZHINXMIN-NEXT: call cosf
; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0
@@ -798,10 +798,10 @@ define half @sincos_f16(half %a) nounwind {
; RV64IZHINXMIN-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64IZHINXMIN-NEXT: fcvt.s.h s0, a0
-; RV64IZHINXMIN-NEXT: fmv.s a0, s0
+; RV64IZHINXMIN-NEXT: mv a0, s0
; RV64IZHINXMIN-NEXT: call sinf
; RV64IZHINXMIN-NEXT: fcvt.h.s s1, a0
-; RV64IZHINXMIN-NEXT: fmv.s a0, s0
+; RV64IZHINXMIN-NEXT: mv a0, s0
; RV64IZHINXMIN-NEXT: call cosf
; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0
; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0
diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
index b83f121e07f6e7..9e1a26e74d70b9 100644
--- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
@@ -170,7 +170,7 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: lui a0, 913408
; RV32IZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: lui a2, 524288
@@ -311,7 +311,7 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: lui a0, 913408
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
-; RV32IZHINXMIN-NEXT: fmv.s a0, s0
+; RV32IZHINXMIN-NEXT: mv a0, s0
; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: lui a2, 524288
@@ -568,7 +568,7 @@ define i64 @test_floor_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
; RV32IZHINX-NEXT: lui a2, 391168
@@ -683,7 +683,7 @@ define i64 @test_floor_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
-; RV32IZHINXMIN-NEXT: fmv.s a0, s0
+; RV32IZHINXMIN-NEXT: mv a0, s0
; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: and a0, s1, a0
; RV32IZHINXMIN-NEXT: lui a2, 391168
@@ -878,7 +878,7 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: lui a0, 913408
; RV32IZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: lui a2, 524288
@@ -1019,7 +1019,7 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: lui a0, 913408
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
-; RV32IZHINXMIN-NEXT: fmv.s a0, s0
+; RV32IZHINXMIN-NEXT: mv a0, s0
; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: lui a2, 524288
@@ -1276,7 +1276,7 @@ define i64 @test_ceil_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
; RV32IZHINX-NEXT: lui a2, 391168
@@ -1391,7 +1391,7 @@ define i64 @test_ceil_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
-; RV32IZHINXMIN-NEXT: fmv.s a0, s0
+; RV32IZHINXMIN-NEXT: mv a0, s0
; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: and a0, s1, a0
; RV32IZHINXMIN-NEXT: lui a2, 391168
@@ -1586,7 +1586,7 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: lui a0, 913408
; RV32IZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: lui a2, 524288
@@ -1727,7 +1727,7 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: lui a0, 913408
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
-; RV32IZHINXMIN-NEXT: fmv.s a0, s0
+; RV32IZHINXMIN-NEXT: mv a0, s0
; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: lui a2, 524288
@@ -1984,7 +1984,7 @@ define i64 @test_trunc_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
; RV32IZHINX-NEXT: lui a2, 391168
@@ -2099,7 +2099,7 @@ define i64 @test_trunc_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
-; RV32IZHINXMIN-NEXT: fmv.s a0, s0
+; RV32IZHINXMIN-NEXT: mv a0, s0
; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: and a0, s1, a0
; RV32IZHINXMIN-NEXT: lui a2, 391168
@@ -2294,7 +2294,7 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: lui a0, 913408
; RV32IZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: lui a2, 524288
@@ -2435,7 +2435,7 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: lui a0, 913408
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
-; RV32IZHINXMIN-NEXT: fmv.s a0, s0
+; RV32IZHINXMIN-NEXT: mv a0, s0
; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: lui a2, 524288
@@ -2692,7 +2692,7 @@ define i64 @test_round_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
; RV32IZHINX-NEXT: lui a2, 391168
@@ -2807,7 +2807,7 @@ define i64 @test_round_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
-; RV32IZHINXMIN-NEXT: fmv.s a0, s0
+; RV32IZHINXMIN-NEXT: mv a0, s0
; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: and a0, s1, a0
; RV32IZHINXMIN-NEXT: lui a2, 391168
@@ -3002,7 +3002,7 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: lui a0, 913408
; RV32IZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: lui a2, 524288
@@ -3143,7 +3143,7 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: lui a0, 913408
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
-; RV32IZHINXMIN-NEXT: fmv.s a0, s0
+; RV32IZHINXMIN-NEXT: mv a0, s0
; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: lui a2, 524288
@@ -3400,7 +3400,7 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
; RV32IZHINX-NEXT: lui a2, 391168
@@ -3515,7 +3515,7 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
-; RV32IZHINXMIN-NEXT: fmv.s a0, s0
+; RV32IZHINXMIN-NEXT: mv a0, s0
; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: and a0, s1, a0
; RV32IZHINXMIN-NEXT: lui a2, 391168
@@ -3710,7 +3710,7 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: lui a0, 913408
; RV32IZHINX-NEXT: fle.s s1, a0, s0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixsfdi
; RV32IZHINX-NEXT: lui a4, 524288
; RV32IZHINX-NEXT: lui a2, 524288
@@ -3851,7 +3851,7 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: lui a0, 913408
; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0
-; RV32IZHINXMIN-NEXT: fmv.s a0, s0
+; RV32IZHINXMIN-NEXT: mv a0, s0
; RV32IZHINXMIN-NEXT: call __fixsfdi
; RV32IZHINXMIN-NEXT: lui a4, 524288
; RV32IZHINXMIN-NEXT: lui a2, 524288
@@ -4108,7 +4108,7 @@ define i64 @test_rint_ui64(half %x) nounwind {
; RV32IZHINX-NEXT: fcvt.s.h s0, a0
; RV32IZHINX-NEXT: fle.s a0, zero, s0
; RV32IZHINX-NEXT: neg s1, a0
-; RV32IZHINX-NEXT: fmv.s a0, s0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
; RV32IZHINX-NEXT: lui a2, 391168
@@ -4223,7 +4223,7 @@ define i64 @test_rint_ui64(half %x) nounwind {
; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0
; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0
; RV32IZHINXMIN-NEXT: neg s1, a0
-; RV32IZHINXMIN-NEXT: fmv.s a0, s0
+; RV32IZHINXMIN-NEXT: mv a0, s0
; RV32IZHINXMIN-NEXT: call __fixunssfdi
; RV32IZHINXMIN-NEXT: and a0, s1, a0
; RV32IZHINXMIN-NEXT: lui a2, 391168
diff --git a/llvm/test/CodeGen/RISCV/llvm.frexp.ll b/llvm/test/CodeGen/RISCV/llvm.frexp.ll
index 01719bf9f18872..557bca0b73c8a6 100644
--- a/llvm/test/CodeGen/RISCV/llvm.frexp.ll
+++ b/llvm/test/CodeGen/RISCV/llvm.frexp.ll
@@ -646,25 +646,25 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
; RV32IZFINXZDINX-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: fmv.s s0, a4
-; RV32IZFINXZDINX-NEXT: fmv.s s1, a3
-; RV32IZFINXZDINX-NEXT: fmv.s s2, a2
-; RV32IZFINXZDINX-NEXT: fmv.s a2, a1
+; RV32IZFINXZDINX-NEXT: mv s0, a4
+; RV32IZFINXZDINX-NEXT: mv s1, a3
+; RV32IZFINXZDINX-NEXT: mv s2, a2
+; RV32IZFINXZDINX-NEXT: mv a2, a1
; RV32IZFINXZDINX-NEXT: mv s3, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT: fmv.s a0, a2
+; RV32IZFINXZDINX-NEXT: mv a0, a2
; RV32IZFINXZDINX-NEXT: call frexpf
-; RV32IZFINXZDINX-NEXT: fmv.s s4, a0
+; RV32IZFINXZDINX-NEXT: mv s4, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 12
-; RV32IZFINXZDINX-NEXT: fmv.s a0, s2
+; RV32IZFINXZDINX-NEXT: mv a0, s2
; RV32IZFINXZDINX-NEXT: call frexpf
-; RV32IZFINXZDINX-NEXT: fmv.s s2, a0
+; RV32IZFINXZDINX-NEXT: mv s2, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 16
-; RV32IZFINXZDINX-NEXT: fmv.s a0, s1
+; RV32IZFINXZDINX-NEXT: mv a0, s1
; RV32IZFINXZDINX-NEXT: call frexpf
-; RV32IZFINXZDINX-NEXT: fmv.s s1, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 20
-; RV32IZFINXZDINX-NEXT: fmv.s a0, s0
+; RV32IZFINXZDINX-NEXT: mv a0, s0
; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: lw a1, 20(sp)
; RV32IZFINXZDINX-NEXT: lw a2, 16(sp)
@@ -696,25 +696,25 @@ define { <4 x float>, <4 x i32> } @test_frexp_v4f32_v4i32(<4 x float> %a) nounwi
; RV64IZFINXZDINX-NEXT: sd s2, 48(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s3, 40(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s4, 32(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: fmv.s s0, a4
-; RV64IZFINXZDINX-NEXT: fmv.s s1, a3
-; RV64IZFINXZDINX-NEXT: fmv.s s2, a2
-; RV64IZFINXZDINX-NEXT: fmv.s a2, a1
+; RV64IZFINXZDINX-NEXT: mv s0, a4
+; RV64IZFINXZDINX-NEXT: mv s1, a3
+; RV64IZFINXZDINX-NEXT: mv s2, a2
+; RV64IZFINXZDINX-NEXT: mv a2, a1
; RV64IZFINXZDINX-NEXT: mv s3, a0
; RV64IZFINXZDINX-NEXT: mv a1, sp
-; RV64IZFINXZDINX-NEXT: fmv.s a0, a2
+; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: call frexpf
-; RV64IZFINXZDINX-NEXT: fmv.s s4, a0
+; RV64IZFINXZDINX-NEXT: mv s4, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV64IZFINXZDINX-NEXT: fmv.s a0, s2
+; RV64IZFINXZDINX-NEXT: mv a0, s2
; RV64IZFINXZDINX-NEXT: call frexpf
-; RV64IZFINXZDINX-NEXT: fmv.s s2, a0
+; RV64IZFINXZDINX-NEXT: mv s2, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 16
-; RV64IZFINXZDINX-NEXT: fmv.s a0, s1
+; RV64IZFINXZDINX-NEXT: mv a0, s1
; RV64IZFINXZDINX-NEXT: call frexpf
-; RV64IZFINXZDINX-NEXT: fmv.s s1, a0
+; RV64IZFINXZDINX-NEXT: mv s1, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 24
-; RV64IZFINXZDINX-NEXT: fmv.s a0, s0
+; RV64IZFINXZDINX-NEXT: mv a0, s0
; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: ld a1, 24(sp)
; RV64IZFINXZDINX-NEXT: ld a2, 16(sp)
@@ -930,25 +930,25 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
; RV32IZFINXZDINX-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: fmv.s s0, a4
-; RV32IZFINXZDINX-NEXT: fmv.s s1, a3
-; RV32IZFINXZDINX-NEXT: fmv.s s2, a2
-; RV32IZFINXZDINX-NEXT: fmv.s a2, a1
+; RV32IZFINXZDINX-NEXT: mv s0, a4
+; RV32IZFINXZDINX-NEXT: mv s1, a3
+; RV32IZFINXZDINX-NEXT: mv s2, a2
+; RV32IZFINXZDINX-NEXT: mv a2, a1
; RV32IZFINXZDINX-NEXT: mv s3, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV32IZFINXZDINX-NEXT: fmv.s a0, a2
+; RV32IZFINXZDINX-NEXT: mv a0, a2
; RV32IZFINXZDINX-NEXT: call frexpf
-; RV32IZFINXZDINX-NEXT: fmv.s s4, a0
+; RV32IZFINXZDINX-NEXT: mv s4, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 12
-; RV32IZFINXZDINX-NEXT: fmv.s a0, s2
+; RV32IZFINXZDINX-NEXT: mv a0, s2
; RV32IZFINXZDINX-NEXT: call frexpf
-; RV32IZFINXZDINX-NEXT: fmv.s s2, a0
+; RV32IZFINXZDINX-NEXT: mv s2, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 16
-; RV32IZFINXZDINX-NEXT: fmv.s a0, s1
+; RV32IZFINXZDINX-NEXT: mv a0, s1
; RV32IZFINXZDINX-NEXT: call frexpf
-; RV32IZFINXZDINX-NEXT: fmv.s s1, a0
+; RV32IZFINXZDINX-NEXT: mv s1, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 20
-; RV32IZFINXZDINX-NEXT: fmv.s a0, s0
+; RV32IZFINXZDINX-NEXT: mv a0, s0
; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: sw a0, 12(s3)
; RV32IZFINXZDINX-NEXT: sw s1, 8(s3)
@@ -972,25 +972,25 @@ define <4 x float> @test_frexp_v4f32_v4i32_only_use_fract(<4 x float> %a) nounwi
; RV64IZFINXZDINX-NEXT: sd s2, 48(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s3, 40(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s4, 32(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: fmv.s s0, a4
-; RV64IZFINXZDINX-NEXT: fmv.s s1, a3
-; RV64IZFINXZDINX-NEXT: fmv.s s2, a2
-; RV64IZFINXZDINX-NEXT: fmv.s a2, a1
+; RV64IZFINXZDINX-NEXT: mv s0, a4
+; RV64IZFINXZDINX-NEXT: mv s1, a3
+; RV64IZFINXZDINX-NEXT: mv s2, a2
+; RV64IZFINXZDINX-NEXT: mv a2, a1
; RV64IZFINXZDINX-NEXT: mv s3, a0
; RV64IZFINXZDINX-NEXT: mv a1, sp
-; RV64IZFINXZDINX-NEXT: fmv.s a0, a2
+; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: call frexpf
-; RV64IZFINXZDINX-NEXT: fmv.s s4, a0
+; RV64IZFINXZDINX-NEXT: mv s4, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV64IZFINXZDINX-NEXT: fmv.s a0, s2
+; RV64IZFINXZDINX-NEXT: mv a0, s2
; RV64IZFINXZDINX-NEXT: call frexpf
-; RV64IZFINXZDINX-NEXT: fmv.s s2, a0
+; RV64IZFINXZDINX-NEXT: mv s2, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 16
-; RV64IZFINXZDINX-NEXT: fmv.s a0, s1
+; RV64IZFINXZDINX-NEXT: mv a0, s1
; RV64IZFINXZDINX-NEXT: call frexpf
-; RV64IZFINXZDINX-NEXT: fmv.s s1, a0
+; RV64IZFINXZDINX-NEXT: mv s1, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 24
-; RV64IZFINXZDINX-NEXT: fmv.s a0, s0
+; RV64IZFINXZDINX-NEXT: mv a0, s0
; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: sw a0, 12(s3)
; RV64IZFINXZDINX-NEXT: sw s1, 8(s3)
@@ -1180,22 +1180,22 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
; RV32IZFINXZDINX-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
-; RV32IZFINXZDINX-NEXT: fmv.s s0, a4
-; RV32IZFINXZDINX-NEXT: fmv.s s1, a3
-; RV32IZFINXZDINX-NEXT: fmv.s s2, a2
-; RV32IZFINXZDINX-NEXT: fmv.s a2, a1
+; RV32IZFINXZDINX-NEXT: mv s0, a4
+; RV32IZFINXZDINX-NEXT: mv s1, a3
+; RV32IZFINXZDINX-NEXT: mv s2, a2
+; RV32IZFINXZDINX-NEXT: mv a2, a1
; RV32IZFINXZDINX-NEXT: mv s3, a0
; RV32IZFINXZDINX-NEXT: addi a1, sp, 12
-; RV32IZFINXZDINX-NEXT: fmv.s a0, a2
+; RV32IZFINXZDINX-NEXT: mv a0, a2
; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: addi a1, sp, 16
-; RV32IZFINXZDINX-NEXT: fmv.s a0, s2
+; RV32IZFINXZDINX-NEXT: mv a0, s2
; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: addi a1, sp, 20
-; RV32IZFINXZDINX-NEXT: fmv.s a0, s1
+; RV32IZFINXZDINX-NEXT: mv a0, s1
; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: addi a1, sp, 24
-; RV32IZFINXZDINX-NEXT: fmv.s a0, s0
+; RV32IZFINXZDINX-NEXT: mv a0, s0
; RV32IZFINXZDINX-NEXT: call frexpf
; RV32IZFINXZDINX-NEXT: lw a0, 24(sp)
; RV32IZFINXZDINX-NEXT: lw a1, 20(sp)
@@ -1221,22 +1221,22 @@ define <4 x i32> @test_frexp_v4f32_v4i32_only_use_exp(<4 x float> %a) nounwind {
; RV64IZFINXZDINX-NEXT: sd s1, 56(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s2, 48(sp) # 8-byte Folded Spill
; RV64IZFINXZDINX-NEXT: sd s3, 40(sp) # 8-byte Folded Spill
-; RV64IZFINXZDINX-NEXT: fmv.s s0, a4
-; RV64IZFINXZDINX-NEXT: fmv.s s1, a3
-; RV64IZFINXZDINX-NEXT: fmv.s s2, a2
-; RV64IZFINXZDINX-NEXT: fmv.s a2, a1
+; RV64IZFINXZDINX-NEXT: mv s0, a4
+; RV64IZFINXZDINX-NEXT: mv s1, a3
+; RV64IZFINXZDINX-NEXT: mv s2, a2
+; RV64IZFINXZDINX-NEXT: mv a2, a1
; RV64IZFINXZDINX-NEXT: mv s3, a0
; RV64IZFINXZDINX-NEXT: addi a1, sp, 8
-; RV64IZFINXZDINX-NEXT: fmv.s a0, a2
+; RV64IZFINXZDINX-NEXT: mv a0, a2
; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: addi a1, sp, 16
-; RV64IZFINXZDINX-NEXT: fmv.s a0, s2
+; RV64IZFINXZDINX-NEXT: mv a0, s2
; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: addi a1, sp, 24
-; RV64IZFINXZDINX-NEXT: fmv.s a0, s1
+; RV64IZFINXZDINX-NEXT: mv a0, s1
; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: addi a1, sp, 32
-; RV64IZFINXZDINX-NEXT: fmv.s a0, s0
+; RV64IZFINXZDINX-NEXT: mv a0, s0
; RV64IZFINXZDINX-NEXT: call frexpf
; RV64IZFINXZDINX-NEXT: ld a0, 32(sp)
; RV64IZFINXZDINX-NEXT: ld a1, 24(sp)
>From 06ed80406d5a65dcec2b5625969c4327696a0938 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 16 Sep 2024 15:01:10 -0700
Subject: [PATCH 13/13] fixup! Support MakeCompressible
---
.../Target/RISCV/RISCVMakeCompressible.cpp | 5 +
.../CodeGen/RISCV/make-compressible-zfinx.mir | 296 ++++++++++++++++++
2 files changed, 301 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/make-compressible-zfinx.mir
diff --git a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
index 81e03936bee424..df5501e37f8313 100644
--- a/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMakeCompressible.cpp
@@ -433,6 +433,11 @@ bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) {
BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(RISCV::PseudoMV_FPR16INX),
NewReg)
.addReg(RegImm.Reg);
+ } else if (RISCV::GPRF32RegClass.contains(RegImm.Reg)) {
+ assert(RegImm.Imm == 0);
+ BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(RISCV::PseudoMV_FPR32INX),
+ NewReg)
+ .addReg(RegImm.Reg);
} else {
// If we are looking at replacing an FPR register we don't expect to
// have any offset. The only compressible FP instructions with an offset
diff --git a/llvm/test/CodeGen/RISCV/make-compressible-zfinx.mir b/llvm/test/CodeGen/RISCV/make-compressible-zfinx.mir
new file mode 100644
index 00000000000000..d0223dc5911ad3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/make-compressible-zfinx.mir
@@ -0,0 +1,296 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -o - %s -mtriple=riscv32 -mattr=+c,+zfinx -simplify-mir \
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=CHECK %s
+# RUN: llc -o - %s -mtriple=riscv64 -mattr=+c,+zfinx -simplify-mir \
+# RUN: -run-pass=riscv-make-compressible | FileCheck --check-prefixes=CHECK %s
+
+--- |
+
+ define void @store_common_value_float(ptr %a, ptr %b, ptr %c, float %d, float %e, float %f, float %g, float %h, float %i, float %j) #0 {
+ entry:
+ store float %j, ptr %a, align 4
+ store float %j, ptr %b, align 4
+ store float %j, ptr %c, align 4
+ ret void
+ }
+
+ define void @store_common_ptr_float(float %a, float %b, float %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, ptr %p) #0 {
+ entry:
+ store volatile float %a, ptr %p, align 4
+ store volatile float %b, ptr %p, align 4
+ store volatile float %c, ptr %p, align 4
+ ret void
+ }
+
+ define void @load_common_ptr_float(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, ptr %g) #0 {
+ entry:
+ %0 = load float, ptr %g, align 4
+ %arrayidx1 = getelementptr inbounds float, ptr %g, i32 1
+ %1 = load float, ptr %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %g, i32 2
+ %2 = load float, ptr %arrayidx2, align 4
+ tail call void @load_common_ptr_float_1(float %0, float %1, float %2)
+ ret void
+ }
+
+ declare void @load_common_ptr_float_1(float, float, float) #0
+
+ define void @store_large_offset_float(ptr %p, float %a, float %b, float %c, float %d) #0 {
+ entry:
+ %0 = getelementptr inbounds float, ptr %p, i32 100
+ store volatile float %a, ptr %0, align 4
+ %1 = getelementptr inbounds float, ptr %p, i32 101
+ store volatile float %b, ptr %1, align 4
+ %2 = getelementptr inbounds float, ptr %p, i32 102
+ store volatile float %c, ptr %2, align 4
+ %3 = getelementptr inbounds float, ptr %p, i32 103
+ store volatile float %d, ptr %3, align 4
+ ret void
+ }
+
+ define void @load_large_offset_float(ptr %p) #0 {
+ entry:
+ %arrayidx = getelementptr inbounds float, ptr %p, i32 100
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds float, ptr %p, i32 101
+ %1 = load float, ptr %arrayidx1, align 4
+ %arrayidx2 = getelementptr inbounds float, ptr %p, i32 102
+ %2 = load float, ptr %arrayidx2, align 4
+ tail call void @load_large_offset_float_1(float %0, float %1, float %2)
+ ret void
+ }
+
+ declare void @load_large_offset_float_1(float, float, float) #0
+
+ define void @store_common_value_float_no_opt(ptr %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h) #0 {
+ entry:
+ store float %h, ptr %a, align 4
+ ret void
+ }
+
+ define void @store_common_ptr_float_no_opt(float %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, ptr %p) #0 {
+ entry:
+ store volatile float %a, ptr %p, align 4
+ ret void
+ }
+
+ define float @load_common_ptr_float_no_opt(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, ptr %g) #0 {
+ entry:
+ %0 = load float, ptr %g, align 4
+ ret float %0
+ }
+
+ define void @store_large_offset_float_no_opt(ptr %p, float %a, float %b) #0 {
+ entry:
+ %0 = getelementptr inbounds float, ptr %p, i32 100
+ store volatile float %a, ptr %0, align 4
+ %1 = getelementptr inbounds float, ptr %p, i32 101
+ store volatile float %b, ptr %1, align 4
+ ret void
+ }
+
+ define { float, float } @load_large_offset_float_no_opt(ptr %p) #0 {
+ entry:
+ %arrayidx = getelementptr inbounds float, ptr %p, i32 100
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds float, ptr %p, i32 101
+ %1 = load float, ptr %arrayidx1, align 4
+ %2 = insertvalue { float, float } undef, float %0, 0
+ %3 = insertvalue { float, float } %2, float %1, 1
+ ret { float, float } %3
+ }
+
+ attributes #0 = { minsize }
+
+...
+---
+name: store_common_value_float
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11, $x12
+
+ ; CHECK-LABEL: name: store_common_value_float
+ ; CHECK: liveins: $x10, $x11, $x12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x13_w = PseudoMV_FPR32INX $x0_w
+ ; CHECK-NEXT: SW_INX $x13_w, killed renamable $x10, 0 :: (store (s32) into %ir.a)
+ ; CHECK-NEXT: SW_INX $x13_w, killed renamable $x11, 0 :: (store (s32) into %ir.b)
+ ; CHECK-NEXT: SW_INX killed $x13_w, killed renamable $x12, 0 :: (store (s32) into %ir.c)
+ ; CHECK-NEXT: PseudoRET
+ SW_INX $x0_w, killed renamable $x10, 0 :: (store (s32) into %ir.a)
+ SW_INX $x0_w, killed renamable $x11, 0 :: (store (s32) into %ir.b)
+ SW_INX killed $x0_w, killed renamable $x12, 0 :: (store (s32) into %ir.c)
+ PseudoRET
+
+...
+---
+name: store_common_ptr_float
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10_w, $x11_w, $x12_w, $x16
+
+ ; CHECK-LABEL: name: store_common_ptr_float
+ ; CHECK: liveins: $x10_w, $x11_w, $x12_w, $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x13 = ADDI $x16, 0
+ ; CHECK-NEXT: SW_INX killed renamable $x10_w, $x13, 0 :: (volatile store (s32) into %ir.p)
+ ; CHECK-NEXT: SW_INX killed renamable $x11_w, $x13, 0 :: (volatile store (s32) into %ir.p)
+ ; CHECK-NEXT: SW_INX killed renamable $x12_w, killed $x13, 0 :: (volatile store (s32) into %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ SW_INX killed renamable $x10_w, renamable $x16, 0 :: (volatile store (s32) into %ir.p)
+ SW_INX killed renamable $x11_w, renamable $x16, 0 :: (volatile store (s32) into %ir.p)
+ SW_INX killed renamable $x12_w, killed renamable $x16, 0 :: (volatile store (s32) into %ir.p)
+ PseudoRET
+
+...
+---
+name: load_common_ptr_float
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_common_ptr_float
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x13 = ADDI $x16, 0
+ ; CHECK-NEXT: renamable $x10_w = LW_INX $x13, 0 :: (load (s32) from %ir.g)
+ ; CHECK-NEXT: renamable $x11_w = LW_INX $x13, 4 :: (load (s32) from %ir.arrayidx1)
+ ; CHECK-NEXT: renamable $x12_w = LW_INX killed $x13, 8 :: (load (s32) from %ir.arrayidx2)
+ ; CHECK-NEXT: PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $x10_w, implicit $x11_w, implicit $x12_w
+ renamable $x10_w = LW_INX renamable $x16, 0 :: (load (s32) from %ir.g)
+ renamable $x11_w = LW_INX renamable $x16, 4 :: (load (s32) from %ir.arrayidx1)
+ renamable $x12_w = LW_INX killed renamable $x16, 8 :: (load (s32) from %ir.arrayidx2)
+ PseudoTAIL target-flags(riscv-call) @load_common_ptr_float_1, implicit $x2, implicit $x10_w, implicit $x11_w, implicit $x12_w
+
+...
+---
+name: store_large_offset_float
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+    liveins: $x10, $x10_w, $x11_w, $x12_w, $x13_w
+
+ ; CHECK-LABEL: name: store_large_offset_float
+  ; CHECK: liveins: $x10, $x10_w, $x11_w, $x12_w, $x13_w
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x14 = ADDI $x10, 384
+ ; CHECK-NEXT: SW_INX killed renamable $x10_w, $x14, 16 :: (volatile store (s32) into %ir.0)
+ ; CHECK-NEXT: SW_INX killed renamable $x11_w, $x14, 20 :: (volatile store (s32) into %ir.1)
+ ; CHECK-NEXT: SW_INX killed renamable $x12_w, $x14, 24 :: (volatile store (s32) into %ir.2)
+ ; CHECK-NEXT: SW_INX killed renamable $x13_w, killed $x14, 28 :: (volatile store (s32) into %ir.3)
+ ; CHECK-NEXT: PseudoRET
+ SW_INX killed renamable $x10_w, renamable $x10, 400 :: (volatile store (s32) into %ir.0)
+ SW_INX killed renamable $x11_w, renamable $x10, 404 :: (volatile store (s32) into %ir.1)
+ SW_INX killed renamable $x12_w, renamable $x10, 408 :: (volatile store (s32) into %ir.2)
+ SW_INX killed renamable $x13_w, killed renamable $x10, 412 :: (volatile store (s32) into %ir.3)
+ PseudoRET
+
+...
+---
+name: load_large_offset_float
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: load_large_offset_float
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $x14 = ADDI $x10, 384
+ ; CHECK-NEXT: renamable $x11_w = LW_INX $x14, 16 :: (load (s32) from %ir.arrayidx)
+ ; CHECK-NEXT: renamable $x12_w = LW_INX $x14, 20 :: (load (s32) from %ir.arrayidx1)
+ ; CHECK-NEXT: renamable $x13_w = LW_INX killed $x14, 24 :: (load (s32) from %ir.arrayidx2)
+  ; CHECK-NEXT: PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $x11_w, implicit $x12_w, implicit $x13_w
+ renamable $x11_w = LW_INX renamable $x10, 400 :: (load (s32) from %ir.arrayidx)
+ renamable $x12_w = LW_INX renamable $x10, 404 :: (load (s32) from %ir.arrayidx1)
+ renamable $x13_w = LW_INX killed renamable $x10, 408 :: (load (s32) from %ir.arrayidx2)
+    PseudoTAIL target-flags(riscv-call) @load_large_offset_float_1, implicit $x2, implicit $x11_w, implicit $x12_w, implicit $x13_w
+
+...
+---
+name: store_common_value_float_no_opt
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x16_w
+
+ ; CHECK-LABEL: name: store_common_value_float_no_opt
+ ; CHECK: liveins: $x10, $x16_w
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: SW_INX killed renamable $x16_w, killed renamable $x10, 0 :: (store (s32) into %ir.a)
+ ; CHECK-NEXT: PseudoRET
+ SW_INX killed renamable $x16_w, killed renamable $x10, 0 :: (store (s32) into %ir.a)
+ PseudoRET
+
+...
+---
+name: store_common_ptr_float_no_opt
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16, $x10_w
+
+ ; CHECK-LABEL: name: store_common_ptr_float_no_opt
+ ; CHECK: liveins: $x16, $x10_w
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: SW_INX killed renamable $x10_w, killed renamable $x16, 0 :: (volatile store (s32) into %ir.p)
+ ; CHECK-NEXT: PseudoRET
+ SW_INX killed renamable $x10_w, killed renamable $x16, 0 :: (volatile store (s32) into %ir.p)
+ PseudoRET
+
+...
+---
+name: load_common_ptr_float_no_opt
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x16
+
+ ; CHECK-LABEL: name: load_common_ptr_float_no_opt
+ ; CHECK: liveins: $x16
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x10_w = LW_INX killed renamable $x16, 0 :: (load (s32) from %ir.g)
+ ; CHECK-NEXT: PseudoRET implicit $x10_w
+ renamable $x10_w = LW_INX killed renamable $x16, 0 :: (load (s32) from %ir.g)
+ PseudoRET implicit $x10_w
+
+...
+---
+name: store_large_offset_float_no_opt
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11_w, $x12_w
+
+ ; CHECK-LABEL: name: store_large_offset_float_no_opt
+ ; CHECK: liveins: $x10, $x11_w, $x12_w
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: SW_INX killed renamable $x11_w, renamable $x10, 400 :: (volatile store (s32) into %ir.0)
+ ; CHECK-NEXT: SW_INX killed renamable $x12_w, killed renamable $x10, 404 :: (volatile store (s32) into %ir.1)
+ ; CHECK-NEXT: PseudoRET
+ SW_INX killed renamable $x11_w, renamable $x10, 400 :: (volatile store (s32) into %ir.0)
+ SW_INX killed renamable $x12_w, killed renamable $x10, 404 :: (volatile store (s32) into %ir.1)
+ PseudoRET
+
+...
+---
+name: load_large_offset_float_no_opt
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+
+ ; CHECK-LABEL: name: load_large_offset_float_no_opt
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $x11_w = LW_INX renamable $x10, 400 :: (load (s32) from %ir.arrayidx)
+ ; CHECK-NEXT: renamable $x12_w = LW_INX killed renamable $x10, 404 :: (load (s32) from %ir.arrayidx1)
+ ; CHECK-NEXT: PseudoRET implicit $x11_w, implicit $x12_w
+ renamable $x11_w = LW_INX renamable $x10, 400 :: (load (s32) from %ir.arrayidx)
+ renamable $x12_w = LW_INX killed renamable $x10, 404 :: (load (s32) from %ir.arrayidx1)
+ PseudoRET implicit $x11_w, implicit $x12_w
+
+...
More information about the llvm-commits
mailing list