[llvm] fe558ef - [RISCV][CodeGen] Support Zfinx codegen
Shao-Ce SUN via llvm-commits
llvm-commits@lists.llvm.org
Tue May 2 09:13:48 PDT 2023
Author: Shao-Ce SUN
Date: 2023-05-03T00:13:38+08:00
New Revision: fe558efe71c12a665d4e1b5e7638baaacfe84cf7
URL: https://github.com/llvm/llvm-project/commit/fe558efe71c12a665d4e1b5e7638baaacfe84cf7
DIFF: https://github.com/llvm/llvm-project/commit/fe558efe71c12a665d4e1b5e7638baaacfe84cf7.diff
LOG: [RISCV][CodeGen] Support Zfinx codegen
This patch was split from D122918. Co-authors: @liaolucy, @realqhc
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D148874
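Zfinx provides single-precision floating-point instructions that operate on the
integer register file (GPRs) rather than a separate FPR file, so with this patch
f32 values are assigned to the GPRF32 register class and the *_INX instruction
variants are selected. As a rough illustration (mirroring the fadd_s case in the
updated float-arith.ll test below, not an addition to the commit itself), given:

  ; Same IR as the fadd_s test in llvm/test/CodeGen/RISCV/float-arith.ll.
  define float @fadd_s(float %a, float %b) nounwind {
    %1 = fadd float %a, %b
    ret float %1
  }

llc -mtriple=riscv64 -mattr=+zfinx -target-abi=lp64 now selects the in-X-register
form on the argument GPRs:

  fadd_s:
    fadd.s a0, a0, a1
    ret

whereas an F-extension build (-mattr=+f -target-abi=lp64f) produces
fadd.s fa0, fa0, fa1 on FPRs, and a build without any FP extension falls back to
a __addsf3 libcall.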
Added:
Modified:
llvm/docs/RISCVUsage.rst
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/RISCV/RISCVInstrFormats.td
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
llvm/lib/Target/RISCV/RISCVInstrInfo.td
llvm/lib/Target/RISCV/RISCVInstrInfoF.td
llvm/lib/Target/RISCV/RISCVSubtarget.h
llvm/test/CodeGen/RISCV/float-arith-strict.ll
llvm/test/CodeGen/RISCV/float-arith.ll
llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
llvm/test/CodeGen/RISCV/float-br-fcmp.ll
llvm/test/CodeGen/RISCV/float-convert-strict.ll
llvm/test/CodeGen/RISCV/float-convert.ll
llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
llvm/test/CodeGen/RISCV/float-fcmp.ll
llvm/test/CodeGen/RISCV/float-frem.ll
llvm/test/CodeGen/RISCV/float-imm.ll
llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
llvm/test/CodeGen/RISCV/float-intrinsics.ll
llvm/test/CodeGen/RISCV/float-isnan.ll
llvm/test/CodeGen/RISCV/float-mem.ll
llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
llvm/test/CodeGen/RISCV/float-round-conv.ll
llvm/test/CodeGen/RISCV/float-select-fcmp.ll
llvm/test/CodeGen/RISCV/float-select-icmp.ll
llvm/test/CodeGen/RISCV/rv64f-float-convert.ll
Removed:
################################################################################
diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst
index ae1f91170fc2..5ec8be1a0faf 100644
--- a/llvm/docs/RISCVUsage.rst
+++ b/llvm/docs/RISCVUsage.rst
@@ -103,7 +103,7 @@ on support follow.
``Zdinx`` Assembly Support
``Zfh`` Supported
``Zfhmin`` Supported
- ``Zfinx`` Assembly Support
+ ``Zfinx`` Supported
``Zhinx`` Assembly Support
``Zhinxmin`` Assembly Support
``Zicbom`` Assembly Support
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 8c303a48b98e..08c2f741d06d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -884,7 +884,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
Opc = RISCV::FMV_H_X;
break;
case MVT::f32:
- Opc = RISCV::FMV_W_X;
+ Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;
break;
case MVT::f64:
// For RV32, we can't move from a GPR, we need to convert instead. This
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d8ebe5ea9355..e1c1d621d287 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -120,6 +120,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
if (Subtarget.hasStdExtD())
addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
+ if (Subtarget.hasStdExtZfinx())
+ addRegisterClass(MVT::f32, &RISCV::GPRF32RegClass);
static const MVT::SimpleValueType BoolVecVTs[] = {
MVT::nxv1i1, MVT::nxv2i1, MVT::nxv4i1, MVT::nxv8i1,
@@ -407,7 +409,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FPOWI, MVT::i32, Custom);
}
- if (Subtarget.hasStdExtF()) {
+ if (Subtarget.hasStdExtFOrZfinx()) {
setOperationAction(FPLegalNodeTypes, MVT::f32, Legal);
setOperationAction(FPRndMode, MVT::f32,
Subtarget.hasStdExtZfa() ? Legal : Custom);
@@ -424,7 +426,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
}
- if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
+ if (Subtarget.hasStdExtFOrZfinx() && Subtarget.is64Bit())
setOperationAction(ISD::BITCAST, MVT::i32, Custom);
if (Subtarget.hasStdExtD()) {
@@ -462,7 +464,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::LROUND, MVT::i32, Custom);
}
- if (Subtarget.hasStdExtF()) {
+ if (Subtarget.hasStdExtFOrZfinx()) {
setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, XLenVT,
Custom);
@@ -1071,7 +1073,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
Custom);
if (Subtarget.hasStdExtZfhOrZfhmin())
setOperationAction(ISD::BITCAST, MVT::f16, Custom);
- if (Subtarget.hasStdExtF())
+ if (Subtarget.hasStdExtFOrZfinx())
setOperationAction(ISD::BITCAST, MVT::f32, Custom);
if (Subtarget.hasStdExtD())
setOperationAction(ISD::BITCAST, MVT::f64, Custom);
@@ -1122,7 +1124,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
if (Subtarget.is64Bit())
setTargetDAGCombine(ISD::SRA);
- if (Subtarget.hasStdExtF())
+ if (Subtarget.hasStdExtFOrZfinx())
setTargetDAGCombine({ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM});
if (Subtarget.hasStdExtZbb())
@@ -1135,7 +1137,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::BITREVERSE);
if (Subtarget.hasStdExtZfhOrZfhmin())
setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
- if (Subtarget.hasStdExtF())
+ if (Subtarget.hasStdExtFOrZfinx())
setTargetDAGCombine({ISD::ZERO_EXTEND, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT});
if (Subtarget.hasVInstructions())
@@ -1795,7 +1797,7 @@ bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
if (VT == MVT::f16)
IsLegalVT = Subtarget.hasStdExtZfhOrZfhmin();
else if (VT == MVT::f32)
- IsLegalVT = Subtarget.hasStdExtF();
+ IsLegalVT = Subtarget.hasStdExtFOrZfinx();
else if (VT == MVT::f64)
IsLegalVT = Subtarget.hasStdExtD();
@@ -1859,7 +1861,7 @@ MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
EVT VT) const {
// Use f32 to pass f16 if it is legal and Zfh/Zfhmin is not enabled.
// We might still end up using a GPR but that will be decided based on ABI.
- if (VT == MVT::f16 && Subtarget.hasStdExtF() &&
+ if (VT == MVT::f16 && Subtarget.hasStdExtFOrZfinx() &&
!Subtarget.hasStdExtZfhOrZfhmin())
return MVT::f32;
@@ -1871,7 +1873,7 @@ unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context
EVT VT) const {
// Use f32 to pass f16 if it is legal and Zfh/Zfhmin is not enabled.
// We might still end up using a GPR but that will be decided based on ABI.
- if (VT == MVT::f16 && Subtarget.hasStdExtF() &&
+ if (VT == MVT::f16 && Subtarget.hasStdExtFOrZfinx() &&
!Subtarget.hasStdExtZfhOrZfhmin())
return 1;
@@ -4395,7 +4397,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
return FPConv;
}
if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
- Subtarget.hasStdExtF()) {
+ Subtarget.hasStdExtFOrZfinx()) {
SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
SDValue FPConv =
DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
@@ -9099,7 +9101,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
} else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
- Subtarget.hasStdExtF()) {
+ Subtarget.hasStdExtFOrZfinx()) {
SDValue FPConv =
DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
@@ -12642,6 +12644,7 @@ static bool isSelectPseudo(MachineInstr &MI) {
case RISCV::Select_GPR_Using_CC_GPR:
case RISCV::Select_FPR16_Using_CC_GPR:
case RISCV::Select_FPR32_Using_CC_GPR:
+ case RISCV::Select_FPR32INX_Using_CC_GPR:
case RISCV::Select_FPR64_Using_CC_GPR:
return true;
}
@@ -13027,6 +13030,14 @@ static MachineBasicBlock *emitFROUND(MachineInstr &MI, MachineBasicBlock *MBB,
FSGNJXOpc = RISCV::FSGNJX_S;
RC = &RISCV::FPR32RegClass;
break;
+ case RISCV::PseudoFROUND_S_INX:
+ CmpOpc = RISCV::FLT_S_INX;
+ F2IOpc = RISCV::FCVT_W_S_INX;
+ I2FOpc = RISCV::FCVT_S_W_INX;
+ FSGNJOpc = RISCV::FSGNJ_S_INX;
+ FSGNJXOpc = RISCV::FSGNJX_S_INX;
+ RC = &RISCV::GPRF32RegClass;
+ break;
case RISCV::PseudoFROUND_D:
assert(Subtarget.is64Bit() && "Expected 64-bit GPR.");
CmpOpc = RISCV::FLT_D;
@@ -13124,6 +13135,7 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
case RISCV::Select_GPR_Using_CC_GPR:
case RISCV::Select_FPR16_Using_CC_GPR:
case RISCV::Select_FPR32_Using_CC_GPR:
+ case RISCV::Select_FPR32INX_Using_CC_GPR:
case RISCV::Select_FPR64_Using_CC_GPR:
return emitSelectPseudo(MI, BB, Subtarget);
case RISCV::BuildPairF64Pseudo:
@@ -13136,8 +13148,12 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
case RISCV::PseudoQuietFLE_S:
return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
+ case RISCV::PseudoQuietFLE_S_INX:
+ return emitQuietFCMP(MI, BB, RISCV::FLE_S_INX, RISCV::FEQ_S_INX, Subtarget);
case RISCV::PseudoQuietFLT_S:
return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
+ case RISCV::PseudoQuietFLT_S_INX:
+ return emitQuietFCMP(MI, BB, RISCV::FLT_S_INX, RISCV::FEQ_S_INX, Subtarget);
case RISCV::PseudoQuietFLE_D:
return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
case RISCV::PseudoQuietFLT_D:
@@ -13323,6 +13339,7 @@ RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
RISCV::PseudoVFCVT_F_X_V_MF4_MASK);
case RISCV::PseudoFROUND_H:
case RISCV::PseudoFROUND_S:
+ case RISCV::PseudoFROUND_S_INX:
case RISCV::PseudoFROUND_D:
return emitFROUND(MI, BB, Subtarget);
}
@@ -15573,7 +15590,7 @@ bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
return VT.isVector() ? Subtarget.hasVInstructionsF16()
: Subtarget.hasStdExtZfh();
case MVT::f32:
- return Subtarget.hasStdExtF();
+ return Subtarget.hasStdExtFOrZfinx();
case MVT::f64:
return Subtarget.hasStdExtD();
default:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index 2f3c6a6e5ccf..6d9094e914b3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -226,7 +226,7 @@ class Pseudo<dag outs, dag ins, list<dag> pattern, string opcodestr = "", string
let isCodeGenOnly = 1;
}
-class PseudoQuietFCMP<RegisterClass Ty>
+class PseudoQuietFCMP<DAGOperand Ty>
: Pseudo<(outs GPR:$rd), (ins Ty:$rs1, Ty:$rs2), []> {
let hasSideEffects = 1;
let mayLoad = 0;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index f3c7385965c8..f2f547e991d6 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1271,6 +1271,7 @@ bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
case RISCV::FSGNJ_D:
case RISCV::FSGNJ_S:
case RISCV::FSGNJ_H:
+ case RISCV::FSGNJ_S_INX:
// The canonical floating-point move is fsgnj rd, rs, rs.
return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
@@ -1300,6 +1301,7 @@ RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
case RISCV::FSGNJ_D:
case RISCV::FSGNJ_S:
case RISCV::FSGNJ_H:
+ case RISCV::FSGNJ_S_INX:
// The canonical floating-point move is fsgnj rd, rs, rs.
if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 6a34a218fcdb..59041fab1fef 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1438,7 +1438,7 @@ def PseudoCCSUBW : Pseudo<(outs GPR:$dst),
Sched<[WriteSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB, ReadSFB]>;
}
-multiclass SelectCC_GPR_rrirr<RegisterClass valty> {
+multiclass SelectCC_GPR_rrirr<DAGOperand valty> {
let usesCustomInserter = 1 in
def _Using_CC_GPR : Pseudo<(outs valty:$dst),
(ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
index 0107c66289ff..0ac5d5177a70 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -161,7 +161,7 @@ def frmarg : Operand<XLenVT> {
//===----------------------------------------------------------------------===//
let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
-class FPLoad_r<bits<3> funct3, string opcodestr, RegisterClass rty,
+class FPLoad_r<bits<3> funct3, string opcodestr, DAGOperand rty,
SchedWrite sw>
: RVInstI<funct3, OPC_LOAD_FP, (outs rty:$rd),
(ins GPRMem:$rs1, simm12:$imm12),
@@ -169,7 +169,7 @@ class FPLoad_r<bits<3> funct3, string opcodestr, RegisterClass rty,
Sched<[sw, ReadFMemBase]>;
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
-class FPStore_r<bits<3> funct3, string opcodestr, RegisterClass rty,
+class FPStore_r<bits<3> funct3, string opcodestr, DAGOperand rty,
SchedWrite sw>
: RVInstS<funct3, OPC_STORE_FP, (outs),
(ins rty:$rs2, GPRMem:$rs1, simm12:$imm12),
@@ -268,7 +268,7 @@ multiclass FPCmp_rr_m<bits<7> funct7, bits<3> funct3, string opcodestr,
def Ext.Suffix : FPCmp_rr<funct7, funct3, opcodestr, Ext.Reg, Commutable>;
}
-class PseudoFROUND<RegisterClass Ty>
+class PseudoFROUND<DAGOperand Ty>
: Pseudo<(outs Ty:$rd), (ins Ty:$rs1, Ty:$rs2, ixlenimm:$rm),
[(set Ty:$rd, (riscv_fround Ty:$rs1, Ty:$rs2, timm:$rm))]> {
let hasSideEffects = 0;
@@ -433,6 +433,10 @@ def : InstAlias<"fgt.s $rd, $rs, $rt",
(FLT_S_INX GPR:$rd, FPR32INX:$rt, FPR32INX:$rs), 0>;
def : InstAlias<"fge.s $rd, $rs, $rt",
(FLE_S_INX GPR:$rd, FPR32INX:$rt, FPR32INX:$rs), 0>;
+let usesCustomInserter = 1 in {
+def PseudoQuietFLE_S_INX : PseudoQuietFCMP<FPR32INX>;
+def PseudoQuietFLT_S_INX : PseudoQuietFCMP<FPR32INX>;
+}
} // Predicates = [HasStdExtZfinx]
//===----------------------------------------------------------------------===//
@@ -450,18 +454,39 @@ defvar FRM_DYN = 0b111;
def fpimm0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(+0.0); }]>;
/// Generic pattern classes
-class PatSetCC<RegisterClass Ty, SDPatternOperator OpNode, CondCode Cond, RVInst Inst>
+class PatSetCC<DAGOperand Ty, SDPatternOperator OpNode, CondCode Cond,
+ RVInst Inst>
: Pat<(OpNode Ty:$rs1, Ty:$rs2, Cond), (Inst $rs1, $rs2)>;
+multiclass PatSetCC_m<SDPatternOperator OpNode, CondCode Cond,
+ RVInst Inst, list<ExtInfo_r> Exts> {
+ foreach Ext = Exts in
+ let Predicates = Ext.Predicates in
+ def Ext.Suffix : PatSetCC<Ext.Reg, OpNode, Cond,
+ !cast<RVInst>(Inst#Ext.Suffix)>;
+}
class PatFprFpr<SDPatternOperator OpNode, RVInstR Inst,
- RegisterClass RegTy>
+ DAGOperand RegTy>
: Pat<(OpNode RegTy:$rs1, RegTy:$rs2), (Inst $rs1, $rs2)>;
+multiclass PatFprFpr_m<SDPatternOperator OpNode, RVInstR Inst,
+ list<ExtInfo_r> Exts> {
+ foreach Ext = Exts in
+ let Predicates = Ext.Predicates in
+ def Ext.Suffix : PatFprFpr<OpNode, !cast<RVInstR>(Inst#Ext.Suffix),
+ Ext.Reg>;
+}
class PatFprFprDynFrm<SDPatternOperator OpNode, RVInstRFrm Inst,
- RegisterClass RegTy>
+ DAGOperand RegTy>
: Pat<(OpNode RegTy:$rs1, RegTy:$rs2), (Inst $rs1, $rs2, FRM_DYN)>;
-
-let Predicates = [HasStdExtF] in {
+multiclass PatFprFprDynFrm_m<SDPatternOperator OpNode, RVInstRFrm Inst,
+ list<ExtInfo_r> Exts> {
+ foreach Ext = Exts in
+ let Predicates = Ext.Predicates in
+ def Ext.Suffix : PatFprFprDynFrm<OpNode,
+ !cast<RVInstRFrm>(Inst#Ext.Suffix),
+ Ext.Reg>;
+}
/// Float conversion operations
@@ -469,20 +494,32 @@ let Predicates = [HasStdExtF] in {
// are defined later.
/// Float arithmetic operations
+defm : PatFprFprDynFrm_m<any_fadd, FADD_S, FINX>;
+defm : PatFprFprDynFrm_m<any_fsub, FSUB_S, FINX>;
+defm : PatFprFprDynFrm_m<any_fmul, FMUL_S, FINX>;
+defm : PatFprFprDynFrm_m<any_fdiv, FDIV_S, FINX>;
-def : PatFprFprDynFrm<any_fadd, FADD_S, FPR32>;
-def : PatFprFprDynFrm<any_fsub, FSUB_S, FPR32>;
-def : PatFprFprDynFrm<any_fmul, FMUL_S, FPR32>;
-def : PatFprFprDynFrm<any_fdiv, FDIV_S, FPR32>;
-
+let Predicates = [HasStdExtF] in {
def : Pat<(any_fsqrt FPR32:$rs1), (FSQRT_S FPR32:$rs1, FRM_DYN)>;
def : Pat<(fneg FPR32:$rs1), (FSGNJN_S $rs1, $rs1)>;
def : Pat<(fabs FPR32:$rs1), (FSGNJX_S $rs1, $rs1)>;
def : Pat<(riscv_fpclass FPR32:$rs1), (FCLASS_S $rs1)>;
+} // Predicates = [HasStdExtF]
+
+let Predicates = [HasStdExtZfinx] in {
+def : Pat<(any_fsqrt FPR32INX:$rs1), (FSQRT_S_INX FPR32INX:$rs1, FRM_DYN)>;
-def : PatFprFpr<fcopysign, FSGNJ_S, FPR32>;
+def : Pat<(fneg FPR32INX:$rs1), (FSGNJN_S_INX $rs1, $rs1)>;
+def : Pat<(fabs FPR32INX:$rs1), (FSGNJX_S_INX $rs1, $rs1)>;
+
+def : Pat<(riscv_fpclass FPR32INX:$rs1), (FCLASS_S_INX $rs1)>;
+} // Predicates = [HasStdExtZfinx]
+
+defm : PatFprFpr_m<fcopysign, FSGNJ_S, FINX>;
+
+let Predicates = [HasStdExtF] in {
def : Pat<(fcopysign FPR32:$rs1, (fneg FPR32:$rs2)), (FSGNJN_S $rs1, $rs2)>;
// fmadd: rs1 * rs2 + rs3
@@ -504,25 +541,51 @@ def : Pat<(any_fma (fneg FPR32:$rs1), FPR32:$rs2, (fneg FPR32:$rs3)),
// fnmadd: -(rs1 * rs2 + rs3) (the nsz flag on the FMA)
def : Pat<(fneg (any_fma_nsz FPR32:$rs1, FPR32:$rs2, FPR32:$rs3)),
(FNMADD_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, FRM_DYN)>;
+} // Predicates = [HasStdExtF]
+
+let Predicates = [HasStdExtZfinx] in {
+def : Pat<(fcopysign FPR32INX:$rs1, (fneg FPR32INX:$rs2)), (FSGNJN_S_INX $rs1, $rs2)>;
+
+// fmadd: rs1 * rs2 + rs3
+def : Pat<(any_fma FPR32INX:$rs1, FPR32INX:$rs2, FPR32INX:$rs3),
+ (FMADD_S_INX $rs1, $rs2, $rs3, FRM_DYN)>;
+
+// fmsub: rs1 * rs2 - rs3
+def : Pat<(any_fma FPR32INX:$rs1, FPR32INX:$rs2, (fneg FPR32INX:$rs3)),
+ (FMSUB_S_INX FPR32INX:$rs1, FPR32INX:$rs2, FPR32INX:$rs3, FRM_DYN)>;
+
+// fnmsub: -rs1 * rs2 + rs3
+def : Pat<(any_fma (fneg FPR32INX:$rs1), FPR32INX:$rs2, FPR32INX:$rs3),
+ (FNMSUB_S_INX FPR32INX:$rs1, FPR32INX:$rs2, FPR32INX:$rs3, FRM_DYN)>;
+
+// fnmadd: -rs1 * rs2 - rs3
+def : Pat<(any_fma (fneg FPR32INX:$rs1), FPR32INX:$rs2, (fneg FPR32INX:$rs3)),
+ (FNMADD_S_INX FPR32INX:$rs1, FPR32INX:$rs2, FPR32INX:$rs3, FRM_DYN)>;
+
+// fnmadd: -(rs1 * rs2 + rs3) (the nsz flag on the FMA)
+def : Pat<(fneg (any_fma_nsz FPR32INX:$rs1, FPR32INX:$rs2, FPR32INX:$rs3)),
+ (FNMADD_S_INX FPR32INX:$rs1, FPR32INX:$rs2, FPR32INX:$rs3, FRM_DYN)>;
+} // Predicates = [HasStdExtZfinx]
// The ratified 20191213 ISA spec defines fmin and fmax in a way that matches
// LLVM's fminnum and fmaxnum
// <https://github.com/riscv/riscv-isa-manual/commit/cd20cee7efd9bac7c5aa127ec3b451749d2b3cce>.
-def : PatFprFpr<fminnum, FMIN_S, FPR32>;
-def : PatFprFpr<fmaxnum, FMAX_S, FPR32>;
+defm : PatFprFpr_m<fminnum, FMIN_S, FINX>;
+defm : PatFprFpr_m<fmaxnum, FMAX_S, FINX>;
/// Setcc
// FIXME: SETEQ/SETLT/SETLE imply nonans, can we pick better instructions for
// strict versions of those.
// Match non-signaling FEQ_S
-def : PatSetCC<FPR32, any_fsetcc, SETEQ, FEQ_S>;
-def : PatSetCC<FPR32, any_fsetcc, SETOEQ, FEQ_S>;
-def : PatSetCC<FPR32, strict_fsetcc, SETLT, PseudoQuietFLT_S>;
-def : PatSetCC<FPR32, strict_fsetcc, SETOLT, PseudoQuietFLT_S>;
-def : PatSetCC<FPR32, strict_fsetcc, SETLE, PseudoQuietFLE_S>;
-def : PatSetCC<FPR32, strict_fsetcc, SETOLE, PseudoQuietFLE_S>;
+defm : PatSetCC_m<any_fsetcc, SETEQ, FEQ_S, FINX>;
+defm : PatSetCC_m<any_fsetcc, SETOEQ, FEQ_S, FINX>;
+defm : PatSetCC_m<strict_fsetcc, SETLT, PseudoQuietFLT_S, FINX>;
+defm : PatSetCC_m<strict_fsetcc, SETOLT, PseudoQuietFLT_S, FINX>;
+defm : PatSetCC_m<strict_fsetcc, SETLE, PseudoQuietFLE_S, FINX>;
+defm : PatSetCC_m<strict_fsetcc, SETOLE, PseudoQuietFLE_S, FINX>;
+let Predicates = [HasStdExtF] in {
// Match signaling FEQ_S
def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs2, SETEQ),
(AND (FLE_S $rs1, $rs2),
@@ -535,12 +598,29 @@ def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETEQ),
(FLE_S $rs1, $rs1)>;
def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETOEQ),
(FLE_S $rs1, $rs1)>;
+} // Predicates = [HasStdExtF]
+
+let Predicates = [HasStdExtZfinx] in {
+// Match signaling FEQ_S
+def : Pat<(strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs2, SETEQ),
+ (AND (FLE_S_INX $rs1, $rs2),
+ (FLE_S_INX $rs2, $rs1))>;
+def : Pat<(strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs2, SETOEQ),
+ (AND (FLE_S_INX $rs1, $rs2),
+ (FLE_S_INX $rs2, $rs1))>;
+// If both operands are the same, use a single FLE.
+def : Pat<(strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs1, SETEQ),
+ (FLE_S_INX $rs1, $rs1)>;
+def : Pat<(strict_fsetccs FPR32INX:$rs1, FPR32INX:$rs1, SETOEQ),
+ (FLE_S_INX $rs1, $rs1)>;
+} // Predicates = [HasStdExtZfinx]
-def : PatSetCC<FPR32, any_fsetccs, SETLT, FLT_S>;
-def : PatSetCC<FPR32, any_fsetccs, SETOLT, FLT_S>;
-def : PatSetCC<FPR32, any_fsetccs, SETLE, FLE_S>;
-def : PatSetCC<FPR32, any_fsetccs, SETOLE, FLE_S>;
+defm : PatSetCC_m<any_fsetccs, SETLT, FLT_S, FINX>;
+defm : PatSetCC_m<any_fsetccs, SETOLT, FLT_S, FINX>;
+defm : PatSetCC_m<any_fsetccs, SETLE, FLE_S, FINX>;
+defm : PatSetCC_m<any_fsetccs, SETOLE, FLE_S, FINX>;
+let Predicates = [HasStdExtF] in {
defm Select_FPR32 : SelectCC_GPR_rrirr<FPR32>;
def PseudoFROUND_S : PseudoFROUND<FPR32>;
@@ -555,11 +635,33 @@ defm : StPat<store, FSW, FPR32, f32>;
} // Predicates = [HasStdExtF]
+let Predicates = [HasStdExtZfinx] in {
+defm Select_FPR32INX : SelectCC_GPR_rrirr<FPR32INX>;
+
+def PseudoFROUND_S_INX : PseudoFROUND<FPR32INX>;
+
+/// Loads
+def : Pat<(f32 (load (AddrRegImm GPR:$rs1, simm12:$imm12))),
+ (COPY_TO_REGCLASS (LW GPR:$rs1, simm12:$imm12), GPRF32)>;
+
+/// Stores
+def : Pat<(store (f32 FPR32INX:$rs2), (AddrRegImm GPR:$rs1, simm12:$imm12)),
+ (SW (COPY_TO_REGCLASS FPR32INX:$rs2, GPR), GPR:$rs1, simm12:$imm12)>;
+} // Predicates = [HasStdExtZfinx]
+
let Predicates = [HasStdExtF, IsRV32] in {
// Moves (no conversion)
def : Pat<(bitconvert (i32 GPR:$rs1)), (FMV_W_X GPR:$rs1)>;
def : Pat<(i32 (bitconvert FPR32:$rs1)), (FMV_X_W FPR32:$rs1)>;
+} // Predicates = [HasStdExtF, IsRV32]
+
+let Predicates = [HasStdExtZfinx, IsRV32] in {
+// Moves (no conversion)
+def : Pat<(f32 (bitconvert (i32 GPR:$rs1))), (COPY_TO_REGCLASS GPR:$rs1, GPRF32)>;
+def : Pat<(i32 (bitconvert FPR32INX:$rs1)), (COPY_TO_REGCLASS FPR32INX:$rs1, GPR)>;
+} // Predicates = [HasStdExtZfinx, IsRV32]
+let Predicates = [HasStdExtF, IsRV32] in {
// float->[u]int. Round-to-zero must be used.
def : Pat<(i32 (any_fp_to_sint FPR32:$rs1)), (FCVT_W_S $rs1, FRM_RTZ)>;
def : Pat<(i32 (any_fp_to_uint FPR32:$rs1)), (FCVT_WU_S $rs1, FRM_RTZ)>;
@@ -579,12 +681,30 @@ def : Pat<(any_sint_to_fp (i32 GPR:$rs1)), (FCVT_S_W $rs1, FRM_DYN)>;
def : Pat<(any_uint_to_fp (i32 GPR:$rs1)), (FCVT_S_WU $rs1, FRM_DYN)>;
} // Predicates = [HasStdExtF, IsRV32]
+let Predicates = [HasStdExtZfinx, IsRV32] in {
+// float->[u]int. Round-to-zero must be used.
+def : Pat<(i32 (any_fp_to_sint FPR32INX:$rs1)), (FCVT_W_S_INX $rs1, FRM_RTZ)>;
+def : Pat<(i32 (any_fp_to_uint FPR32INX:$rs1)), (FCVT_WU_S_INX $rs1, FRM_RTZ)>;
+
+// Saturating float->[u]int32.
+def : Pat<(i32 (riscv_fcvt_x FPR32INX:$rs1, timm:$frm)), (FCVT_W_S_INX $rs1, timm:$frm)>;
+def : Pat<(i32 (riscv_fcvt_xu FPR32INX:$rs1, timm:$frm)), (FCVT_WU_S_INX $rs1, timm:$frm)>;
+
+// float->int32 with current rounding mode.
+def : Pat<(i32 (any_lrint FPR32INX:$rs1)), (FCVT_W_S_INX $rs1, FRM_DYN)>;
+
+// float->int32 rounded to nearest with ties rounded away from zero.
+def : Pat<(i32 (any_lround FPR32INX:$rs1)), (FCVT_W_S_INX $rs1, FRM_RMM)>;
+
+// [u]int->float. Match GCC and default to using dynamic rounding mode.
+def : Pat<(any_sint_to_fp (i32 GPR:$rs1)), (FCVT_S_W_INX $rs1, FRM_DYN)>;
+def : Pat<(any_uint_to_fp (i32 GPR:$rs1)), (FCVT_S_WU_INX $rs1, FRM_DYN)>;
+} // Predicates = [HasStdExtZfinx, IsRV32]
+
let Predicates = [HasStdExtF, IsRV64] in {
// Moves (no conversion)
def : Pat<(riscv_fmv_w_x_rv64 GPR:$src), (FMV_W_X GPR:$src)>;
def : Pat<(riscv_fmv_x_anyextw_rv64 FPR32:$src), (FMV_X_W FPR32:$src)>;
-def : Pat<(sext_inreg (riscv_fmv_x_anyextw_rv64 FPR32:$src), i32),
- (FMV_X_W FPR32:$src)>;
// Use target specific isd nodes to help us remember the result is sign
// extended. Matching sext_inreg+fptoui/fptosi may cause the conversion to be
@@ -614,3 +734,37 @@ def : Pat<(any_uint_to_fp (i64 (zexti32 (i64 GPR:$rs1)))), (FCVT_S_WU $rs1, FRM_
def : Pat<(any_sint_to_fp (i64 GPR:$rs1)), (FCVT_S_L $rs1, FRM_DYN)>;
def : Pat<(any_uint_to_fp (i64 GPR:$rs1)), (FCVT_S_LU $rs1, FRM_DYN)>;
} // Predicates = [HasStdExtF, IsRV64]
+
+let Predicates = [HasStdExtZfinx, IsRV64] in {
+// Moves (no conversion)
+def : Pat<(riscv_fmv_w_x_rv64 GPR:$src), (COPY_TO_REGCLASS GPR:$src, GPRF32)>;
+def : Pat<(riscv_fmv_x_anyextw_rv64 GPRF32:$src), (COPY_TO_REGCLASS GPRF32:$src, GPR)>;
+
+// Use target specific isd nodes to help us remember the result is sign
+// extended. Matching sext_inreg+fptoui/fptosi may cause the conversion to be
+// duplicated if it has another user that didn't need the sign_extend.
+def : Pat<(riscv_any_fcvt_w_rv64 FPR32INX:$rs1, timm:$frm), (FCVT_W_S_INX $rs1, timm:$frm)>;
+def : Pat<(riscv_any_fcvt_wu_rv64 FPR32INX:$rs1, timm:$frm), (FCVT_WU_S_INX $rs1, timm:$frm)>;
+
+// float->[u]int64. Round-to-zero must be used.
+def : Pat<(i64 (any_fp_to_sint FPR32INX:$rs1)), (FCVT_L_S_INX $rs1, FRM_RTZ)>;
+def : Pat<(i64 (any_fp_to_uint FPR32INX:$rs1)), (FCVT_LU_S_INX $rs1, FRM_RTZ)>;
+
+// Saturating float->[u]int64.
+def : Pat<(i64 (riscv_fcvt_x FPR32INX:$rs1, timm:$frm)), (FCVT_L_S_INX $rs1, timm:$frm)>;
+def : Pat<(i64 (riscv_fcvt_xu FPR32INX:$rs1, timm:$frm)), (FCVT_LU_S_INX $rs1, timm:$frm)>;
+
+// float->int64 with current rounding mode.
+def : Pat<(i64 (any_lrint FPR32INX:$rs1)), (FCVT_L_S_INX $rs1, FRM_DYN)>;
+def : Pat<(i64 (any_llrint FPR32INX:$rs1)), (FCVT_L_S_INX $rs1, FRM_DYN)>;
+
+// float->int64 rounded to nearest with ties rounded away from zero.
+def : Pat<(i64 (any_lround FPR32INX:$rs1)), (FCVT_L_S_INX $rs1, FRM_DYN)>;
+def : Pat<(i64 (any_llround FPR32INX:$rs1)), (FCVT_L_S_INX $rs1, FRM_DYN)>;
+
+// [u]int->fp. Match GCC and default to using dynamic rounding mode.
+def : Pat<(any_sint_to_fp (i64 (sexti32 (i64 GPR:$rs1)))), (FCVT_S_W_INX $rs1, FRM_DYN)>;
+def : Pat<(any_uint_to_fp (i64 (zexti32 (i64 GPR:$rs1)))), (FCVT_S_WU_INX $rs1, FRM_DYN)>;
+def : Pat<(any_sint_to_fp (i64 GPR:$rs1)), (FCVT_S_L_INX $rs1, FRM_DYN)>;
+def : Pat<(any_uint_to_fp (i64 GPR:$rs1)), (FCVT_S_LU_INX $rs1, FRM_DYN)>;
+} // Predicates = [HasStdExtZfinx, IsRV64]
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index ccc6f34ce09a..1e5d5e0d84ff 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -113,6 +113,7 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
bool hasStdExtCOrZca() const { return HasStdExtC || HasStdExtZca; }
bool hasStdExtZvl() const { return ZvlLen != 0; }
+ bool hasStdExtFOrZfinx() const { return HasStdExtF || HasStdExtZfinx; }
bool hasStdExtZfhOrZfhmin() const { return HasStdExtZfh || HasStdExtZfhmin; }
bool is64Bit() const { return IsRV64; }
MVT getXLenVT() const { return XLenVT; }
diff --git a/llvm/test/CodeGen/RISCV/float-arith-strict.ll b/llvm/test/CodeGen/RISCV/float-arith-strict.ll
index 3c6aceca529a..0252c8ca0f72 100644
--- a/llvm/test/CodeGen/RISCV/float-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith-strict.ll
@@ -9,6 +9,12 @@
; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -disable-strictnode-mutation -target-abi=ilp32 \
+; RUN: | FileCheck -check-prefixes=CHECKIZFINX,RV32IZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -disable-strictnode-mutation -target-abi=lp64 \
+; RUN: | FileCheck -check-prefixes=CHECKIZFINX,RV64IZFINX %s
define float @fadd_s(float %a, float %b) nounwind strictfp {
; CHECKIF-LABEL: fadd_s:
@@ -33,6 +39,11 @@ define float @fadd_s(float %a, float %b) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; CHECKIZFINX-LABEL: fadd_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
%1 = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
ret float %1
}
@@ -61,6 +72,11 @@ define float @fsub_s(float %a, float %b) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; CHECKIZFINX-LABEL: fsub_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fsub.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
%1 = call float @llvm.experimental.constrained.fsub.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
ret float %1
}
@@ -89,6 +105,11 @@ define float @fmul_s(float %a, float %b) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; CHECKIZFINX-LABEL: fmul_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fmul.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
%1 = call float @llvm.experimental.constrained.fmul.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
ret float %1
}
@@ -117,6 +138,11 @@ define float @fdiv_s(float %a, float %b) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; CHECKIZFINX-LABEL: fdiv_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fdiv.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
%1 = call float @llvm.experimental.constrained.fdiv.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
ret float %1
}
@@ -145,6 +171,11 @@ define float @fsqrt_s(float %a) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; CHECKIZFINX-LABEL: fsqrt_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fsqrt.s a0, a0
+; CHECKIZFINX-NEXT: ret
%1 = call float @llvm.experimental.constrained.sqrt.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
ret float %1
}
@@ -186,6 +217,24 @@ define float @fmin_s(float %a, float %b) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV32IZFINX-LABEL: fmin_s:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: call fminf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fmin_s:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: call fminf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
%1 = call float @llvm.experimental.constrained.minnum.f32(float %a, float %b, metadata !"fpexcept.strict") strictfp
ret float %1
}
@@ -227,6 +276,24 @@ define float @fmax_s(float %a, float %b) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; RV32IZFINX-LABEL: fmax_s:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: call fmaxf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fmax_s:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: call fmaxf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
%1 = call float @llvm.experimental.constrained.maxnum.f32(float %a, float %b, metadata !"fpexcept.strict") strictfp
ret float %1
}
@@ -255,6 +322,11 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind strictfp {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
+;
+; CHECKIZFINX-LABEL: fmadd_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fmadd.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
%1 = call float @llvm.experimental.constrained.fma.f32(float %a, float %b, float %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
ret float %1
}
@@ -311,6 +383,12 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp {
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
+;
+; CHECKIZFINX-LABEL: fmsub_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a2, a2, zero
+; CHECKIZFINX-NEXT: fmsub.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
%c_ = fadd float 0.0, %c ; avoid negation using xor
%negc = fneg float %c_
%1 = call float @llvm.experimental.constrained.fma.f32(float %a, float %b, float %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
@@ -381,6 +459,13 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind strictfp {
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
+;
+; CHECKIZFINX-LABEL: fnmadd_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a0, a0, zero
+; CHECKIZFINX-NEXT: fadd.s a2, a2, zero
+; CHECKIZFINX-NEXT: fnmadd.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
%a_ = fadd float 0.0, %a
%c_ = fadd float 0.0, %c
%nega = fneg float %a_
@@ -453,6 +538,13 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind strictfp {
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
+;
+; CHECKIZFINX-LABEL: fnmadd_s_2:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a1, a1, zero
+; CHECKIZFINX-NEXT: fadd.s a2, a2, zero
+; CHECKIZFINX-NEXT: fnmadd.s a0, a1, a0, a2
+; CHECKIZFINX-NEXT: ret
%b_ = fadd float 0.0, %b
%c_ = fadd float 0.0, %c
%negb = fneg float %b_
@@ -510,6 +602,12 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind strictfp {
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
+;
+; CHECKIZFINX-LABEL: fnmsub_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a0, a0, zero
+; CHECKIZFINX-NEXT: fnmsub.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
%a_ = fadd float 0.0, %a
%nega = fneg float %a_
%1 = call float @llvm.experimental.constrained.fma.f32(float %nega, float %b, float %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
@@ -567,6 +665,12 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind strictfp {
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
+;
+; CHECKIZFINX-LABEL: fnmsub_s_2:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a1, a1, zero
+; CHECKIZFINX-NEXT: fnmsub.s a0, a1, a0, a2
+; CHECKIZFINX-NEXT: ret
%b_ = fadd float 0.0, %b
%negb = fneg float %b_
%1 = call float @llvm.experimental.constrained.fma.f32(float %a, float %negb, float %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
diff --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll
index d6bb92736f01..5497827a3f2f 100644
--- a/llvm/test/CodeGen/RISCV/float-arith.ll
+++ b/llvm/test/CodeGen/RISCV/float-arith.ll
@@ -3,6 +3,10 @@
; RUN: -target-abi=ilp32f | FileCheck -check-prefix=CHECKIF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=lp64f | FileCheck -check-prefix=CHECKIF %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=ilp32 | FileCheck -check-prefix=CHECKIZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=lp64 | FileCheck -check-prefix=CHECKIZFINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -19,6 +23,11 @@ define float @fadd_s(float %a, float %b) nounwind {
; CHECKIF-NEXT: fadd.s fa0, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fadd_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fadd_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -46,6 +55,11 @@ define float @fsub_s(float %a, float %b) nounwind {
; CHECKIF-NEXT: fsub.s fa0, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fsub_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fsub.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fsub_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -73,6 +87,11 @@ define float @fmul_s(float %a, float %b) nounwind {
; CHECKIF-NEXT: fmul.s fa0, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fmul_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fmul.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fmul_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -100,6 +119,11 @@ define float @fdiv_s(float %a, float %b) nounwind {
; CHECKIF-NEXT: fdiv.s fa0, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fdiv_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fdiv.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fdiv_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -129,6 +153,11 @@ define float @fsqrt_s(float %a) nounwind {
; CHECKIF-NEXT: fsqrt.s fa0, fa0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fsqrt_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fsqrt.s a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fsqrt_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -158,6 +187,11 @@ define float @fsgnj_s(float %a, float %b) nounwind {
; CHECKIF-NEXT: fsgnj.s fa0, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fsgnj_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fsgnj.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fsgnj_s:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a2, 524288
@@ -187,6 +221,13 @@ define i32 @fneg_s(float %a, float %b) nounwind {
; CHECKIF-NEXT: feq.s a0, fa5, fa4
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fneg_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a0, a0, a0
+; CHECKIZFINX-NEXT: fneg.s a1, a0
+; CHECKIZFINX-NEXT: feq.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fneg_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -228,6 +269,12 @@ define float @fsgnjn_s(float %a, float %b) nounwind {
; CHECKIF-NEXT: fsgnjn.s fa0, fa0, fa5
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fsgnjn_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a1, a0, a1
+; CHECKIZFINX-NEXT: fsgnjn.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fsgnjn_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -279,6 +326,13 @@ define float @fabs_s(float %a, float %b) nounwind {
; CHECKIF-NEXT: fadd.s fa0, fa4, fa5
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fabs_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a0, a0, a1
+; CHECKIZFINX-NEXT: fabs.s a1, a0
+; CHECKIZFINX-NEXT: fadd.s a0, a1, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fabs_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -318,6 +372,11 @@ define float @fmin_s(float %a, float %b) nounwind {
; CHECKIF-NEXT: fmin.s fa0, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fmin_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fmin.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fmin_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -347,6 +406,11 @@ define float @fmax_s(float %a, float %b) nounwind {
; CHECKIF-NEXT: fmax.s fa0, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fmax_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fmax.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fmax_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -376,6 +440,11 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fmadd.s fa0, fa0, fa1, fa2
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fmadd_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fmadd.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fmadd_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -405,6 +474,12 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fmsub.s fa0, fa0, fa1, fa5
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fmsub_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a2, a2, zero
+; CHECKIZFINX-NEXT: fmsub.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fmsub_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -463,6 +538,13 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fnmadd.s fa0, fa4, fa1, fa5
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fnmadd_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a0, a0, zero
+; CHECKIZFINX-NEXT: fadd.s a2, a2, zero
+; CHECKIZFINX-NEXT: fnmadd.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fnmadd_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -535,6 +617,13 @@ define float @fnmadd_s_2(float %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fnmadd.s fa0, fa4, fa0, fa5
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fnmadd_s_2:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a1, a1, zero
+; CHECKIZFINX-NEXT: fadd.s a2, a2, zero
+; CHECKIZFINX-NEXT: fnmadd.s a0, a1, a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fnmadd_s_2:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -617,6 +706,13 @@ define float @fnmadd_s_3(float %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fneg.s fa0, fa5
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fnmadd_s_3:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fmadd.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: lui a1, 524288
+; CHECKIZFINX-NEXT: xor a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fnmadd_s_3:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -659,6 +755,13 @@ define float @fnmadd_nsz(float %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fnmadd.s fa0, fa0, fa1, fa2
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fnmadd_nsz:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fmadd.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: lui a1, 524288
+; CHECKIZFINX-NEXT: xor a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fnmadd_nsz:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -693,6 +796,12 @@ define float @fnmsub_s(float %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fnmsub.s fa0, fa5, fa1, fa2
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fnmsub_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a0, a0, zero
+; CHECKIZFINX-NEXT: fnmsub.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fnmsub_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -748,6 +857,12 @@ define float @fnmsub_s_2(float %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fnmsub.s fa0, fa5, fa0, fa2
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fnmsub_s_2:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a1, a1, zero
+; CHECKIZFINX-NEXT: fnmsub.s a0, a1, a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fnmsub_s_2:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -803,6 +918,11 @@ define float @fmadd_s_contract(float %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fmadd.s fa0, fa0, fa1, fa2
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fmadd_s_contract:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fmadd.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fmadd_s_contract:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -843,6 +963,12 @@ define float @fmsub_s_contract(float %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fmsub.s fa0, fa0, fa1, fa5
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fmsub_s_contract:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a2, a2, zero
+; CHECKIZFINX-NEXT: fmsub.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fmsub_s_contract:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -908,6 +1034,14 @@ define float @fnmadd_s_contract(float %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fnmadd.s fa0, fa4, fa3, fa5
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fnmadd_s_contract:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a0, a0, zero
+; CHECKIZFINX-NEXT: fadd.s a1, a1, zero
+; CHECKIZFINX-NEXT: fadd.s a2, a2, zero
+; CHECKIZFINX-NEXT: fnmadd.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fnmadd_s_contract:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -993,6 +1127,13 @@ define float @fnmsub_s_contract(float %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fnmsub.s fa0, fa4, fa5, fa2
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fnmsub_s_contract:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a0, a0, zero
+; CHECKIZFINX-NEXT: fadd.s a1, a1, zero
+; CHECKIZFINX-NEXT: fnmsub.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fnmsub_s_contract:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
diff --git a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
index f0c34317a562..5c50381ad170 100644
--- a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
@@ -1,10 +1,14 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32F %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -target-abi=ilp32 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32ZFINX %s
; RUN: llc -mtriple=riscv32 -mattr=+f,+d -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32FD %s
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64F %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -target-abi=lp64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64ZFINX %s
; RUN: llc -mtriple=riscv64 -mattr=+f,+d -target-abi=lp64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64FD %s
@@ -25,6 +29,13 @@ define float @bitcast_and(float %a1, float %a2) nounwind {
; RV32F-NEXT: fmv.x.w a0, fa5
; RV32F-NEXT: ret
;
+; RV32ZFINX-LABEL: bitcast_and:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: fadd.s a1, a0, a1
+; RV32ZFINX-NEXT: fabs.s a1, a1
+; RV32ZFINX-NEXT: fadd.s a0, a0, a1
+; RV32ZFINX-NEXT: ret
+;
; RV32FD-LABEL: bitcast_and:
; RV32FD: # %bb.0:
; RV32FD-NEXT: fmv.w.x fa5, a1
@@ -45,6 +56,13 @@ define float @bitcast_and(float %a1, float %a2) nounwind {
; RV64F-NEXT: fmv.x.w a0, fa5
; RV64F-NEXT: ret
;
+; RV64ZFINX-LABEL: bitcast_and:
+; RV64ZFINX: # %bb.0:
+; RV64ZFINX-NEXT: fadd.s a1, a0, a1
+; RV64ZFINX-NEXT: fabs.s a1, a1
+; RV64ZFINX-NEXT: fadd.s a0, a0, a1
+; RV64ZFINX-NEXT: ret
+;
; RV64FD-LABEL: bitcast_and:
; RV64FD: # %bb.0:
; RV64FD-NEXT: fmv.w.x fa5, a1
@@ -84,6 +102,27 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
; RV32F-NEXT: addi sp, sp, 16
; RV32F-NEXT: ret
;
+; RV32ZFINX-LABEL: bitcast_double_and:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: addi sp, sp, -16
+; RV32ZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32ZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32ZFINX-NEXT: mv s0, a1
+; RV32ZFINX-NEXT: mv s1, a0
+; RV32ZFINX-NEXT: call __adddf3@plt
+; RV32ZFINX-NEXT: mv a2, a0
+; RV32ZFINX-NEXT: slli a1, a1, 1
+; RV32ZFINX-NEXT: srli a3, a1, 1
+; RV32ZFINX-NEXT: mv a0, s1
+; RV32ZFINX-NEXT: mv a1, s0
+; RV32ZFINX-NEXT: call __adddf3@plt
+; RV32ZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32ZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32ZFINX-NEXT: addi sp, sp, 16
+; RV32ZFINX-NEXT: ret
+;
; RV32FD-LABEL: bitcast_double_and:
; RV32FD: # %bb.0:
; RV32FD-NEXT: addi sp, sp, -16
@@ -118,6 +157,22 @@ define double @bitcast_double_and(double %a1, double %a2) nounwind {
; RV64F-NEXT: addi sp, sp, 16
; RV64F-NEXT: ret
;
+; RV64ZFINX-LABEL: bitcast_double_and:
+; RV64ZFINX: # %bb.0:
+; RV64ZFINX-NEXT: addi sp, sp, -16
+; RV64ZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64ZFINX-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64ZFINX-NEXT: mv s0, a0
+; RV64ZFINX-NEXT: call __adddf3@plt
+; RV64ZFINX-NEXT: slli a0, a0, 1
+; RV64ZFINX-NEXT: srli a1, a0, 1
+; RV64ZFINX-NEXT: mv a0, s0
+; RV64ZFINX-NEXT: call __adddf3@plt
+; RV64ZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64ZFINX-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64ZFINX-NEXT: addi sp, sp, 16
+; RV64ZFINX-NEXT: ret
+;
; RV64FD-LABEL: bitcast_double_and:
; RV64FD: # %bb.0:
; RV64FD-NEXT: fmv.d.x fa5, a1
@@ -147,6 +202,13 @@ define float @bitcast_xor(float %a1, float %a2) nounwind {
; RV32F-NEXT: fmv.x.w a0, fa5
; RV32F-NEXT: ret
;
+; RV32ZFINX-LABEL: bitcast_xor:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: fmul.s a1, a0, a1
+; RV32ZFINX-NEXT: fneg.s a1, a1
+; RV32ZFINX-NEXT: fmul.s a0, a0, a1
+; RV32ZFINX-NEXT: ret
+;
; RV32FD-LABEL: bitcast_xor:
; RV32FD: # %bb.0:
; RV32FD-NEXT: fmv.w.x fa5, a1
@@ -167,6 +229,13 @@ define float @bitcast_xor(float %a1, float %a2) nounwind {
; RV64F-NEXT: fmv.x.w a0, fa5
; RV64F-NEXT: ret
;
+; RV64ZFINX-LABEL: bitcast_xor:
+; RV64ZFINX: # %bb.0:
+; RV64ZFINX-NEXT: fmul.s a1, a0, a1
+; RV64ZFINX-NEXT: fneg.s a1, a1
+; RV64ZFINX-NEXT: fmul.s a0, a0, a1
+; RV64ZFINX-NEXT: ret
+;
; RV64FD-LABEL: bitcast_xor:
; RV64FD: # %bb.0:
; RV64FD-NEXT: fmv.w.x fa5, a1
@@ -206,6 +275,27 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
; RV32F-NEXT: addi sp, sp, 16
; RV32F-NEXT: ret
;
+; RV32ZFINX-LABEL: bitcast_double_xor:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: addi sp, sp, -16
+; RV32ZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32ZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32ZFINX-NEXT: mv s0, a1
+; RV32ZFINX-NEXT: mv s1, a0
+; RV32ZFINX-NEXT: call __muldf3@plt
+; RV32ZFINX-NEXT: mv a2, a0
+; RV32ZFINX-NEXT: lui a3, 524288
+; RV32ZFINX-NEXT: xor a3, a1, a3
+; RV32ZFINX-NEXT: mv a0, s1
+; RV32ZFINX-NEXT: mv a1, s0
+; RV32ZFINX-NEXT: call __muldf3@plt
+; RV32ZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32ZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32ZFINX-NEXT: addi sp, sp, 16
+; RV32ZFINX-NEXT: ret
+;
; RV32FD-LABEL: bitcast_double_xor:
; RV32FD: # %bb.0:
; RV32FD-NEXT: addi sp, sp, -16
@@ -241,6 +331,23 @@ define double @bitcast_double_xor(double %a1, double %a2) nounwind {
; RV64F-NEXT: addi sp, sp, 16
; RV64F-NEXT: ret
;
+; RV64ZFINX-LABEL: bitcast_double_xor:
+; RV64ZFINX: # %bb.0:
+; RV64ZFINX-NEXT: addi sp, sp, -16
+; RV64ZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64ZFINX-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64ZFINX-NEXT: mv s0, a0
+; RV64ZFINX-NEXT: call __muldf3@plt
+; RV64ZFINX-NEXT: li a1, -1
+; RV64ZFINX-NEXT: slli a1, a1, 63
+; RV64ZFINX-NEXT: xor a1, a0, a1
+; RV64ZFINX-NEXT: mv a0, s0
+; RV64ZFINX-NEXT: call __muldf3@plt
+; RV64ZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64ZFINX-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64ZFINX-NEXT: addi sp, sp, 16
+; RV64ZFINX-NEXT: ret
+;
; RV64FD-LABEL: bitcast_double_xor:
; RV64FD: # %bb.0:
; RV64FD-NEXT: fmv.d.x fa5, a1
@@ -270,6 +377,14 @@ define float @bitcast_or(float %a1, float %a2) nounwind {
; RV32F-NEXT: fmv.x.w a0, fa5
; RV32F-NEXT: ret
;
+; RV32ZFINX-LABEL: bitcast_or:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: fmul.s a1, a0, a1
+; RV32ZFINX-NEXT: fabs.s a1, a1
+; RV32ZFINX-NEXT: fneg.s a1, a1
+; RV32ZFINX-NEXT: fmul.s a0, a0, a1
+; RV32ZFINX-NEXT: ret
+;
; RV32FD-LABEL: bitcast_or:
; RV32FD: # %bb.0:
; RV32FD-NEXT: fmv.w.x fa5, a1
@@ -292,6 +407,14 @@ define float @bitcast_or(float %a1, float %a2) nounwind {
; RV64F-NEXT: fmv.x.w a0, fa5
; RV64F-NEXT: ret
;
+; RV64ZFINX-LABEL: bitcast_or:
+; RV64ZFINX: # %bb.0:
+; RV64ZFINX-NEXT: fmul.s a1, a0, a1
+; RV64ZFINX-NEXT: fabs.s a1, a1
+; RV64ZFINX-NEXT: fneg.s a1, a1
+; RV64ZFINX-NEXT: fmul.s a0, a0, a1
+; RV64ZFINX-NEXT: ret
+;
; RV64FD-LABEL: bitcast_or:
; RV64FD: # %bb.0:
; RV64FD-NEXT: fmv.w.x fa5, a1
@@ -332,6 +455,27 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
; RV32F-NEXT: addi sp, sp, 16
; RV32F-NEXT: ret
;
+; RV32ZFINX-LABEL: bitcast_double_or:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: addi sp, sp, -16
+; RV32ZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32ZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32ZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32ZFINX-NEXT: mv s0, a1
+; RV32ZFINX-NEXT: mv s1, a0
+; RV32ZFINX-NEXT: call __muldf3@plt
+; RV32ZFINX-NEXT: mv a2, a0
+; RV32ZFINX-NEXT: lui a3, 524288
+; RV32ZFINX-NEXT: or a3, a1, a3
+; RV32ZFINX-NEXT: mv a0, s1
+; RV32ZFINX-NEXT: mv a1, s0
+; RV32ZFINX-NEXT: call __muldf3@plt
+; RV32ZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32ZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32ZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32ZFINX-NEXT: addi sp, sp, 16
+; RV32ZFINX-NEXT: ret
+;
; RV32FD-LABEL: bitcast_double_or:
; RV32FD: # %bb.0:
; RV32FD-NEXT: addi sp, sp, -16
@@ -368,6 +512,23 @@ define double @bitcast_double_or(double %a1, double %a2) nounwind {
; RV64F-NEXT: addi sp, sp, 16
; RV64F-NEXT: ret
;
+; RV64ZFINX-LABEL: bitcast_double_or:
+; RV64ZFINX: # %bb.0:
+; RV64ZFINX-NEXT: addi sp, sp, -16
+; RV64ZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64ZFINX-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
+; RV64ZFINX-NEXT: mv s0, a0
+; RV64ZFINX-NEXT: call __muldf3@plt
+; RV64ZFINX-NEXT: li a1, -1
+; RV64ZFINX-NEXT: slli a1, a1, 63
+; RV64ZFINX-NEXT: or a1, a0, a1
+; RV64ZFINX-NEXT: mv a0, s0
+; RV64ZFINX-NEXT: call __muldf3@plt
+; RV64ZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64ZFINX-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64ZFINX-NEXT: addi sp, sp, 16
+; RV64ZFINX-NEXT: ret
+;
; RV64FD-LABEL: bitcast_double_or:
; RV64FD: # %bb.0:
; RV64FD-NEXT: fmv.d.x fa5, a1
diff --git a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
index 3741e8648040..3324d366cf0e 100644
--- a/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/float-bitmanip-dagcombines.ll
@@ -3,10 +3,14 @@
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32IF %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32IZFINX %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64IF %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64IZFINX %s
; This file tests cases where simple floating point operations can be
; profitably handled through bit manipulation if a soft-float ABI is being used
@@ -27,6 +31,12 @@ define float @fneg(float %a) nounwind {
; RV32IF-NEXT: xor a0, a0, a1
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fneg:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 524288
+; RV32IZFINX-NEXT: xor a0, a0, a1
+; RV32IZFINX-NEXT: ret
+;
; RV64I-LABEL: fneg:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a1, 524288
@@ -38,6 +48,12 @@ define float @fneg(float %a) nounwind {
; RV64IF-NEXT: lui a1, 524288
; RV64IF-NEXT: xor a0, a0, a1
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: fneg:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lui a1, 524288
+; RV64IZFINX-NEXT: xor a0, a0, a1
+; RV64IZFINX-NEXT: ret
%1 = fneg float %a
ret float %1
}
@@ -57,6 +73,12 @@ define float @fabs(float %a) nounwind {
; RV32IF-NEXT: srli a0, a0, 1
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fabs:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: slli a0, a0, 1
+; RV32IZFINX-NEXT: srli a0, a0, 1
+; RV32IZFINX-NEXT: ret
+;
; RV64I-LABEL: fabs:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 33
@@ -68,6 +90,12 @@ define float @fabs(float %a) nounwind {
; RV64IF-NEXT: slli a0, a0, 33
; RV64IF-NEXT: srli a0, a0, 33
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: fabs:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: slli a0, a0, 33
+; RV64IZFINX-NEXT: srli a0, a0, 33
+; RV64IZFINX-NEXT: ret
%1 = call float @llvm.fabs.f32(float %a)
ret float %1
}
@@ -99,6 +127,13 @@ define float @fcopysign_fneg(float %a, float %b) nounwind {
; RV32IF-NEXT: fmv.x.w a0, fa5
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcopysign_fneg:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a2, 524288
+; RV32IZFINX-NEXT: xor a1, a1, a2
+; RV32IZFINX-NEXT: fsgnj.s a0, a0, a1
+; RV32IZFINX-NEXT: ret
+;
; RV64I-LABEL: fcopysign_fneg:
; RV64I: # %bb.0:
; RV64I-NEXT: not a1, a1
@@ -116,6 +151,11 @@ define float @fcopysign_fneg(float %a, float %b) nounwind {
; RV64IF-NEXT: fsgnjn.s fa5, fa4, fa5
; RV64IF-NEXT: fmv.x.w a0, fa5
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcopysign_fneg:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fsgnjn.s a0, a0, a1
+; RV64IZFINX-NEXT: ret
%1 = fneg float %b
%2 = call float @llvm.copysign.f32(float %a, float %1)
ret float %2
diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
index f9901e6fc9dc..71b0f77015b5 100644
--- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
@@ -3,6 +3,10 @@
; RUN: -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=ilp32 | FileCheck -check-prefix=RV32IZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=lp64 | FileCheck -check-prefix=RV64IZFINX %s
declare void @abort()
declare void @exit(i32)
@@ -30,6 +34,28 @@ define void @br_fcmp_false(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_false:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: li a0, 1
+; RV32IZFINX-NEXT: bnez a0, .LBB0_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.then
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB0_2: # %if.else
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_false:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: li a0, 1
+; RV64IZFINX-NEXT: bnez a0, .LBB0_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.then
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB0_2: # %if.else
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp false float %a, %b
br i1 %1, label %if.then, label %if.else
if.then:
@@ -61,6 +87,28 @@ define void @br_fcmp_oeq(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_oeq:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: feq.s a0, a0, a1
+; RV32IZFINX-NEXT: bnez a0, .LBB1_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB1_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_oeq:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: feq.s a0, a0, a1
+; RV64IZFINX-NEXT: bnez a0, .LBB1_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB1_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp oeq float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -95,6 +143,28 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_oeq_alt:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: feq.s a0, a0, a1
+; RV32IZFINX-NEXT: bnez a0, .LBB2_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB2_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_oeq_alt:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: feq.s a0, a0, a1
+; RV64IZFINX-NEXT: bnez a0, .LBB2_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB2_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp oeq float %a, %b
br i1 %1, label %if.then, label %if.else
if.then:
@@ -126,6 +196,28 @@ define void @br_fcmp_ogt(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_ogt:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: flt.s a0, a1, a0
+; RV32IZFINX-NEXT: bnez a0, .LBB3_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB3_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_ogt:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: flt.s a0, a1, a0
+; RV64IZFINX-NEXT: bnez a0, .LBB3_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB3_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp ogt float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -157,6 +249,28 @@ define void @br_fcmp_oge(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_oge:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fle.s a0, a1, a0
+; RV32IZFINX-NEXT: bnez a0, .LBB4_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB4_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_oge:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fle.s a0, a1, a0
+; RV64IZFINX-NEXT: bnez a0, .LBB4_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB4_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp oge float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -188,6 +302,28 @@ define void @br_fcmp_olt(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_olt:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: flt.s a0, a0, a1
+; RV32IZFINX-NEXT: bnez a0, .LBB5_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB5_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_olt:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: flt.s a0, a0, a1
+; RV64IZFINX-NEXT: bnez a0, .LBB5_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB5_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp olt float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -219,6 +355,28 @@ define void @br_fcmp_ole(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_ole:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fle.s a0, a0, a1
+; RV32IZFINX-NEXT: bnez a0, .LBB6_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB6_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_ole:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fle.s a0, a0, a1
+; RV64IZFINX-NEXT: bnez a0, .LBB6_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB6_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp ole float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -254,6 +412,32 @@ define void @br_fcmp_one(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_one:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: flt.s a2, a0, a1
+; RV32IZFINX-NEXT: flt.s a0, a1, a0
+; RV32IZFINX-NEXT: or a0, a0, a2
+; RV32IZFINX-NEXT: bnez a0, .LBB7_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB7_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_one:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: flt.s a2, a0, a1
+; RV64IZFINX-NEXT: flt.s a0, a1, a0
+; RV64IZFINX-NEXT: or a0, a0, a2
+; RV64IZFINX-NEXT: bnez a0, .LBB7_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB7_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp one float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -289,6 +473,32 @@ define void @br_fcmp_ord(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_ord:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: feq.s a1, a1, a1
+; RV32IZFINX-NEXT: feq.s a0, a0, a0
+; RV32IZFINX-NEXT: and a0, a0, a1
+; RV32IZFINX-NEXT: bnez a0, .LBB8_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB8_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_ord:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: feq.s a1, a1, a1
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: bnez a0, .LBB8_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB8_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp ord float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -324,6 +534,32 @@ define void @br_fcmp_ueq(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_ueq:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: flt.s a2, a0, a1
+; RV32IZFINX-NEXT: flt.s a0, a1, a0
+; RV32IZFINX-NEXT: or a0, a0, a2
+; RV32IZFINX-NEXT: beqz a0, .LBB9_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB9_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_ueq:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: flt.s a2, a0, a1
+; RV64IZFINX-NEXT: flt.s a0, a1, a0
+; RV64IZFINX-NEXT: or a0, a0, a2
+; RV64IZFINX-NEXT: beqz a0, .LBB9_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB9_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp ueq float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -355,6 +591,28 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_ugt:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fle.s a0, a0, a1
+; RV32IZFINX-NEXT: beqz a0, .LBB10_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB10_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_ugt:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fle.s a0, a0, a1
+; RV64IZFINX-NEXT: beqz a0, .LBB10_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB10_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp ugt float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -386,6 +644,28 @@ define void @br_fcmp_uge(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_uge:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: flt.s a0, a0, a1
+; RV32IZFINX-NEXT: beqz a0, .LBB11_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB11_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_uge:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: flt.s a0, a0, a1
+; RV64IZFINX-NEXT: beqz a0, .LBB11_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB11_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp uge float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -417,6 +697,28 @@ define void @br_fcmp_ult(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_ult:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fle.s a0, a1, a0
+; RV32IZFINX-NEXT: beqz a0, .LBB12_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB12_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_ult:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fle.s a0, a1, a0
+; RV64IZFINX-NEXT: beqz a0, .LBB12_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB12_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp ult float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -448,6 +750,28 @@ define void @br_fcmp_ule(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_ule:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: flt.s a0, a1, a0
+; RV32IZFINX-NEXT: beqz a0, .LBB13_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB13_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_ule:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: flt.s a0, a1, a0
+; RV64IZFINX-NEXT: beqz a0, .LBB13_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB13_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp ule float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -479,6 +803,28 @@ define void @br_fcmp_une(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_une:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: feq.s a0, a0, a1
+; RV32IZFINX-NEXT: beqz a0, .LBB14_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB14_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_une:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: feq.s a0, a0, a1
+; RV64IZFINX-NEXT: beqz a0, .LBB14_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB14_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp une float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -514,6 +860,32 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_uno:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: feq.s a1, a1, a1
+; RV32IZFINX-NEXT: feq.s a0, a0, a0
+; RV32IZFINX-NEXT: and a0, a0, a1
+; RV32IZFINX-NEXT: beqz a0, .LBB15_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB15_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_uno:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: feq.s a1, a1, a1
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: beqz a0, .LBB15_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB15_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp uno float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -545,6 +917,28 @@ define void @br_fcmp_true(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_true:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: li a0, 1
+; RV32IZFINX-NEXT: bnez a0, .LBB16_2
+; RV32IZFINX-NEXT: # %bb.1: # %if.else
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB16_2: # %if.then
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_true:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: li a0, 1
+; RV64IZFINX-NEXT: bnez a0, .LBB16_2
+; RV64IZFINX-NEXT: # %bb.1: # %if.else
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB16_2: # %if.then
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call abort@plt
%1 = fcmp true float %a, %b
br i1 %1, label %if.then, label %if.else
if.else:
@@ -604,6 +998,48 @@ define i32 @br_fcmp_store_load_stack_slot(float %a, float %b) nounwind {
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB17_3: # %if.then
; RV64IF-NEXT:    call abort@plt
+;
+; RV32IZFINX-LABEL: br_fcmp_store_load_stack_slot:
+; RV32IZFINX: # %bb.0: # %entry
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: li a0, 0
+; RV32IZFINX-NEXT:    call dummy@plt
+; RV32IZFINX-NEXT: feq.s a0, a0, zero
+; RV32IZFINX-NEXT: beqz a0, .LBB17_3
+; RV32IZFINX-NEXT: # %bb.1: # %if.end
+; RV32IZFINX-NEXT: li a0, 0
+; RV32IZFINX-NEXT:    call dummy@plt
+; RV32IZFINX-NEXT: feq.s a0, a0, zero
+; RV32IZFINX-NEXT: beqz a0, .LBB17_3
+; RV32IZFINX-NEXT: # %bb.2: # %if.end4
+; RV32IZFINX-NEXT: li a0, 0
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+; RV32IZFINX-NEXT: .LBB17_3: # %if.then
+; RV32IZFINX-NEXT:    call abort@plt
+;
+; RV64IZFINX-LABEL: br_fcmp_store_load_stack_slot:
+; RV64IZFINX: # %bb.0: # %entry
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: li a0, 0
+; RV64IZFINX-NEXT:    call dummy@plt
+; RV64IZFINX-NEXT: feq.s a0, a0, zero
+; RV64IZFINX-NEXT: beqz a0, .LBB17_3
+; RV64IZFINX-NEXT: # %bb.1: # %if.end
+; RV64IZFINX-NEXT: li a0, 0
+; RV64IZFINX-NEXT:    call dummy@plt
+; RV64IZFINX-NEXT: feq.s a0, a0, zero
+; RV64IZFINX-NEXT: beqz a0, .LBB17_3
+; RV64IZFINX-NEXT: # %bb.2: # %if.end4
+; RV64IZFINX-NEXT: li a0, 0
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+; RV64IZFINX-NEXT: .LBB17_3: # %if.then
+; RV64IZFINX-NEXT:    call abort@plt
entry:
%call = call float @dummy(float 0.000000e+00)
%cmp = fcmp une float %call, 0.000000e+00
diff --git a/llvm/test/CodeGen/RISCV/float-convert-strict.ll b/llvm/test/CodeGen/RISCV/float-convert-strict.ll
index 47ebb49a8923..38fe0b911005 100644
--- a/llvm/test/CodeGen/RISCV/float-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert-strict.ll
@@ -5,6 +5,12 @@
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation -target-abi=lp64f \
; RUN: | FileCheck -check-prefixes=CHECKIF,RV64IF %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -disable-strictnode-mutation -target-abi=ilp32 \
+; RUN: | FileCheck -check-prefixes=CHECKIZFINX,RV32IZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -disable-strictnode-mutation -target-abi=lp64 \
+; RUN: | FileCheck -check-prefixes=CHECKIZFINX,RV64IZFINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -20,6 +26,11 @@ define i32 @fcvt_w_s(float %a) nounwind strictfp {
; CHECKIF-NEXT: fcvt.w.s a0, fa0, rtz
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_w_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.w.s a0, a0, rtz
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_w_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -48,6 +59,11 @@ define i32 @fcvt_wu_s(float %a) nounwind strictfp {
; CHECKIF-NEXT: fcvt.wu.s a0, fa0, rtz
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_wu_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.wu.s a0, a0, rtz
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_wu_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -80,6 +96,13 @@ define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind {
; CHECKIF-NEXT: add a0, a0, a1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_wu_s_multiple_use:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.wu.s a0, a0, rtz
+; CHECKIZFINX-NEXT: seqz a1, a0
+; CHECKIZFINX-NEXT: add a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_wu_s_multiple_use:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -113,6 +136,11 @@ define float @fcvt_s_w(i32 %a) nounwind strictfp {
; CHECKIF-NEXT: fcvt.s.w fa0, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_s_w:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.s.w a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_w:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -143,6 +171,12 @@ define float @fcvt_s_w_load(ptr %p) nounwind strictfp {
; CHECKIF-NEXT: fcvt.s.w fa0, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_s_w_load:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: lw a0, 0(a0)
+; CHECKIZFINX-NEXT: fcvt.s.w a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_w_load:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -173,6 +207,11 @@ define float @fcvt_s_wu(i32 %a) nounwind strictfp {
; CHECKIF-NEXT: fcvt.s.wu fa0, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_s_wu:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.s.wu a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_wu:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -209,6 +248,18 @@ define float @fcvt_s_wu_load(ptr %p) nounwind strictfp {
; RV64IF-NEXT: fcvt.s.wu fa0, a0
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_s_wu_load:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lw a0, 0(a0)
+; RV32IZFINX-NEXT: fcvt.s.wu a0, a0
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_s_wu_load:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lwu a0, 0(a0)
+; RV64IZFINX-NEXT: fcvt.s.wu a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_wu_load:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -248,6 +299,20 @@ define i64 @fcvt_l_s(float %a) nounwind strictfp {
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_l_s:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_l_s:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_l_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -285,6 +350,20 @@ define i64 @fcvt_lu_s(float %a) nounwind strictfp {
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_lu_s:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_lu_s:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_lu_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -322,6 +401,20 @@ define float @fcvt_s_l(i64 %a) nounwind strictfp {
; RV64IF-NEXT: fcvt.s.l fa0, a0
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_s_l:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call __floatdisf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_s_l:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.s.l a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_l:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -359,6 +452,20 @@ define float @fcvt_s_lu(i64 %a) nounwind strictfp {
; RV64IF-NEXT: fcvt.s.lu fa0, a0
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_s_lu:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call __floatundisf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_s_lu:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.s.lu a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_lu:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -387,6 +494,11 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp {
; CHECKIF-NEXT: fcvt.s.w fa0, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_s_w_i8:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.s.w a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_w_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -415,6 +527,11 @@ define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp {
; CHECKIF-NEXT: fcvt.s.wu fa0, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_s_wu_i8:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.s.wu a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_wu_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -443,6 +560,11 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp {
; CHECKIF-NEXT: fcvt.s.w fa0, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_s_w_i16:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.s.w a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_w_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -471,6 +593,11 @@ define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp {
; CHECKIF-NEXT: fcvt.s.wu fa0, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_s_wu_i16:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.s.wu a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_wu_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -509,6 +636,22 @@ define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64IF-NEXT: fsw fa5, 0(a1)
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_s_w_demanded_bits:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi a0, a0, 1
+; RV32IZFINX-NEXT: fcvt.s.w a2, a0
+; RV32IZFINX-NEXT: sw a2, 0(a1)
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_s_w_demanded_bits:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addiw a2, a0, 1
+; RV64IZFINX-NEXT: addi a0, a0, 1
+; RV64IZFINX-NEXT: fcvt.s.w a0, a0
+; RV64IZFINX-NEXT: sw a0, 0(a1)
+; RV64IZFINX-NEXT: mv a0, a2
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_w_demanded_bits:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -566,6 +709,20 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64IF-NEXT: fsw fa5, 0(a1)
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_s_wu_demanded_bits:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi a0, a0, 1
+; RV32IZFINX-NEXT: fcvt.s.wu a2, a0
+; RV32IZFINX-NEXT: sw a2, 0(a1)
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_s_wu_demanded_bits:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addiw a0, a0, 1
+; RV64IZFINX-NEXT: fcvt.s.wu a2, a0
+; RV64IZFINX-NEXT: sw a2, 0(a1)
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_wu_demanded_bits:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index fc9017ea64ed..7224f5b79b7a 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -3,6 +3,10 @@
; RUN: -target-abi=ilp32f | FileCheck -check-prefixes=CHECKIF,RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=lp64f | FileCheck -check-prefixes=CHECKIF,RV64IF %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=ilp32 | FileCheck -check-prefixes=CHECKIZFINX,RV32IZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=lp64 | FileCheck -check-prefixes=CHECKIZFINX,RV64IZFINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -14,6 +18,11 @@ define i32 @fcvt_w_s(float %a) nounwind {
; CHECKIF-NEXT: fcvt.w.s a0, fa0, rtz
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_w_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.w.s a0, a0, rtz
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_w_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -45,6 +54,15 @@ define i32 @fcvt_w_s_sat(float %a) nounwind {
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_w_s_sat:
+; CHECKIZFINX: # %bb.0: # %start
+; CHECKIZFINX-NEXT: fcvt.w.s a1, a0, rtz
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: seqz a0, a0
+; CHECKIZFINX-NEXT: addi a0, a0, -1
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_w_s_sat:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -32
@@ -140,6 +158,11 @@ define i32 @fcvt_wu_s(float %a) nounwind {
; CHECKIF-NEXT: fcvt.wu.s a0, fa0, rtz
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_wu_s:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.wu.s a0, a0, rtz
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_wu_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -171,6 +194,13 @@ define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind {
; CHECKIF-NEXT: add a0, a0, a1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_wu_s_multiple_use:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.wu.s a0, a0, rtz
+; CHECKIZFINX-NEXT: seqz a1, a0
+; CHECKIZFINX-NEXT: add a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_wu_s_multiple_use:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -219,6 +249,26 @@ define i32 @fcvt_wu_s_sat(float %a) nounwind {
; RV64IF-NEXT: srli a0, a0, 32
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_wu_s_sat:
+; RV32IZFINX: # %bb.0: # %start
+; RV32IZFINX-NEXT: fcvt.wu.s a1, a0, rtz
+; RV32IZFINX-NEXT: feq.s a0, a0, a0
+; RV32IZFINX-NEXT: seqz a0, a0
+; RV32IZFINX-NEXT: addi a0, a0, -1
+; RV32IZFINX-NEXT: and a0, a0, a1
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_wu_s_sat:
+; RV64IZFINX: # %bb.0: # %start
+; RV64IZFINX-NEXT: fcvt.wu.s a1, a0, rtz
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: seqz a0, a0
+; RV64IZFINX-NEXT: addiw a0, a0, -1
+; RV64IZFINX-NEXT: and a0, a1, a0
+; RV64IZFINX-NEXT: slli a0, a0, 32
+; RV64IZFINX-NEXT: srli a0, a0, 32
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_wu_s_sat:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -16
@@ -293,6 +343,11 @@ define i32 @fmv_x_w(float %a, float %b) nounwind {
; CHECKIF-NEXT: fmv.x.w a0, fa5
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fmv_x_w:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fmv_x_w:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -322,6 +377,11 @@ define float @fcvt_s_w(i32 %a) nounwind {
; CHECKIF-NEXT: fcvt.s.w fa0, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_s_w:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.s.w a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_w:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -351,6 +411,12 @@ define float @fcvt_s_w_load(ptr %p) nounwind {
; CHECKIF-NEXT: fcvt.s.w fa0, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_s_w_load:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: lw a0, 0(a0)
+; CHECKIZFINX-NEXT: fcvt.s.w a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_w_load:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -381,6 +447,11 @@ define float @fcvt_s_wu(i32 %a) nounwind {
; CHECKIF-NEXT: fcvt.s.wu fa0, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_s_wu:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.s.wu a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_wu:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -416,6 +487,18 @@ define float @fcvt_s_wu_load(ptr %p) nounwind {
; RV64IF-NEXT: fcvt.s.wu fa0, a0
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_s_wu_load:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lw a0, 0(a0)
+; RV32IZFINX-NEXT: fcvt.s.wu a0, a0
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_s_wu_load:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lwu a0, 0(a0)
+; RV64IZFINX-NEXT: fcvt.s.wu a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_wu_load:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -448,6 +531,11 @@ define float @fmv_w_x(i32 %a, i32 %b) nounwind {
; CHECKIF-NEXT: fadd.s fa0, fa5, fa4
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fmv_w_x:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fmv_w_x:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -487,6 +575,20 @@ define i64 @fcvt_l_s(float %a) nounwind {
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_l_s:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_l_s:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_l_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -556,6 +658,53 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_l_s_sat:
+; RV32IZFINX: # %bb.0: # %start
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: lui a0, 913408
+; RV32IZFINX-NEXT: fle.s s1, a0, s0
+; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT:    call __fixsfdi@plt
+; RV32IZFINX-NEXT: lui a4, 524288
+; RV32IZFINX-NEXT: lui a2, 524288
+; RV32IZFINX-NEXT: beqz s1, .LBB12_2
+; RV32IZFINX-NEXT: # %bb.1: # %start
+; RV32IZFINX-NEXT: mv a2, a1
+; RV32IZFINX-NEXT: .LBB12_2: # %start
+; RV32IZFINX-NEXT: lui a1, %hi(.LCPI12_0)
+; RV32IZFINX-NEXT: lw a1, %lo(.LCPI12_0)(a1)
+; RV32IZFINX-NEXT: flt.s a3, a1, s0
+; RV32IZFINX-NEXT: beqz a3, .LBB12_4
+; RV32IZFINX-NEXT: # %bb.3:
+; RV32IZFINX-NEXT: addi a2, a4, -1
+; RV32IZFINX-NEXT: .LBB12_4: # %start
+; RV32IZFINX-NEXT: feq.s a1, s0, s0
+; RV32IZFINX-NEXT: neg a4, a1
+; RV32IZFINX-NEXT: and a1, a4, a2
+; RV32IZFINX-NEXT: neg a2, s1
+; RV32IZFINX-NEXT: and a0, a2, a0
+; RV32IZFINX-NEXT: neg a2, a3
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: and a0, a4, a0
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_l_s_sat:
+; RV64IZFINX: # %bb.0: # %start
+; RV64IZFINX-NEXT: fcvt.l.s a1, a0, rtz
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: seqz a0, a0
+; RV64IZFINX-NEXT: addi a0, a0, -1
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_l_s_sat:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -32
@@ -673,6 +822,20 @@ define i64 @fcvt_lu_s(float %a) nounwind {
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_lu_s:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_lu_s:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_lu_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -729,6 +892,40 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_lu_s_sat:
+; RV32IZFINX: # %bb.0: # %start
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: fle.s a0, zero, a0
+; RV32IZFINX-NEXT: neg s1, a0
+; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT:    call __fixunssfdi@plt
+; RV32IZFINX-NEXT: lui a2, %hi(.LCPI14_0)
+; RV32IZFINX-NEXT: lw a2, %lo(.LCPI14_0)(a2)
+; RV32IZFINX-NEXT: and a0, s1, a0
+; RV32IZFINX-NEXT: flt.s a2, a2, s0
+; RV32IZFINX-NEXT: neg a2, a2
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: and a1, s1, a1
+; RV32IZFINX-NEXT: or a1, a2, a1
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_lu_s_sat:
+; RV64IZFINX: # %bb.0: # %start
+; RV64IZFINX-NEXT: fcvt.lu.s a1, a0, rtz
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: seqz a0, a0
+; RV64IZFINX-NEXT: addi a0, a0, -1
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_lu_s_sat:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -32
@@ -810,6 +1007,20 @@ define float @fcvt_s_l(i64 %a) nounwind {
; RV64IF-NEXT: fcvt.s.l fa0, a0
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_s_l:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call __floatdisf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_s_l:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.s.l a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_l:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -846,6 +1057,20 @@ define float @fcvt_s_lu(i64 %a) nounwind {
; RV64IF-NEXT: fcvt.s.lu fa0, a0
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_s_lu:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call __floatundisf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_s_lu:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.s.lu a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_lu:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -873,6 +1098,11 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind {
; CHECKIF-NEXT: fcvt.s.w fa0, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_s_w_i8:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.s.w a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_w_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -900,6 +1130,11 @@ define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind {
; CHECKIF-NEXT: fcvt.s.wu fa0, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_s_wu_i8:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.s.wu a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_wu_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -927,6 +1162,11 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind {
; CHECKIF-NEXT: fcvt.s.w fa0, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_s_w_i16:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.s.w a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_w_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -954,6 +1194,11 @@ define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind {
; CHECKIF-NEXT: fcvt.s.wu fa0, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_s_wu_i16:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.s.wu a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_wu_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -991,6 +1236,22 @@ define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64IF-NEXT: fsw fa5, 0(a1)
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_s_w_demanded_bits:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi a0, a0, 1
+; RV32IZFINX-NEXT: fcvt.s.w a2, a0
+; RV32IZFINX-NEXT: sw a2, 0(a1)
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_s_w_demanded_bits:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addiw a2, a0, 1
+; RV64IZFINX-NEXT: addi a0, a0, 1
+; RV64IZFINX-NEXT: fcvt.s.w a0, a0
+; RV64IZFINX-NEXT: sw a0, 0(a1)
+; RV64IZFINX-NEXT: mv a0, a2
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_w_demanded_bits:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1048,6 +1309,20 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
; RV64IF-NEXT: fsw fa5, 0(a1)
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_s_wu_demanded_bits:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi a0, a0, 1
+; RV32IZFINX-NEXT: fcvt.s.wu a2, a0
+; RV32IZFINX-NEXT: sw a2, 0(a1)
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_s_wu_demanded_bits:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addiw a0, a0, 1
+; RV64IZFINX-NEXT: fcvt.s.wu a2, a0
+; RV64IZFINX-NEXT: sw a2, 0(a1)
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_s_wu_demanded_bits:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1100,6 +1375,16 @@ define signext i16 @fcvt_w_s_i16(float %a) nounwind {
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_w_s_i16:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rtz
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_w_s_i16:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_w_s_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1150,6 +1435,32 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
; RV64IF-NEXT: and a0, a0, a1
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_w_s_sat_i16:
+; RV32IZFINX: # %bb.0: # %start
+; RV32IZFINX-NEXT: feq.s a1, a0, a0
+; RV32IZFINX-NEXT: lui a2, %hi(.LCPI24_0)
+; RV32IZFINX-NEXT: lw a2, %lo(.LCPI24_0)(a2)
+; RV32IZFINX-NEXT: neg a1, a1
+; RV32IZFINX-NEXT: lui a3, 815104
+; RV32IZFINX-NEXT: fmax.s a0, a0, a3
+; RV32IZFINX-NEXT: fmin.s a0, a0, a2
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rtz
+; RV32IZFINX-NEXT: and a0, a1, a0
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_w_s_sat_i16:
+; RV64IZFINX: # %bb.0: # %start
+; RV64IZFINX-NEXT: lui a1, 815104
+; RV64IZFINX-NEXT: lui a2, %hi(.LCPI24_0)
+; RV64IZFINX-NEXT: lw a2, %lo(.LCPI24_0)(a2)
+; RV64IZFINX-NEXT: fmax.s a1, a0, a1
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: neg a0, a0
+; RV64IZFINX-NEXT: fmin.s a1, a1, a2
+; RV64IZFINX-NEXT: fcvt.l.s a1, a1, rtz
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_w_s_sat_i16:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -16
@@ -1250,6 +1561,16 @@ define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_wu_s_i16:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rtz
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_wu_s_i16:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_wu_s_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1292,6 +1613,24 @@ define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind {
; RV64IF-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_wu_s_sat_i16:
+; RV32IZFINX: # %bb.0: # %start
+; RV32IZFINX-NEXT: lui a1, %hi(.LCPI26_0)
+; RV32IZFINX-NEXT: lw a1, %lo(.LCPI26_0)(a1)
+; RV32IZFINX-NEXT: fmax.s a0, a0, zero
+; RV32IZFINX-NEXT: fmin.s a0, a0, a1
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rtz
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_wu_s_sat_i16:
+; RV64IZFINX: # %bb.0: # %start
+; RV64IZFINX-NEXT: lui a1, %hi(.LCPI26_0)
+; RV64IZFINX-NEXT: lw a1, %lo(.LCPI26_0)(a1)
+; RV64IZFINX-NEXT: fmax.s a0, a0, zero
+; RV64IZFINX-NEXT: fmin.s a0, a0, a1
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_wu_s_sat_i16:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -16
@@ -1382,6 +1721,16 @@ define signext i8 @fcvt_w_s_i8(float %a) nounwind {
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_w_s_i8:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rtz
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_w_s_i8:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_w_s_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1432,6 +1781,30 @@ define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
; RV64IF-NEXT: and a0, a0, a1
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_w_s_sat_i8:
+; RV32IZFINX: # %bb.0: # %start
+; RV32IZFINX-NEXT: feq.s a1, a0, a0
+; RV32IZFINX-NEXT: neg a1, a1
+; RV32IZFINX-NEXT: lui a2, 798720
+; RV32IZFINX-NEXT: fmax.s a0, a0, a2
+; RV32IZFINX-NEXT: lui a2, 274400
+; RV32IZFINX-NEXT: fmin.s a0, a0, a2
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rtz
+; RV32IZFINX-NEXT: and a0, a1, a0
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_w_s_sat_i8:
+; RV64IZFINX: # %bb.0: # %start
+; RV64IZFINX-NEXT: feq.s a1, a0, a0
+; RV64IZFINX-NEXT: neg a1, a1
+; RV64IZFINX-NEXT: lui a2, 798720
+; RV64IZFINX-NEXT: fmax.s a0, a0, a2
+; RV64IZFINX-NEXT: lui a2, 274400
+; RV64IZFINX-NEXT: fmin.s a0, a0, a2
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rtz
+; RV64IZFINX-NEXT: and a0, a1, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_w_s_sat_i8:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -16
@@ -1528,6 +1901,16 @@ define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_wu_s_i8:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rtz
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_wu_s_i8:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_wu_s_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1570,6 +1953,22 @@ define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
; RV64IF-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_wu_s_sat_i8:
+; RV32IZFINX: # %bb.0: # %start
+; RV32IZFINX-NEXT: fmax.s a0, a0, zero
+; RV32IZFINX-NEXT: lui a1, 276464
+; RV32IZFINX-NEXT: fmin.s a0, a0, a1
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rtz
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_wu_s_sat_i8:
+; RV64IZFINX: # %bb.0: # %start
+; RV64IZFINX-NEXT: fmax.s a0, a0, zero
+; RV64IZFINX-NEXT: lui a1, 276464
+; RV64IZFINX-NEXT: fmin.s a0, a0, a1
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_wu_s_sat_i8:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -16
@@ -1664,6 +2063,26 @@ define zeroext i32 @fcvt_wu_s_sat_zext(float %a) nounwind {
; RV64IF-NEXT: srli a0, a0, 32
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fcvt_wu_s_sat_zext:
+; RV32IZFINX: # %bb.0: # %start
+; RV32IZFINX-NEXT: fcvt.wu.s a1, a0, rtz
+; RV32IZFINX-NEXT: feq.s a0, a0, a0
+; RV32IZFINX-NEXT: seqz a0, a0
+; RV32IZFINX-NEXT: addi a0, a0, -1
+; RV32IZFINX-NEXT: and a0, a0, a1
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fcvt_wu_s_sat_zext:
+; RV64IZFINX: # %bb.0: # %start
+; RV64IZFINX-NEXT: fcvt.wu.s a1, a0, rtz
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: seqz a0, a0
+; RV64IZFINX-NEXT: addiw a0, a0, -1
+; RV64IZFINX-NEXT: and a0, a1, a0
+; RV64IZFINX-NEXT: slli a0, a0, 32
+; RV64IZFINX-NEXT: srli a0, a0, 32
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_wu_s_sat_zext:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -16
@@ -1742,6 +2161,15 @@ define signext i32 @fcvt_w_s_sat_sext(float %a) nounwind {
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcvt_w_s_sat_sext:
+; CHECKIZFINX: # %bb.0: # %start
+; CHECKIZFINX-NEXT: fcvt.w.s a1, a0, rtz
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: seqz a0, a0
+; CHECKIZFINX-NEXT: addi a0, a0, -1
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcvt_w_s_sat_sext:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -32
diff --git a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
index 2fda42008d45..36eb58fe7454 100644
--- a/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp-strict.ll
@@ -5,6 +5,12 @@
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation -target-abi=lp64f \
; RUN: | FileCheck -check-prefix=CHECKIF %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -disable-strictnode-mutation -target-abi=ilp32 \
+; RUN: | FileCheck -check-prefix=CHECKIZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -disable-strictnode-mutation -target-abi=lp64 \
+; RUN: | FileCheck -check-prefix=CHECKIZFINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -16,6 +22,11 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: feq.s a0, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_oeq:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: feq.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_oeq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -50,6 +61,15 @@ define i32 @fcmp_ogt(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: feq.s zero, fa1, fa0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_ogt:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: csrr a3, fflags
+; CHECKIZFINX-NEXT: flt.s a2, a1, a0
+; CHECKIZFINX-NEXT: csrw fflags, a3
+; CHECKIZFINX-NEXT: feq.s zero, a1, a0
+; CHECKIZFINX-NEXT: mv a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_ogt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -83,6 +103,15 @@ define i32 @fcmp_oge(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: feq.s zero, fa1, fa0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_oge:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: csrr a3, fflags
+; CHECKIZFINX-NEXT: fle.s a2, a1, a0
+; CHECKIZFINX-NEXT: csrw fflags, a3
+; CHECKIZFINX-NEXT: feq.s zero, a1, a0
+; CHECKIZFINX-NEXT: mv a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_oge:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -118,6 +147,15 @@ define i32 @fcmp_olt(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: feq.s zero, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_olt:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: csrr a3, fflags
+; CHECKIZFINX-NEXT: flt.s a2, a0, a1
+; CHECKIZFINX-NEXT: csrw fflags, a3
+; CHECKIZFINX-NEXT: feq.s zero, a0, a1
+; CHECKIZFINX-NEXT: mv a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_olt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -151,6 +189,15 @@ define i32 @fcmp_ole(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: feq.s zero, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_ole:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: csrr a3, fflags
+; CHECKIZFINX-NEXT: fle.s a2, a0, a1
+; CHECKIZFINX-NEXT: csrw fflags, a3
+; CHECKIZFINX-NEXT: feq.s zero, a0, a1
+; CHECKIZFINX-NEXT: mv a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_ole:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -191,6 +238,20 @@ define i32 @fcmp_one(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: feq.s zero, fa1, fa0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_one:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: csrr a2, fflags
+; CHECKIZFINX-NEXT: flt.s a3, a0, a1
+; CHECKIZFINX-NEXT: csrw fflags, a2
+; CHECKIZFINX-NEXT: feq.s zero, a0, a1
+; CHECKIZFINX-NEXT: csrr a2, fflags
+; CHECKIZFINX-NEXT: flt.s a4, a1, a0
+; CHECKIZFINX-NEXT: csrw fflags, a2
+; CHECKIZFINX-NEXT: or a2, a4, a3
+; CHECKIZFINX-NEXT: feq.s zero, a1, a0
+; CHECKIZFINX-NEXT: mv a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_one:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -249,6 +310,13 @@ define i32 @fcmp_ord(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_ord:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: feq.s a1, a1, a1
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_ord:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -290,6 +358,21 @@ define i32 @fcmp_ueq(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: feq.s zero, fa1, fa0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_ueq:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: csrr a2, fflags
+; CHECKIZFINX-NEXT: flt.s a3, a0, a1
+; CHECKIZFINX-NEXT: csrw fflags, a2
+; CHECKIZFINX-NEXT: feq.s zero, a0, a1
+; CHECKIZFINX-NEXT: csrr a2, fflags
+; CHECKIZFINX-NEXT: flt.s a4, a1, a0
+; CHECKIZFINX-NEXT: csrw fflags, a2
+; CHECKIZFINX-NEXT: or a3, a4, a3
+; CHECKIZFINX-NEXT: xori a2, a3, 1
+; CHECKIZFINX-NEXT: feq.s zero, a1, a0
+; CHECKIZFINX-NEXT: mv a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_ueq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -350,6 +433,16 @@ define i32 @fcmp_ugt(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: feq.s zero, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_ugt:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: csrr a2, fflags
+; CHECKIZFINX-NEXT: fle.s a3, a0, a1
+; CHECKIZFINX-NEXT: csrw fflags, a2
+; CHECKIZFINX-NEXT: xori a2, a3, 1
+; CHECKIZFINX-NEXT: feq.s zero, a0, a1
+; CHECKIZFINX-NEXT: mv a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_ugt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -384,6 +477,16 @@ define i32 @fcmp_uge(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: feq.s zero, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_uge:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: csrr a2, fflags
+; CHECKIZFINX-NEXT: flt.s a3, a0, a1
+; CHECKIZFINX-NEXT: csrw fflags, a2
+; CHECKIZFINX-NEXT: xori a2, a3, 1
+; CHECKIZFINX-NEXT: feq.s zero, a0, a1
+; CHECKIZFINX-NEXT: mv a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_uge:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -420,6 +523,16 @@ define i32 @fcmp_ult(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: feq.s zero, fa1, fa0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_ult:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: csrr a2, fflags
+; CHECKIZFINX-NEXT: fle.s a3, a1, a0
+; CHECKIZFINX-NEXT: csrw fflags, a2
+; CHECKIZFINX-NEXT: xori a2, a3, 1
+; CHECKIZFINX-NEXT: feq.s zero, a1, a0
+; CHECKIZFINX-NEXT: mv a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_ult:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -454,6 +567,16 @@ define i32 @fcmp_ule(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: feq.s zero, fa1, fa0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_ule:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: csrr a2, fflags
+; CHECKIZFINX-NEXT: flt.s a3, a1, a0
+; CHECKIZFINX-NEXT: csrw fflags, a2
+; CHECKIZFINX-NEXT: xori a2, a3, 1
+; CHECKIZFINX-NEXT: feq.s zero, a1, a0
+; CHECKIZFINX-NEXT: mv a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_ule:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -485,6 +608,12 @@ define i32 @fcmp_une(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_une:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: feq.s a0, a0, a1
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_une:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -518,6 +647,14 @@ define i32 @fcmp_uno(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_uno:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: feq.s a1, a1, a1
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_uno:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -550,6 +687,13 @@ define i32 @fcmps_oeq(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmps_oeq:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fle.s a2, a1, a0
+; CHECKIZFINX-NEXT: fle.s a0, a0, a1
+; CHECKIZFINX-NEXT: and a0, a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmps_oeq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -581,6 +725,11 @@ define i32 @fcmps_ogt(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: flt.s a0, fa1, fa0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmps_ogt:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: flt.s a0, a1, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmps_ogt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -611,6 +760,11 @@ define i32 @fcmps_oge(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: fle.s a0, fa1, fa0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmps_oge:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fle.s a0, a1, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmps_oge:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -643,6 +797,11 @@ define i32 @fcmps_olt(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: flt.s a0, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmps_olt:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: flt.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmps_olt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -673,6 +832,11 @@ define i32 @fcmps_ole(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: fle.s a0, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmps_ole:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fle.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmps_ole:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -705,6 +869,13 @@ define i32 @fcmps_one(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: or a0, a1, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmps_one:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: flt.s a2, a0, a1
+; CHECKIZFINX-NEXT: flt.s a0, a1, a0
+; CHECKIZFINX-NEXT: or a0, a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmps_one:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -763,6 +934,13 @@ define i32 @fcmps_ord(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmps_ord:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fle.s a1, a1, a1
+; CHECKIZFINX-NEXT: fle.s a0, a0, a0
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmps_ord:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -796,6 +974,14 @@ define i32 @fcmps_ueq(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmps_ueq:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: flt.s a2, a0, a1
+; CHECKIZFINX-NEXT: flt.s a0, a1, a0
+; CHECKIZFINX-NEXT: or a0, a0, a2
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmps_ueq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -853,6 +1039,12 @@ define i32 @fcmps_ugt(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmps_ugt:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fle.s a0, a0, a1
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmps_ugt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -884,6 +1076,12 @@ define i32 @fcmps_uge(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmps_uge:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: flt.s a0, a0, a1
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmps_uge:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -917,6 +1115,12 @@ define i32 @fcmps_ult(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmps_ult:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fle.s a0, a1, a0
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmps_ult:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -948,6 +1152,12 @@ define i32 @fcmps_ule(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmps_ule:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: flt.s a0, a1, a0
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmps_ule:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -981,6 +1191,14 @@ define i32 @fcmps_une(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmps_une:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fle.s a2, a1, a0
+; CHECKIZFINX-NEXT: fle.s a0, a0, a1
+; CHECKIZFINX-NEXT: and a0, a0, a2
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmps_une:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1014,6 +1232,14 @@ define i32 @fcmps_uno(float %a, float %b) nounwind strictfp {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmps_uno:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fle.s a1, a1, a1
+; CHECKIZFINX-NEXT: fle.s a0, a0, a0
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmps_uno:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
diff --git a/llvm/test/CodeGen/RISCV/float-fcmp.ll b/llvm/test/CodeGen/RISCV/float-fcmp.ll
index 083262d6372c..b4fbed1321e2 100644
--- a/llvm/test/CodeGen/RISCV/float-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp.ll
@@ -3,6 +3,10 @@
; RUN: -target-abi=ilp32f | FileCheck -check-prefix=CHECKIF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=lp64f | FileCheck -check-prefix=CHECKIF %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=ilp32 | FileCheck -check-prefix=CHECKIZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=lp64 | FileCheck -check-prefix=CHECKIZFINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -14,6 +18,11 @@ define i32 @fcmp_false(float %a, float %b) nounwind {
; CHECKIF-NEXT: li a0, 0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_false:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: li a0, 0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_false:
; RV32I: # %bb.0:
; RV32I-NEXT: li a0, 0
@@ -34,6 +43,11 @@ define i32 @fcmp_oeq(float %a, float %b) nounwind {
; CHECKIF-NEXT: feq.s a0, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_oeq:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: feq.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_oeq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -64,6 +78,11 @@ define i32 @fcmp_ogt(float %a, float %b) nounwind {
; CHECKIF-NEXT: flt.s a0, fa1, fa0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_ogt:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: flt.s a0, a1, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_ogt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -94,6 +113,11 @@ define i32 @fcmp_oge(float %a, float %b) nounwind {
; CHECKIF-NEXT: fle.s a0, fa1, fa0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_oge:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fle.s a0, a1, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_oge:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -126,6 +150,11 @@ define i32 @fcmp_olt(float %a, float %b) nounwind {
; CHECKIF-NEXT: flt.s a0, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_olt:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: flt.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_olt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -156,6 +185,11 @@ define i32 @fcmp_ole(float %a, float %b) nounwind {
; CHECKIF-NEXT: fle.s a0, fa0, fa1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_ole:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fle.s a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_ole:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -188,6 +222,13 @@ define i32 @fcmp_one(float %a, float %b) nounwind {
; CHECKIF-NEXT: or a0, a1, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_one:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: flt.s a2, a0, a1
+; CHECKIZFINX-NEXT: flt.s a0, a1, a0
+; CHECKIZFINX-NEXT: or a0, a0, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_one:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -246,6 +287,13 @@ define i32 @fcmp_ord(float %a, float %b) nounwind {
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_ord:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: feq.s a1, a1, a1
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_ord:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -279,6 +327,14 @@ define i32 @fcmp_ueq(float %a, float %b) nounwind {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_ueq:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: flt.s a2, a0, a1
+; CHECKIZFINX-NEXT: flt.s a0, a1, a0
+; CHECKIZFINX-NEXT: or a0, a0, a2
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_ueq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -336,6 +392,12 @@ define i32 @fcmp_ugt(float %a, float %b) nounwind {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_ugt:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fle.s a0, a0, a1
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_ugt:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -367,6 +429,12 @@ define i32 @fcmp_uge(float %a, float %b) nounwind {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_uge:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: flt.s a0, a0, a1
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_uge:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -400,6 +468,12 @@ define i32 @fcmp_ult(float %a, float %b) nounwind {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_ult:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fle.s a0, a1, a0
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_ult:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -431,6 +505,12 @@ define i32 @fcmp_ule(float %a, float %b) nounwind {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_ule:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: flt.s a0, a1, a0
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_ule:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -462,6 +542,12 @@ define i32 @fcmp_une(float %a, float %b) nounwind {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_une:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: feq.s a0, a0, a1
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_une:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -495,6 +581,14 @@ define i32 @fcmp_uno(float %a, float %b) nounwind {
; CHECKIF-NEXT: xori a0, a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_uno:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: feq.s a1, a1, a1
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: xori a0, a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_uno:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -525,6 +619,11 @@ define i32 @fcmp_true(float %a, float %b) nounwind {
; CHECKIF-NEXT: li a0, 1
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fcmp_true:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: li a0, 1
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fcmp_true:
; RV32I: # %bb.0:
; RV32I-NEXT: li a0, 1
diff --git a/llvm/test/CodeGen/RISCV/float-frem.ll b/llvm/test/CodeGen/RISCV/float-frem.ll
index d8e90b2029e6..cb80c6cfbeaa 100644
--- a/llvm/test/CodeGen/RISCV/float-frem.ll
+++ b/llvm/test/CodeGen/RISCV/float-frem.ll
@@ -3,6 +3,10 @@
; RUN: | FileCheck -check-prefix=RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64IF %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32IZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64IZFINX %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -22,6 +26,19 @@ define float @frem_f32(float %a, float %b) nounwind {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: frem_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT:    tail fmodf@plt
+;
+; RV64IZFINX-LABEL: frem_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call fmodf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: frem_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
diff --git a/llvm/test/CodeGen/RISCV/float-imm.ll b/llvm/test/CodeGen/RISCV/float-imm.ll
index c5fbe6da87a6..a99ba3faa576 100644
--- a/llvm/test/CodeGen/RISCV/float-imm.ll
+++ b/llvm/test/CodeGen/RISCV/float-imm.ll
@@ -3,6 +3,10 @@
; RUN: -target-abi=ilp32f | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=lp64f | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=ilp32 | FileCheck --check-prefixes=CHECKZFINX,RV32ZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=lp64 | FileCheck --check-prefixes=CHECKZFINX,RV64ZFINX %s
; TODO: constant pool shouldn't be necessary for RV64IF.
define float @float_imm() nounwind {
@@ -11,6 +15,18 @@ define float @float_imm() nounwind {
; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
; CHECK-NEXT: flw fa0, %lo(.LCPI0_0)(a0)
; CHECK-NEXT: ret
+;
+; RV32ZFINX-LABEL: float_imm:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: lui a0, 263313
+; RV32ZFINX-NEXT: addi a0, a0, -37
+; RV32ZFINX-NEXT: ret
+;
+; RV64ZFINX-LABEL: float_imm:
+; RV64ZFINX: # %bb.0:
+; RV64ZFINX-NEXT: lui a0, %hi(.LCPI0_0)
+; RV64ZFINX-NEXT: lw a0, %lo(.LCPI0_0)(a0)
+; RV64ZFINX-NEXT: ret
ret float 3.14159274101257324218750
}
@@ -21,6 +37,12 @@ define float @float_imm_op(float %a) nounwind {
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: fadd.s fa0, fa0, fa5
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: float_imm_op:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: lui a1, 260096
+; CHECKZFINX-NEXT: fadd.s a0, a0, a1
+; CHECKZFINX-NEXT: ret
%1 = fadd float %a, 1.0
ret float %1
}
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
index 736aa8c10955..d149b35f61c8 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics-strict.ll
@@ -5,6 +5,12 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+f \
; RUN: -verify-machineinstrs -disable-strictnode-mutation -target-abi=lp64f \
; RUN: | FileCheck -check-prefixes=CHECKIF,RV64IF %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zfinx \
+; RUN: -verify-machineinstrs -disable-strictnode-mutation -target-abi=ilp32 \
+; RUN: | FileCheck -check-prefixes=CHECKIZFINX,RV32IZFINX %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zfinx \
+; RUN: -verify-machineinstrs -disable-strictnode-mutation -target-abi=lp64 \
+; RUN: | FileCheck -check-prefixes=CHECKIZFINX,RV64IZFINX %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \
; RUN: -verify-machineinstrs -disable-strictnode-mutation \
; RUN: | FileCheck -check-prefix=RV32I %s
@@ -20,6 +26,11 @@ define float @sqrt_f32(float %a) nounwind strictfp {
; CHECKIF-NEXT: fsqrt.s fa0, fa0
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: sqrt_f32:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fsqrt.s a0, a0
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: sqrt_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -63,6 +74,25 @@ define float @powi_f32(float %a, i32 %b) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: powi_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call __powisf2@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: powi_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: sext.w a1, a1
+; RV64IZFINX-NEXT:    call __powisf2@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: powi_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -106,6 +136,24 @@ define float @sin_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: sin_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call sinf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: sin_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call sinf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: sin_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -148,6 +196,24 @@ define float @cos_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: cos_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call cosf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: cos_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call cosf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: cos_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -207,6 +273,42 @@ define float @sincos_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: sincos_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT:    call sinf@plt
+; RV32IZFINX-NEXT: mv s1, a0
+; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT:    call cosf@plt
+; RV32IZFINX-NEXT: fadd.s a0, s1, a0
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: sincos_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -32
+; RV64IZFINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: mv s0, a0
+; RV64IZFINX-NEXT:    call sinf@plt
+; RV64IZFINX-NEXT: mv s1, a0
+; RV64IZFINX-NEXT: mv a0, s0
+; RV64IZFINX-NEXT:    call cosf@plt
+; RV64IZFINX-NEXT: fadd.s a0, s1, a0
+; RV64IZFINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 32
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: sincos_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -273,6 +375,24 @@ define float @pow_f32(float %a, float %b) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: pow_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call powf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: pow_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call powf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: pow_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -315,6 +435,24 @@ define float @exp_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: exp_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call expf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: exp_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call expf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: exp_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -357,6 +495,24 @@ define float @exp2_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: exp2_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call exp2f@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: exp2_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call exp2f@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: exp2_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -399,6 +555,24 @@ define float @log_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: log_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call logf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: log_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call logf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: log_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -441,6 +615,24 @@ define float @log10_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: log10_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call log10f@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: log10_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call log10f@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: log10_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -483,6 +675,24 @@ define float @log2_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: log2_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call log2f@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: log2_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call log2f@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: log2_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -512,6 +722,11 @@ define float @fma_f32(float %a, float %b, float %c) nounwind strictfp {
; CHECKIF-NEXT: fmadd.s fa0, fa0, fa1, fa2
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fma_f32:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fmadd.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fma_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -541,6 +756,11 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind strictfp {
; CHECKIF-NEXT: fmadd.s fa0, fa0, fa1, fa2
; CHECKIF-NEXT: ret
;
+; CHECKIZFINX-LABEL: fmuladd_f32:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fmadd.s a0, a0, a1, a2
+; CHECKIZFINX-NEXT: ret
+;
; RV32I-LABEL: fmuladd_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -593,6 +813,24 @@ define float @minnum_f32(float %a, float %b) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: minnum_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call fminf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: minnum_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call fminf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: minnum_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -635,6 +873,24 @@ define float @maxnum_f32(float %a, float %b) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: maxnum_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call fmaxf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: maxnum_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call fmaxf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: maxnum_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -694,6 +950,24 @@ define float @floor_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: floor_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call floorf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: floor_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call floorf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: floor_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -736,6 +1010,24 @@ define float @ceil_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: ceil_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call ceilf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: ceil_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call ceilf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: ceil_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -778,6 +1070,24 @@ define float @trunc_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: trunc_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call truncf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: trunc_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call truncf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: trunc_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -820,6 +1130,24 @@ define float @rint_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: rint_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call rintf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: rint_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call rintf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: rint_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -862,6 +1190,24 @@ define float @nearbyint_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: nearbyint_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call nearbyintf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: nearbyint_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call nearbyintf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: nearbyint_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -904,6 +1250,24 @@ define float @round_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: round_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call roundf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: round_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call roundf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: round_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -946,6 +1310,24 @@ define float @roundeven_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: roundeven_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call roundevenf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: roundeven_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call roundevenf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: roundeven_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -980,6 +1362,16 @@ define iXLen @lrint_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: fcvt.l.s a0, fa0
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: lrint_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: lrint_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: lrint_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1014,6 +1406,16 @@ define iXLen @lround_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: lround_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rmm
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: lround_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: lround_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1052,6 +1454,20 @@ define i64 @llrint_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: fcvt.l.s a0, fa0
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: llrint_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call llrintf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: llrint_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: llrint_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1090,6 +1506,20 @@ define i64 @llround_f32(float %a) nounwind strictfp {
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
;
+; RV32IZFINX-LABEL: llround_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT:    call llroundf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: llround_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: llround_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
index 44ba6b54fe7f..06adc9dd2683 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -2,12 +2,18 @@
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+f \
; RUN: -verify-machineinstrs -target-abi=ilp32f \
; RUN: | FileCheck -check-prefix=RV32IF %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zfinx \
+; RUN: -verify-machineinstrs -target-abi=ilp32 \
+; RUN: | FileCheck -check-prefix=RV32IZFINX %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d \
; RUN: -verify-machineinstrs -target-abi=ilp32f \
; RUN: | FileCheck -check-prefix=RV32IF %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+f \
; RUN: -verify-machineinstrs -target-abi=lp64f \
; RUN: | FileCheck -check-prefix=RV64IF %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zfinx \
+; RUN: -verify-machineinstrs -target-abi=lp64 \
+; RUN: | FileCheck -check-prefix=RV64IZFINX %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \
; RUN: -verify-machineinstrs -target-abi=lp64d \
; RUN: | FileCheck -check-prefix=RV64IF %s
@@ -24,11 +30,21 @@ define float @sqrt_f32(float %a) nounwind {
; RV32IF-NEXT: fsqrt.s fa0, fa0
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: sqrt_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fsqrt.s a0, a0
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: sqrt_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fsqrt.s fa0, fa0
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: sqrt_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fsqrt.s a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: sqrt_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -57,6 +73,10 @@ define float @powi_f32(float %a, i32 %b) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT:    tail __powisf2@plt
;
+; RV32IZFINX-LABEL: powi_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT:    tail __powisf2@plt
+;
; RV64IF-LABEL: powi_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: addi sp, sp, -16
@@ -67,6 +87,16 @@ define float @powi_f32(float %a, i32 %b) nounwind {
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: powi_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: sext.w a1, a1
+; RV64IZFINX-NEXT:    call __powisf2@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: powi_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -96,10 +126,23 @@ define float @sin_f32(float %a) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT:    tail sinf@plt
;
+; RV32IZFINX-LABEL: sin_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT:    tail sinf@plt
+;
; RV64IF-LABEL: sin_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT:    tail sinf@plt
;
+; RV64IZFINX-LABEL: sin_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call sinf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: sin_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -128,10 +171,23 @@ define float @cos_f32(float %a) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT:    tail cosf@plt
;
+; RV32IZFINX-LABEL: cos_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT:    tail cosf@plt
+;
; RV64IF-LABEL: cos_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT:    tail cosf@plt
;
+; RV64IZFINX-LABEL: cos_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call cosf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: cos_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -173,6 +229,42 @@ define float @sincos_f32(float %a) nounwind {
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: sincos_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT:    call sinf@plt
+; RV32IZFINX-NEXT: mv s1, a0
+; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT:    call cosf@plt
+; RV32IZFINX-NEXT: fadd.s a0, s1, a0
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: sincos_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -32
+; RV64IZFINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: mv s0, a0
+; RV64IZFINX-NEXT:    call sinf@plt
+; RV64IZFINX-NEXT: mv s1, a0
+; RV64IZFINX-NEXT: mv a0, s0
+; RV64IZFINX-NEXT:    call cosf@plt
+; RV64IZFINX-NEXT: fadd.s a0, s1, a0
+; RV64IZFINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 32
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: sincos_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -225,10 +317,23 @@ define float @pow_f32(float %a, float %b) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT:    tail powf@plt
;
+; RV32IZFINX-LABEL: pow_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT:    tail powf@plt
+;
; RV64IF-LABEL: pow_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT:    tail powf@plt
;
+; RV64IZFINX-LABEL: pow_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call powf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: pow_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -257,10 +362,23 @@ define float @exp_f32(float %a) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT:    tail expf@plt
;
+; RV32IZFINX-LABEL: exp_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT:    tail expf@plt
+;
; RV64IF-LABEL: exp_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT:    tail expf@plt
;
+; RV64IZFINX-LABEL: exp_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call expf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: exp_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -289,10 +407,23 @@ define float @exp2_f32(float %a) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT:    tail exp2f@plt
;
+; RV32IZFINX-LABEL: exp2_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT:    tail exp2f@plt
+;
; RV64IF-LABEL: exp2_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT:    tail exp2f@plt
;
+; RV64IZFINX-LABEL: exp2_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call exp2f@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: exp2_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -321,10 +452,23 @@ define float @log_f32(float %a) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT:    tail logf@plt
;
+; RV32IZFINX-LABEL: log_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT:    tail logf@plt
+;
; RV64IF-LABEL: log_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT:    tail logf@plt
;
+; RV64IZFINX-LABEL: log_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call logf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: log_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -353,10 +497,23 @@ define float @log10_f32(float %a) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT:    tail log10f@plt
;
+; RV32IZFINX-LABEL: log10_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT:    tail log10f@plt
+;
; RV64IF-LABEL: log10_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT:    tail log10f@plt
;
+; RV64IZFINX-LABEL: log10_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call log10f@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: log10_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -385,10 +542,23 @@ define float @log2_f32(float %a) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT:    tail log2f@plt
;
+; RV32IZFINX-LABEL: log2_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT:    tail log2f@plt
+;
; RV64IF-LABEL: log2_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT:    tail log2f@plt
;
+; RV64IZFINX-LABEL: log2_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call log2f@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: log2_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -418,11 +588,21 @@ define float @fma_f32(float %a, float %b, float %c) nounwind {
; RV32IF-NEXT: fmadd.s fa0, fa0, fa1, fa2
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fma_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fmadd.s a0, a0, a1, a2
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: fma_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fmadd.s fa0, fa0, fa1, fa2
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: fma_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fmadd.s a0, a0, a1, a2
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fma_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -452,11 +632,21 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind {
; RV32IF-NEXT: fmadd.s fa0, fa0, fa1, fa2
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fmuladd_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fmadd.s a0, a0, a1, a2
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: fmuladd_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fmadd.s fa0, fa0, fa1, fa2
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: fmuladd_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fmadd.s a0, a0, a1, a2
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fmuladd_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -496,11 +686,23 @@ define float @fabs_f32(float %a) nounwind {
; RV32IF-NEXT: fabs.s fa0, fa0
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fabs_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: slli a0, a0, 1
+; RV32IZFINX-NEXT: srli a0, a0, 1
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: fabs_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fabs.s fa0, fa0
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: fabs_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: slli a0, a0, 33
+; RV64IZFINX-NEXT: srli a0, a0, 33
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fabs_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 1
@@ -524,11 +726,21 @@ define float @minnum_f32(float %a, float %b) nounwind {
; RV32IF-NEXT: fmin.s fa0, fa0, fa1
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: minnum_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fmin.s a0, a0, a1
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: minnum_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fmin.s fa0, fa0, fa1
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: minnum_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fmin.s a0, a0, a1
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: minnum_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -558,11 +770,21 @@ define float @maxnum_f32(float %a, float %b) nounwind {
; RV32IF-NEXT: fmax.s fa0, fa0, fa1
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: maxnum_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fmax.s a0, a0, a1
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: maxnum_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fmax.s fa0, fa0, fa1
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: maxnum_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fmax.s a0, a0, a1
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: maxnum_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -609,11 +831,21 @@ define float @copysign_f32(float %a, float %b) nounwind {
; RV32IF-NEXT: fsgnj.s fa0, fa0, fa1
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: copysign_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fsgnj.s a0, a0, a1
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: copysign_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fsgnj.s fa0, fa0, fa1
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: copysign_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fsgnj.s a0, a0, a1
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: copysign_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a2, 524288
@@ -652,6 +884,19 @@ define float @floor_f32(float %a) nounwind {
; RV32IF-NEXT: .LBB17_2:
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: floor_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB17_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rdn
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rdn
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB17_2:
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: floor_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, 307200
@@ -666,6 +911,19 @@ define float @floor_f32(float %a) nounwind {
; RV64IF-NEXT: .LBB17_2:
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: floor_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lui a1, 307200
+; RV64IZFINX-NEXT: fabs.s a2, a0
+; RV64IZFINX-NEXT: flt.s a1, a2, a1
+; RV64IZFINX-NEXT: beqz a1, .LBB17_2
+; RV64IZFINX-NEXT: # %bb.1:
+; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rdn
+; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rdn
+; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV64IZFINX-NEXT: .LBB17_2:
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: floor_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -704,6 +962,19 @@ define float @ceil_f32(float %a) nounwind {
; RV32IF-NEXT: .LBB18_2:
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: ceil_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB18_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rup
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rup
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB18_2:
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: ceil_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, 307200
@@ -718,6 +989,19 @@ define float @ceil_f32(float %a) nounwind {
; RV64IF-NEXT: .LBB18_2:
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: ceil_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lui a1, 307200
+; RV64IZFINX-NEXT: fabs.s a2, a0
+; RV64IZFINX-NEXT: flt.s a1, a2, a1
+; RV64IZFINX-NEXT: beqz a1, .LBB18_2
+; RV64IZFINX-NEXT: # %bb.1:
+; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rup
+; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rup
+; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV64IZFINX-NEXT: .LBB18_2:
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: ceil_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -756,6 +1040,19 @@ define float @trunc_f32(float %a) nounwind {
; RV32IF-NEXT: .LBB19_2:
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: trunc_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB19_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rtz
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rtz
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB19_2:
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: trunc_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, 307200
@@ -770,6 +1067,19 @@ define float @trunc_f32(float %a) nounwind {
; RV64IF-NEXT: .LBB19_2:
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: trunc_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lui a1, 307200
+; RV64IZFINX-NEXT: fabs.s a2, a0
+; RV64IZFINX-NEXT: flt.s a1, a2, a1
+; RV64IZFINX-NEXT: beqz a1, .LBB19_2
+; RV64IZFINX-NEXT: # %bb.1:
+; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rtz
+; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rtz
+; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV64IZFINX-NEXT: .LBB19_2:
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: trunc_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -808,6 +1118,19 @@ define float @rint_f32(float %a) nounwind {
; RV32IF-NEXT: .LBB20_2:
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: rint_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB20_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB20_2:
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: rint_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, 307200
@@ -822,6 +1145,19 @@ define float @rint_f32(float %a) nounwind {
; RV64IF-NEXT: .LBB20_2:
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: rint_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lui a1, 307200
+; RV64IZFINX-NEXT: fabs.s a2, a0
+; RV64IZFINX-NEXT: flt.s a1, a2, a1
+; RV64IZFINX-NEXT: beqz a1, .LBB20_2
+; RV64IZFINX-NEXT: # %bb.1:
+; RV64IZFINX-NEXT: fcvt.w.s a1, a0
+; RV64IZFINX-NEXT: fcvt.s.w a1, a1
+; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV64IZFINX-NEXT: .LBB20_2:
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: rint_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -850,10 +1186,23 @@ define float @nearbyint_f32(float %a) nounwind {
; RV32IF: # %bb.0:
; RV32IF-NEXT:    tail nearbyintf@plt
;
+; RV32IZFINX-LABEL: nearbyint_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT:    tail nearbyintf@plt
+;
; RV64IF-LABEL: nearbyint_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT:    tail nearbyintf@plt
;
+; RV64IZFINX-LABEL: nearbyint_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT:    call nearbyintf@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: nearbyint_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -892,6 +1241,19 @@ define float @round_f32(float %a) nounwind {
; RV32IF-NEXT: .LBB22_2:
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: round_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB22_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rmm
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rmm
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB22_2:
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: round_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, 307200
@@ -906,6 +1268,19 @@ define float @round_f32(float %a) nounwind {
; RV64IF-NEXT: .LBB22_2:
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: round_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lui a1, 307200
+; RV64IZFINX-NEXT: fabs.s a2, a0
+; RV64IZFINX-NEXT: flt.s a1, a2, a1
+; RV64IZFINX-NEXT: beqz a1, .LBB22_2
+; RV64IZFINX-NEXT: # %bb.1:
+; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rmm
+; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rmm
+; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV64IZFINX-NEXT: .LBB22_2:
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: round_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -944,6 +1319,19 @@ define float @roundeven_f32(float %a) nounwind {
; RV32IF-NEXT: .LBB23_2:
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: roundeven_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB23_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rne
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rne
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB23_2:
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: roundeven_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, 307200
@@ -958,6 +1346,19 @@ define float @roundeven_f32(float %a) nounwind {
; RV64IF-NEXT: .LBB23_2:
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: roundeven_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lui a1, 307200
+; RV64IZFINX-NEXT: fabs.s a2, a0
+; RV64IZFINX-NEXT: flt.s a1, a2, a1
+; RV64IZFINX-NEXT: beqz a1, .LBB23_2
+; RV64IZFINX-NEXT: # %bb.1:
+; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rne
+; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rne
+; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV64IZFINX-NEXT: .LBB23_2:
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: roundeven_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -987,11 +1388,21 @@ define iXLen @lrint_f32(float %a) nounwind {
; RV32IF-NEXT: fcvt.w.s a0, fa0
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: lrint_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: lrint_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: lrint_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: lrint_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1022,11 +1433,21 @@ define iXLen @lround_f32(float %a) nounwind {
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: lround_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rmm
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: lround_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: lround_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: lround_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1056,11 +1477,21 @@ define i32 @lround_i32_f32(float %a) nounwind {
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: lround_i32_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rmm
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: lround_i32_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: lround_i32_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rmm
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: lround_i32_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1094,11 +1525,25 @@ define i64 @llrint_f32(float %a) nounwind {
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: llrint_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: call llrintf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: llrint_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: llrint_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: llrint_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1132,11 +1577,25 @@ define i64 @llround_f32(float %a) nounwind {
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: llround_f32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: call llroundf@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: llround_f32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: llround_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: llround_f32:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
@@ -1167,6 +1626,13 @@ define i1 @fpclass(float %x) {
; RV32IF-NEXT: snez a0, a0
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: fpclass:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fclass.s a0, a0
+; RV32IZFINX-NEXT: andi a0, a0, 927
+; RV32IZFINX-NEXT: snez a0, a0
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: fpclass:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fclass.s a0, fa0
@@ -1174,6 +1640,13 @@ define i1 @fpclass(float %x) {
; RV64IF-NEXT: snez a0, a0
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: fpclass:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fclass.s a0, a0
+; RV64IZFINX-NEXT: andi a0, a0, 927
+; RV64IZFINX-NEXT: snez a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: fpclass:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 1
@@ -1238,6 +1711,13 @@ define i1 @isnan_fpclass(float %x) {
; RV32IF-NEXT: snez a0, a0
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: isnan_fpclass:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fclass.s a0, a0
+; RV32IZFINX-NEXT: andi a0, a0, 768
+; RV32IZFINX-NEXT: snez a0, a0
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: isnan_fpclass:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fclass.s a0, fa0
@@ -1245,6 +1725,13 @@ define i1 @isnan_fpclass(float %x) {
; RV64IF-NEXT: snez a0, a0
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: isnan_fpclass:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fclass.s a0, a0
+; RV64IZFINX-NEXT: andi a0, a0, 768
+; RV64IZFINX-NEXT: snez a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: isnan_fpclass:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 1
@@ -1272,6 +1759,13 @@ define i1 @isqnan_fpclass(float %x) {
; RV32IF-NEXT: srli a0, a0, 31
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: isqnan_fpclass:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fclass.s a0, a0
+; RV32IZFINX-NEXT: slli a0, a0, 22
+; RV32IZFINX-NEXT: srli a0, a0, 31
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: isqnan_fpclass:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fclass.s a0, fa0
@@ -1279,6 +1773,13 @@ define i1 @isqnan_fpclass(float %x) {
; RV64IF-NEXT: srli a0, a0, 63
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: isqnan_fpclass:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fclass.s a0, a0
+; RV64IZFINX-NEXT: slli a0, a0, 54
+; RV64IZFINX-NEXT: srli a0, a0, 63
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: isqnan_fpclass:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 1
@@ -1308,6 +1809,13 @@ define i1 @issnan_fpclass(float %x) {
; RV32IF-NEXT: srli a0, a0, 31
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: issnan_fpclass:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fclass.s a0, a0
+; RV32IZFINX-NEXT: slli a0, a0, 23
+; RV32IZFINX-NEXT: srli a0, a0, 31
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: issnan_fpclass:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fclass.s a0, fa0
@@ -1315,6 +1823,13 @@ define i1 @issnan_fpclass(float %x) {
; RV64IF-NEXT: srli a0, a0, 63
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: issnan_fpclass:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fclass.s a0, a0
+; RV64IZFINX-NEXT: slli a0, a0, 55
+; RV64IZFINX-NEXT: srli a0, a0, 63
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: issnan_fpclass:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 1
@@ -1348,6 +1863,13 @@ define i1 @isinf_fpclass(float %x) {
; RV32IF-NEXT: snez a0, a0
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: isinf_fpclass:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fclass.s a0, a0
+; RV32IZFINX-NEXT: andi a0, a0, 129
+; RV32IZFINX-NEXT: snez a0, a0
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: isinf_fpclass:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fclass.s a0, fa0
@@ -1355,6 +1877,13 @@ define i1 @isinf_fpclass(float %x) {
; RV64IF-NEXT: snez a0, a0
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: isinf_fpclass:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fclass.s a0, a0
+; RV64IZFINX-NEXT: andi a0, a0, 129
+; RV64IZFINX-NEXT: snez a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: isinf_fpclass:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 1
@@ -1384,6 +1913,13 @@ define i1 @isposinf_fpclass(float %x) {
; RV32IF-NEXT: srli a0, a0, 31
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: isposinf_fpclass:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fclass.s a0, a0
+; RV32IZFINX-NEXT: slli a0, a0, 24
+; RV32IZFINX-NEXT: srli a0, a0, 31
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: isposinf_fpclass:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fclass.s a0, fa0
@@ -1391,6 +1927,13 @@ define i1 @isposinf_fpclass(float %x) {
; RV64IF-NEXT: srli a0, a0, 63
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: isposinf_fpclass:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fclass.s a0, a0
+; RV64IZFINX-NEXT: slli a0, a0, 56
+; RV64IZFINX-NEXT: srli a0, a0, 63
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: isposinf_fpclass:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, 522240
@@ -1416,12 +1959,24 @@ define i1 @isneginf_fpclass(float %x) {
; RV32IF-NEXT: andi a0, a0, 1
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: isneginf_fpclass:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fclass.s a0, a0
+; RV32IZFINX-NEXT: andi a0, a0, 1
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: isneginf_fpclass:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fclass.s a0, fa0
; RV64IF-NEXT: andi a0, a0, 1
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: isneginf_fpclass:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fclass.s a0, a0
+; RV64IZFINX-NEXT: andi a0, a0, 1
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: isneginf_fpclass:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a1, 1046528
@@ -1448,6 +2003,13 @@ define i1 @isfinite_fpclass(float %x) {
; RV32IF-NEXT: snez a0, a0
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: isfinite_fpclass:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fclass.s a0, a0
+; RV32IZFINX-NEXT: andi a0, a0, 126
+; RV32IZFINX-NEXT: snez a0, a0
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: isfinite_fpclass:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fclass.s a0, fa0
@@ -1455,6 +2017,13 @@ define i1 @isfinite_fpclass(float %x) {
; RV64IF-NEXT: snez a0, a0
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: isfinite_fpclass:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fclass.s a0, a0
+; RV64IZFINX-NEXT: andi a0, a0, 126
+; RV64IZFINX-NEXT: snez a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: isfinite_fpclass:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 1
@@ -1482,6 +2051,13 @@ define i1 @isposfinite_fpclass(float %x) {
; RV32IF-NEXT: snez a0, a0
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: isposfinite_fpclass:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fclass.s a0, a0
+; RV32IZFINX-NEXT: andi a0, a0, 112
+; RV32IZFINX-NEXT: snez a0, a0
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: isposfinite_fpclass:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fclass.s a0, fa0
@@ -1489,6 +2065,13 @@ define i1 @isposfinite_fpclass(float %x) {
; RV64IF-NEXT: snez a0, a0
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: isposfinite_fpclass:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fclass.s a0, a0
+; RV64IZFINX-NEXT: andi a0, a0, 112
+; RV64IZFINX-NEXT: snez a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: isposfinite_fpclass:
; RV32I: # %bb.0:
; RV32I-NEXT: srli a0, a0, 23
@@ -1512,6 +2095,13 @@ define i1 @isnegfinite_fpclass(float %x) {
; RV32IF-NEXT: snez a0, a0
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: isnegfinite_fpclass:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fclass.s a0, a0
+; RV32IZFINX-NEXT: andi a0, a0, 14
+; RV32IZFINX-NEXT: snez a0, a0
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: isnegfinite_fpclass:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fclass.s a0, fa0
@@ -1519,6 +2109,13 @@ define i1 @isnegfinite_fpclass(float %x) {
; RV64IF-NEXT: snez a0, a0
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: isnegfinite_fpclass:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fclass.s a0, a0
+; RV64IZFINX-NEXT: andi a0, a0, 14
+; RV64IZFINX-NEXT: snez a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: isnegfinite_fpclass:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a0, 1
@@ -1551,6 +2148,13 @@ define i1 @isnotfinite_fpclass(float %x) {
; RV32IF-NEXT: snez a0, a0
; RV32IF-NEXT: ret
;
+; RV32IZFINX-LABEL: isnotfinite_fpclass:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fclass.s a0, a0
+; RV32IZFINX-NEXT: andi a0, a0, 897
+; RV32IZFINX-NEXT: snez a0, a0
+; RV32IZFINX-NEXT: ret
+;
; RV64IF-LABEL: isnotfinite_fpclass:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fclass.s a0, fa0
@@ -1558,6 +2162,13 @@ define i1 @isnotfinite_fpclass(float %x) {
; RV64IF-NEXT: snez a0, a0
; RV64IF-NEXT: ret
;
+; RV64IZFINX-LABEL: isnotfinite_fpclass:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fclass.s a0, a0
+; RV64IZFINX-NEXT: andi a0, a0, 897
+; RV64IZFINX-NEXT: snez a0, a0
+; RV64IZFINX-NEXT: ret
+;
; RV32I-LABEL: isnotfinite_fpclass:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 1
diff --git a/llvm/test/CodeGen/RISCV/float-isnan.ll b/llvm/test/CodeGen/RISCV/float-isnan.ll
index d3857cdc609e..1c5355c41755 100644
--- a/llvm/test/CodeGen/RISCV/float-isnan.ll
+++ b/llvm/test/CodeGen/RISCV/float-isnan.ll
@@ -3,6 +3,10 @@
; RUN: < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi lp64f -verify-machineinstrs \
; RUN: < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -target-abi ilp32 -verify-machineinstrs \
+; RUN: < %s | FileCheck --check-prefix=CHECKZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -target-abi lp64 -verify-machineinstrs \
+; RUN: < %s | FileCheck --check-prefix=CHECKZFINX %s
define zeroext i1 @float_is_nan(float %a) nounwind {
; CHECK-LABEL: float_is_nan:
@@ -10,6 +14,12 @@ define zeroext i1 @float_is_nan(float %a) nounwind {
; CHECK-NEXT: feq.s a0, fa0, fa0
; CHECK-NEXT: xori a0, a0, 1
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: float_is_nan:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: feq.s a0, a0, a0
+; CHECKZFINX-NEXT: xori a0, a0, 1
+; CHECKZFINX-NEXT: ret
%1 = fcmp uno float %a, 0.000000e+00
ret i1 %1
}
@@ -19,6 +29,11 @@ define zeroext i1 @float_not_nan(float %a) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: feq.s a0, fa0, fa0
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: float_not_nan:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: feq.s a0, a0, a0
+; CHECKZFINX-NEXT: ret
%1 = fcmp ord float %a, 0.000000e+00
ret i1 %1
}
diff --git a/llvm/test/CodeGen/RISCV/float-mem.ll b/llvm/test/CodeGen/RISCV/float-mem.ll
index 145d2e315cf8..8ef4f9162c2f 100644
--- a/llvm/test/CodeGen/RISCV/float-mem.ll
+++ b/llvm/test/CodeGen/RISCV/float-mem.ll
@@ -3,6 +3,10 @@
; RUN: -target-abi=ilp32f | FileCheck -check-prefixes=CHECKIF,RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=lp64f | FileCheck -check-prefixes=CHECKIF,RV64IF %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=ilp32 | FileCheck -check-prefixes=CHECKIZFINX,RV32IZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=lp64 | FileCheck -check-prefixes=CHECKIZFINX,RV64IZFINX %s
define dso_local float @flw(ptr %a) nounwind {
; CHECKIF-LABEL: flw:
@@ -11,6 +15,13 @@ define dso_local float @flw(ptr %a) nounwind {
; CHECKIF-NEXT: flw fa4, 12(a0)
; CHECKIF-NEXT: fadd.s fa0, fa5, fa4
; CHECKIF-NEXT: ret
+;
+; CHECKIZFINX-LABEL: flw:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: lw a1, 0(a0)
+; CHECKIZFINX-NEXT: lw a0, 12(a0)
+; CHECKIZFINX-NEXT: fadd.s a0, a1, a0
+; CHECKIZFINX-NEXT: ret
%1 = load float, ptr %a
%2 = getelementptr float, ptr %a, i32 3
%3 = load float, ptr %2
@@ -29,6 +40,13 @@ define dso_local void @fsw(ptr %a, float %b, float %c) nounwind {
; CHECKIF-NEXT: fsw fa5, 0(a0)
; CHECKIF-NEXT: fsw fa5, 32(a0)
; CHECKIF-NEXT: ret
+;
+; CHECKIZFINX-LABEL: fsw:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a1, a1, a2
+; CHECKIZFINX-NEXT: sw a1, 0(a0)
+; CHECKIZFINX-NEXT: sw a1, 32(a0)
+; CHECKIZFINX-NEXT: ret
%1 = fadd float %b, %c
store float %1, ptr %a
%2 = getelementptr float, ptr %a, i32 8
@@ -52,6 +70,17 @@ define dso_local float @flw_fsw_global(float %a, float %b) nounwind {
; CHECKIF-NEXT: flw fa5, 36(a1)
; CHECKIF-NEXT: fsw fa0, 36(a1)
; CHECKIF-NEXT: ret
+;
+; CHECKIZFINX-LABEL: flw_fsw_global:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fadd.s a0, a0, a1
+; CHECKIZFINX-NEXT: lui a1, %hi(G)
+; CHECKIZFINX-NEXT: lw a2, %lo(G)(a1)
+; CHECKIZFINX-NEXT: addi a2, a1, %lo(G)
+; CHECKIZFINX-NEXT: sw a0, %lo(G)(a1)
+; CHECKIZFINX-NEXT: lw a1, 36(a2)
+; CHECKIZFINX-NEXT: sw a0, 36(a2)
+; CHECKIZFINX-NEXT: ret
%1 = fadd float %a, %b
%2 = load volatile float, ptr @G
store float %1, ptr @G
@@ -79,6 +108,23 @@ define dso_local float @flw_fsw_constant(float %a) nounwind {
; RV64IF-NEXT: fadd.s fa0, fa0, fa5
; RV64IF-NEXT: fsw fa0, -273(a0)
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: flw_fsw_constant:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 912092
+; RV32IZFINX-NEXT: lw a2, -273(a1)
+; RV32IZFINX-NEXT: fadd.s a0, a0, a2
+; RV32IZFINX-NEXT: sw a0, -273(a1)
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: flw_fsw_constant:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lui a1, 228023
+; RV64IZFINX-NEXT: slli a1, a1, 2
+; RV64IZFINX-NEXT: lw a2, -273(a1)
+; RV64IZFINX-NEXT: fadd.s a0, a0, a2
+; RV64IZFINX-NEXT: sw a0, -273(a1)
+; RV64IZFINX-NEXT: ret
%1 = inttoptr i32 3735928559 to ptr
%2 = load volatile float, ptr %1
%3 = fadd float %a, %2
@@ -118,6 +164,36 @@ define dso_local float @flw_stack(float %a) nounwind {
; RV64IF-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: flw_stack:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: addi a0, sp, 4
+; RV32IZFINX-NEXT: call notdead@plt
+; RV32IZFINX-NEXT: lw a0, 4(sp)
+; RV32IZFINX-NEXT: fadd.s a0, a0, s0
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: flw_stack:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -32
+; RV64IZFINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: mv s0, a0
+; RV64IZFINX-NEXT: addi a0, sp, 12
+; RV64IZFINX-NEXT: call notdead@plt
+; RV64IZFINX-NEXT: lw a0, 12(sp)
+; RV64IZFINX-NEXT: fadd.s a0, a0, s0
+; RV64IZFINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 32
+; RV64IZFINX-NEXT: ret
%1 = alloca float, align 4
call void @notdead(ptr %1)
%2 = load float, ptr %1
@@ -149,6 +225,30 @@ define dso_local void @fsw_stack(float %a, float %b) nounwind {
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: fsw_stack:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: fadd.s a0, a0, a1
+; RV32IZFINX-NEXT: sw a0, 8(sp)
+; RV32IZFINX-NEXT: addi a0, sp, 8
+; RV32IZFINX-NEXT: call notdead@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: fsw_stack:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: addi sp, sp, -16
+; RV64IZFINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64IZFINX-NEXT: fadd.s a0, a0, a1
+; RV64IZFINX-NEXT: sw a0, 4(sp)
+; RV64IZFINX-NEXT: addi a0, sp, 4
+; RV64IZFINX-NEXT: call notdead@plt
+; RV64IZFINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFINX-NEXT: addi sp, sp, 16
+; RV64IZFINX-NEXT: ret
%1 = fadd float %a, %b ; force store from FPR32
%2 = alloca float, align 4
store float %1, ptr %2
diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
index 4e85ab16c047..61337216c7fb 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
@@ -3,6 +3,10 @@
; RUN: -target-abi=ilp32f | FileCheck -check-prefixes=CHECKIF,RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=lp64f | FileCheck -check-prefixes=CHECKIF,RV64IF %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=ilp32 | FileCheck -check-prefixes=CHECKIZFINX,RV32IZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=lp64 | FileCheck -check-prefixes=CHECKIZFINX,RV64IZFINX %s
define signext i32 @test_floor_si32(float %x) {
; CHECKIF-LABEL: test_floor_si32:
@@ -13,6 +17,15 @@ define signext i32 @test_floor_si32(float %x) {
; CHECKIF-NEXT: addi a1, a1, -1
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
+;
+; CHECKIZFINX-LABEL: test_floor_si32:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.w.s a1, a0, rdn
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: seqz a0, a0
+; CHECKIZFINX-NEXT: addi a0, a0, -1
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = call i32 @llvm.fptosi.sat.i32.f32(float %a)
ret i32 %b
@@ -76,6 +89,64 @@ define i64 @test_floor_si64(float %x) nounwind {
; RV64IF-NEXT: addi a1, a1, -1
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_floor_si64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: lui a0, 307200
+; RV32IZFINX-NEXT: fabs.s a1, s0
+; RV32IZFINX-NEXT: flt.s a0, a1, a0
+; RV32IZFINX-NEXT: beqz a0, .LBB1_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a0, s0, rdn
+; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rdn
+; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0
+; RV32IZFINX-NEXT: .LBB1_2:
+; RV32IZFINX-NEXT: lui a0, 913408
+; RV32IZFINX-NEXT: fle.s s1, a0, s0
+; RV32IZFINX-NEXT: neg s2, s1
+; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: lui a2, %hi(.LCPI1_0)
+; RV32IZFINX-NEXT: lw a2, %lo(.LCPI1_0)(a2)
+; RV32IZFINX-NEXT: and a0, s2, a0
+; RV32IZFINX-NEXT: flt.s a4, a2, s0
+; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: feq.s a2, s0, s0
+; RV32IZFINX-NEXT: neg a2, a2
+; RV32IZFINX-NEXT: lui a5, 524288
+; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: beqz s1, .LBB1_4
+; RV32IZFINX-NEXT: # %bb.3:
+; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: .LBB1_4:
+; RV32IZFINX-NEXT: and a0, a2, a0
+; RV32IZFINX-NEXT: beqz a4, .LBB1_6
+; RV32IZFINX-NEXT: # %bb.5:
+; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: .LBB1_6:
+; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_floor_si64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a1, a0, rdn
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: seqz a0, a0
+; RV64IZFINX-NEXT: addi a0, a0, -1
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = call i64 @llvm.fptosi.sat.i64.f32(float %a)
ret i64 %b
@@ -90,6 +161,15 @@ define signext i32 @test_floor_ui32(float %x) {
; CHECKIF-NEXT: addi a1, a1, -1
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
+;
+; CHECKIZFINX-LABEL: test_floor_ui32:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.wu.s a1, a0, rdn
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: seqz a0, a0
+; CHECKIZFINX-NEXT: addi a0, a0, -1
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = call i32 @llvm.fptoui.sat.i32.f32(float %a)
ret i32 %b
@@ -140,6 +220,49 @@ define i64 @test_floor_ui64(float %x) nounwind {
; RV64IF-NEXT: addi a1, a1, -1
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_floor_ui64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: lui a0, 307200
+; RV32IZFINX-NEXT: fabs.s a1, s0
+; RV32IZFINX-NEXT: flt.s a0, a1, a0
+; RV32IZFINX-NEXT: beqz a0, .LBB3_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a0, s0, rdn
+; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rdn
+; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0
+; RV32IZFINX-NEXT: .LBB3_2:
+; RV32IZFINX-NEXT: fle.s a0, zero, s0
+; RV32IZFINX-NEXT: neg s1, a0
+; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: lui a2, %hi(.LCPI3_0)
+; RV32IZFINX-NEXT: lw a2, %lo(.LCPI3_0)(a2)
+; RV32IZFINX-NEXT: and a0, s1, a0
+; RV32IZFINX-NEXT: flt.s a2, a2, s0
+; RV32IZFINX-NEXT: neg a2, a2
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: and a1, s1, a1
+; RV32IZFINX-NEXT: or a1, a2, a1
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_floor_ui64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a1, a0, rdn
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: seqz a0, a0
+; RV64IZFINX-NEXT: addi a0, a0, -1
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = call i64 @llvm.fptoui.sat.i64.f32(float %a)
ret i64 %b
@@ -154,6 +277,15 @@ define signext i32 @test_ceil_si32(float %x) {
; CHECKIF-NEXT: addi a1, a1, -1
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
+;
+; CHECKIZFINX-LABEL: test_ceil_si32:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.w.s a1, a0, rup
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: seqz a0, a0
+; CHECKIZFINX-NEXT: addi a0, a0, -1
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = call i32 @llvm.fptosi.sat.i32.f32(float %a)
ret i32 %b
@@ -217,6 +349,64 @@ define i64 @test_ceil_si64(float %x) nounwind {
; RV64IF-NEXT: addi a1, a1, -1
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_ceil_si64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: lui a0, 307200
+; RV32IZFINX-NEXT: fabs.s a1, s0
+; RV32IZFINX-NEXT: flt.s a0, a1, a0
+; RV32IZFINX-NEXT: beqz a0, .LBB5_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a0, s0, rup
+; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rup
+; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0
+; RV32IZFINX-NEXT: .LBB5_2:
+; RV32IZFINX-NEXT: lui a0, 913408
+; RV32IZFINX-NEXT: fle.s s1, a0, s0
+; RV32IZFINX-NEXT: neg s2, s1
+; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: lui a2, %hi(.LCPI5_0)
+; RV32IZFINX-NEXT: lw a2, %lo(.LCPI5_0)(a2)
+; RV32IZFINX-NEXT: and a0, s2, a0
+; RV32IZFINX-NEXT: flt.s a4, a2, s0
+; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: feq.s a2, s0, s0
+; RV32IZFINX-NEXT: neg a2, a2
+; RV32IZFINX-NEXT: lui a5, 524288
+; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: beqz s1, .LBB5_4
+; RV32IZFINX-NEXT: # %bb.3:
+; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: .LBB5_4:
+; RV32IZFINX-NEXT: and a0, a2, a0
+; RV32IZFINX-NEXT: beqz a4, .LBB5_6
+; RV32IZFINX-NEXT: # %bb.5:
+; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: .LBB5_6:
+; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_ceil_si64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a1, a0, rup
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: seqz a0, a0
+; RV64IZFINX-NEXT: addi a0, a0, -1
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = call i64 @llvm.fptosi.sat.i64.f32(float %a)
ret i64 %b
@@ -231,6 +421,15 @@ define signext i32 @test_ceil_ui32(float %x) {
; CHECKIF-NEXT: addi a1, a1, -1
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
+;
+; CHECKIZFINX-LABEL: test_ceil_ui32:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.wu.s a1, a0, rup
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: seqz a0, a0
+; CHECKIZFINX-NEXT: addi a0, a0, -1
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = call i32 @llvm.fptoui.sat.i32.f32(float %a)
ret i32 %b
@@ -281,6 +480,49 @@ define i64 @test_ceil_ui64(float %x) nounwind {
; RV64IF-NEXT: addi a1, a1, -1
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_ceil_ui64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: lui a0, 307200
+; RV32IZFINX-NEXT: fabs.s a1, s0
+; RV32IZFINX-NEXT: flt.s a0, a1, a0
+; RV32IZFINX-NEXT: beqz a0, .LBB7_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a0, s0, rup
+; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rup
+; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0
+; RV32IZFINX-NEXT: .LBB7_2:
+; RV32IZFINX-NEXT: fle.s a0, zero, s0
+; RV32IZFINX-NEXT: neg s1, a0
+; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: lui a2, %hi(.LCPI7_0)
+; RV32IZFINX-NEXT: lw a2, %lo(.LCPI7_0)(a2)
+; RV32IZFINX-NEXT: and a0, s1, a0
+; RV32IZFINX-NEXT: flt.s a2, a2, s0
+; RV32IZFINX-NEXT: neg a2, a2
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: and a1, s1, a1
+; RV32IZFINX-NEXT: or a1, a2, a1
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_ceil_ui64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a1, a0, rup
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: seqz a0, a0
+; RV64IZFINX-NEXT: addi a0, a0, -1
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = call i64 @llvm.fptoui.sat.i64.f32(float %a)
ret i64 %b
@@ -295,6 +537,15 @@ define signext i32 @test_trunc_si32(float %x) {
; CHECKIF-NEXT: addi a1, a1, -1
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
+;
+; CHECKIZFINX-LABEL: test_trunc_si32:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.w.s a1, a0, rtz
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: seqz a0, a0
+; CHECKIZFINX-NEXT: addi a0, a0, -1
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = call i32 @llvm.fptosi.sat.i32.f32(float %a)
ret i32 %b
@@ -358,6 +609,64 @@ define i64 @test_trunc_si64(float %x) nounwind {
; RV64IF-NEXT: addi a1, a1, -1
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_trunc_si64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: lui a0, 307200
+; RV32IZFINX-NEXT: fabs.s a1, s0
+; RV32IZFINX-NEXT: flt.s a0, a1, a0
+; RV32IZFINX-NEXT: beqz a0, .LBB9_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a0, s0, rtz
+; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rtz
+; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0
+; RV32IZFINX-NEXT: .LBB9_2:
+; RV32IZFINX-NEXT: lui a0, 913408
+; RV32IZFINX-NEXT: fle.s s1, a0, s0
+; RV32IZFINX-NEXT: neg s2, s1
+; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: lui a2, %hi(.LCPI9_0)
+; RV32IZFINX-NEXT: lw a2, %lo(.LCPI9_0)(a2)
+; RV32IZFINX-NEXT: and a0, s2, a0
+; RV32IZFINX-NEXT: flt.s a4, a2, s0
+; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: feq.s a2, s0, s0
+; RV32IZFINX-NEXT: neg a2, a2
+; RV32IZFINX-NEXT: lui a5, 524288
+; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: beqz s1, .LBB9_4
+; RV32IZFINX-NEXT: # %bb.3:
+; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: .LBB9_4:
+; RV32IZFINX-NEXT: and a0, a2, a0
+; RV32IZFINX-NEXT: beqz a4, .LBB9_6
+; RV32IZFINX-NEXT: # %bb.5:
+; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: .LBB9_6:
+; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_trunc_si64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a1, a0, rtz
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: seqz a0, a0
+; RV64IZFINX-NEXT: addi a0, a0, -1
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = call i64 @llvm.fptosi.sat.i64.f32(float %a)
ret i64 %b
@@ -372,6 +681,15 @@ define signext i32 @test_trunc_ui32(float %x) {
; CHECKIF-NEXT: addi a1, a1, -1
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
+;
+; CHECKIZFINX-LABEL: test_trunc_ui32:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.wu.s a1, a0, rtz
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: seqz a0, a0
+; CHECKIZFINX-NEXT: addi a0, a0, -1
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = call i32 @llvm.fptoui.sat.i32.f32(float %a)
ret i32 %b
@@ -422,6 +740,49 @@ define i64 @test_trunc_ui64(float %x) nounwind {
; RV64IF-NEXT: addi a1, a1, -1
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_trunc_ui64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: lui a0, 307200
+; RV32IZFINX-NEXT: fabs.s a1, s0
+; RV32IZFINX-NEXT: flt.s a0, a1, a0
+; RV32IZFINX-NEXT: beqz a0, .LBB11_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a0, s0, rtz
+; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rtz
+; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0
+; RV32IZFINX-NEXT: .LBB11_2:
+; RV32IZFINX-NEXT: fle.s a0, zero, s0
+; RV32IZFINX-NEXT: neg s1, a0
+; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: lui a2, %hi(.LCPI11_0)
+; RV32IZFINX-NEXT: lw a2, %lo(.LCPI11_0)(a2)
+; RV32IZFINX-NEXT: and a0, s1, a0
+; RV32IZFINX-NEXT: flt.s a2, a2, s0
+; RV32IZFINX-NEXT: neg a2, a2
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: and a1, s1, a1
+; RV32IZFINX-NEXT: or a1, a2, a1
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_trunc_ui64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a1, a0, rtz
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: seqz a0, a0
+; RV64IZFINX-NEXT: addi a0, a0, -1
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = call i64 @llvm.fptoui.sat.i64.f32(float %a)
ret i64 %b
@@ -436,6 +797,15 @@ define signext i32 @test_round_si32(float %x) {
; CHECKIF-NEXT: addi a1, a1, -1
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
+;
+; CHECKIZFINX-LABEL: test_round_si32:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.w.s a1, a0, rmm
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: seqz a0, a0
+; CHECKIZFINX-NEXT: addi a0, a0, -1
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = call i32 @llvm.fptosi.sat.i32.f32(float %a)
ret i32 %b
@@ -499,6 +869,64 @@ define i64 @test_round_si64(float %x) nounwind {
; RV64IF-NEXT: addi a1, a1, -1
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_round_si64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: lui a0, 307200
+; RV32IZFINX-NEXT: fabs.s a1, s0
+; RV32IZFINX-NEXT: flt.s a0, a1, a0
+; RV32IZFINX-NEXT: beqz a0, .LBB13_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a0, s0, rmm
+; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rmm
+; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0
+; RV32IZFINX-NEXT: .LBB13_2:
+; RV32IZFINX-NEXT: lui a0, 913408
+; RV32IZFINX-NEXT: fle.s s1, a0, s0
+; RV32IZFINX-NEXT: neg s2, s1
+; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: lui a2, %hi(.LCPI13_0)
+; RV32IZFINX-NEXT: lw a2, %lo(.LCPI13_0)(a2)
+; RV32IZFINX-NEXT: and a0, s2, a0
+; RV32IZFINX-NEXT: flt.s a4, a2, s0
+; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: feq.s a2, s0, s0
+; RV32IZFINX-NEXT: neg a2, a2
+; RV32IZFINX-NEXT: lui a5, 524288
+; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: beqz s1, .LBB13_4
+; RV32IZFINX-NEXT: # %bb.3:
+; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: .LBB13_4:
+; RV32IZFINX-NEXT: and a0, a2, a0
+; RV32IZFINX-NEXT: beqz a4, .LBB13_6
+; RV32IZFINX-NEXT: # %bb.5:
+; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: .LBB13_6:
+; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_round_si64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a1, a0, rmm
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: seqz a0, a0
+; RV64IZFINX-NEXT: addi a0, a0, -1
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = call i64 @llvm.fptosi.sat.i64.f32(float %a)
ret i64 %b
@@ -513,6 +941,15 @@ define signext i32 @test_round_ui32(float %x) {
; CHECKIF-NEXT: addi a1, a1, -1
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
+;
+; CHECKIZFINX-LABEL: test_round_ui32:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.wu.s a1, a0, rmm
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: seqz a0, a0
+; CHECKIZFINX-NEXT: addi a0, a0, -1
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = call i32 @llvm.fptoui.sat.i32.f32(float %a)
ret i32 %b
@@ -563,6 +1000,49 @@ define i64 @test_round_ui64(float %x) nounwind {
; RV64IF-NEXT: addi a1, a1, -1
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_round_ui64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: lui a0, 307200
+; RV32IZFINX-NEXT: fabs.s a1, s0
+; RV32IZFINX-NEXT: flt.s a0, a1, a0
+; RV32IZFINX-NEXT: beqz a0, .LBB15_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a0, s0, rmm
+; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rmm
+; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0
+; RV32IZFINX-NEXT: .LBB15_2:
+; RV32IZFINX-NEXT: fle.s a0, zero, s0
+; RV32IZFINX-NEXT: neg s1, a0
+; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: lui a2, %hi(.LCPI15_0)
+; RV32IZFINX-NEXT: lw a2, %lo(.LCPI15_0)(a2)
+; RV32IZFINX-NEXT: and a0, s1, a0
+; RV32IZFINX-NEXT: flt.s a2, a2, s0
+; RV32IZFINX-NEXT: neg a2, a2
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: and a1, s1, a1
+; RV32IZFINX-NEXT: or a1, a2, a1
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_round_ui64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a1, a0, rmm
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: seqz a0, a0
+; RV64IZFINX-NEXT: addi a0, a0, -1
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = call i64 @llvm.fptoui.sat.i64.f32(float %a)
ret i64 %b
@@ -577,6 +1057,15 @@ define signext i32 @test_roundeven_si32(float %x) {
; CHECKIF-NEXT: addi a1, a1, -1
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
+;
+; CHECKIZFINX-LABEL: test_roundeven_si32:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.w.s a1, a0, rne
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: seqz a0, a0
+; CHECKIZFINX-NEXT: addi a0, a0, -1
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = call i32 @llvm.fptosi.sat.i32.f32(float %a)
ret i32 %b
@@ -640,6 +1129,64 @@ define i64 @test_roundeven_si64(float %x) nounwind {
; RV64IF-NEXT: addi a1, a1, -1
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_roundeven_si64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: lui a0, 307200
+; RV32IZFINX-NEXT: fabs.s a1, s0
+; RV32IZFINX-NEXT: flt.s a0, a1, a0
+; RV32IZFINX-NEXT: beqz a0, .LBB17_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a0, s0, rne
+; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rne
+; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0
+; RV32IZFINX-NEXT: .LBB17_2:
+; RV32IZFINX-NEXT: lui a0, 913408
+; RV32IZFINX-NEXT: fle.s s1, a0, s0
+; RV32IZFINX-NEXT: neg s2, s1
+; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: lui a2, %hi(.LCPI17_0)
+; RV32IZFINX-NEXT: lw a2, %lo(.LCPI17_0)(a2)
+; RV32IZFINX-NEXT: and a0, s2, a0
+; RV32IZFINX-NEXT: flt.s a4, a2, s0
+; RV32IZFINX-NEXT: neg a2, a4
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: feq.s a2, s0, s0
+; RV32IZFINX-NEXT: neg a2, a2
+; RV32IZFINX-NEXT: lui a5, 524288
+; RV32IZFINX-NEXT: lui a3, 524288
+; RV32IZFINX-NEXT: beqz s1, .LBB17_4
+; RV32IZFINX-NEXT: # %bb.3:
+; RV32IZFINX-NEXT: mv a3, a1
+; RV32IZFINX-NEXT: .LBB17_4:
+; RV32IZFINX-NEXT: and a0, a2, a0
+; RV32IZFINX-NEXT: beqz a4, .LBB17_6
+; RV32IZFINX-NEXT: # %bb.5:
+; RV32IZFINX-NEXT: addi a3, a5, -1
+; RV32IZFINX-NEXT: .LBB17_6:
+; RV32IZFINX-NEXT: and a1, a2, a3
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_roundeven_si64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a1, a0, rne
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: seqz a0, a0
+; RV64IZFINX-NEXT: addi a0, a0, -1
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = call i64 @llvm.fptosi.sat.i64.f32(float %a)
ret i64 %b
@@ -654,6 +1201,15 @@ define signext i32 @test_roundeven_ui32(float %x) {
; CHECKIF-NEXT: addi a1, a1, -1
; CHECKIF-NEXT: and a0, a1, a0
; CHECKIF-NEXT: ret
+;
+; CHECKIZFINX-LABEL: test_roundeven_ui32:
+; CHECKIZFINX: # %bb.0:
+; CHECKIZFINX-NEXT: fcvt.wu.s a1, a0, rne
+; CHECKIZFINX-NEXT: feq.s a0, a0, a0
+; CHECKIZFINX-NEXT: seqz a0, a0
+; CHECKIZFINX-NEXT: addi a0, a0, -1
+; CHECKIZFINX-NEXT: and a0, a0, a1
+; CHECKIZFINX-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = call i32 @llvm.fptoui.sat.i32.f32(float %a)
ret i32 %b
@@ -704,6 +1260,49 @@ define i64 @test_roundeven_ui64(float %x) nounwind {
; RV64IF-NEXT: addi a1, a1, -1
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_roundeven_ui64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: mv s0, a0
+; RV32IZFINX-NEXT: lui a0, 307200
+; RV32IZFINX-NEXT: fabs.s a1, s0
+; RV32IZFINX-NEXT: flt.s a0, a1, a0
+; RV32IZFINX-NEXT: beqz a0, .LBB19_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a0, s0, rne
+; RV32IZFINX-NEXT: fcvt.s.w a0, a0, rne
+; RV32IZFINX-NEXT: fsgnj.s s0, a0, s0
+; RV32IZFINX-NEXT: .LBB19_2:
+; RV32IZFINX-NEXT: fle.s a0, zero, s0
+; RV32IZFINX-NEXT: neg s1, a0
+; RV32IZFINX-NEXT: mv a0, s0
+; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: lui a2, %hi(.LCPI19_0)
+; RV32IZFINX-NEXT: lw a2, %lo(.LCPI19_0)(a2)
+; RV32IZFINX-NEXT: and a0, s1, a0
+; RV32IZFINX-NEXT: flt.s a2, a2, s0
+; RV32IZFINX-NEXT: neg a2, a2
+; RV32IZFINX-NEXT: or a0, a2, a0
+; RV32IZFINX-NEXT: and a1, s1, a1
+; RV32IZFINX-NEXT: or a1, a2, a1
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_roundeven_ui64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a1, a0, rne
+; RV64IZFINX-NEXT: feq.s a0, a0, a0
+; RV64IZFINX-NEXT: seqz a0, a0
+; RV64IZFINX-NEXT: addi a0, a0, -1
+; RV64IZFINX-NEXT: and a0, a0, a1
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = call i64 @llvm.fptoui.sat.i64.f32(float %a)
ret i64 %b
diff --git a/llvm/test/CodeGen/RISCV/float-round-conv.ll b/llvm/test/CodeGen/RISCV/float-round-conv.ll
index 344950df43df..ed50f867cdb8 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv.ll
@@ -3,6 +3,10 @@
; RUN: -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=ilp32 | FileCheck -check-prefix=RV32IZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=lp64 | FileCheck -check-prefix=RV64IZFINX %s
define signext i8 @test_floor_si8(float %x) {
; RV32IF-LABEL: test_floor_si8:
@@ -14,6 +18,16 @@ define signext i8 @test_floor_si8(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_floor_si8:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rdn
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_floor_si8:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rdn
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
@@ -29,6 +43,16 @@ define signext i16 @test_floor_si16(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_floor_si16:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rdn
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_floor_si16:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rdn
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
@@ -44,6 +68,16 @@ define signext i32 @test_floor_si32(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_floor_si32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rdn
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_floor_si32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rdn
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
@@ -75,6 +109,31 @@ define i64 @test_floor_si64(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_floor_si64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB3_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rdn
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rdn
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB3_2:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: .cfi_offset ra, -4
+; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_floor_si64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rdn
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
@@ -90,6 +149,16 @@ define zeroext i8 @test_floor_ui8(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_floor_ui8:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rdn
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_floor_ui8:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rdn
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
@@ -105,6 +174,16 @@ define zeroext i16 @test_floor_ui16(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_floor_ui16:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rdn
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_floor_ui16:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rdn
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
@@ -120,6 +199,16 @@ define signext i32 @test_floor_ui32(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rdn
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_floor_ui32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rdn
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_floor_ui32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.wu.s a0, a0, rdn
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
@@ -151,6 +240,31 @@ define i64 @test_floor_ui64(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_floor_ui64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB7_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rdn
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rdn
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB7_2:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: .cfi_offset ra, -4
+; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_floor_ui64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rdn
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
@@ -166,6 +280,16 @@ define signext i8 @test_ceil_si8(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_ceil_si8:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rup
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_ceil_si8:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rup
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
@@ -181,6 +305,16 @@ define signext i16 @test_ceil_si16(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_ceil_si16:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rup
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_ceil_si16:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rup
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
@@ -196,6 +330,16 @@ define signext i32 @test_ceil_si32(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rup
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_ceil_si32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rup
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_ceil_si32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rup
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
@@ -227,6 +371,31 @@ define i64 @test_ceil_si64(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_ceil_si64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB11_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rup
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rup
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB11_2:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: .cfi_offset ra, -4
+; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_ceil_si64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rup
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
@@ -242,6 +411,16 @@ define zeroext i8 @test_ceil_ui8(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_ceil_ui8:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rup
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_ceil_ui8:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rup
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
@@ -257,6 +436,16 @@ define zeroext i16 @test_ceil_ui16(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_ceil_ui16:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rup
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_ceil_ui16:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rup
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
@@ -272,6 +461,16 @@ define signext i32 @test_ceil_ui32(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rup
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_ceil_ui32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rup
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_ceil_ui32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.wu.s a0, a0, rup
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
@@ -303,6 +502,31 @@ define i64 @test_ceil_ui64(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_ceil_ui64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB15_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rup
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rup
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB15_2:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: .cfi_offset ra, -4
+; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_ceil_ui64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rup
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
@@ -318,6 +542,16 @@ define signext i8 @test_trunc_si8(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_trunc_si8:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rtz
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_trunc_si8:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
@@ -333,6 +567,16 @@ define signext i16 @test_trunc_si16(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_trunc_si16:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rtz
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_trunc_si16:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
@@ -348,6 +592,16 @@ define signext i32 @test_trunc_si32(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_trunc_si32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rtz
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_trunc_si32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
@@ -379,6 +633,31 @@ define i64 @test_trunc_si64(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_trunc_si64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB19_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rtz
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rtz
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB19_2:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: .cfi_offset ra, -4
+; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_trunc_si64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
@@ -394,6 +673,16 @@ define zeroext i8 @test_trunc_ui8(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_trunc_ui8:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rtz
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_trunc_ui8:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
@@ -409,6 +698,16 @@ define zeroext i16 @test_trunc_ui16(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_trunc_ui16:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rtz
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_trunc_ui16:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
@@ -424,6 +723,16 @@ define signext i32 @test_trunc_ui32(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_trunc_ui32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rtz
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_trunc_ui32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.wu.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
@@ -455,6 +764,31 @@ define i64 @test_trunc_ui64(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_trunc_ui64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB23_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rtz
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rtz
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB23_2:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: .cfi_offset ra, -4
+; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_trunc_ui64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
@@ -470,6 +804,16 @@ define signext i8 @test_round_si8(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_round_si8:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rmm
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_round_si8:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rmm
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
@@ -485,6 +829,16 @@ define signext i16 @test_round_si16(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_round_si16:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rmm
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_round_si16:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rmm
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
@@ -500,6 +854,16 @@ define signext i32 @test_round_si32(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_round_si32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rmm
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_round_si32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rmm
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
@@ -531,6 +895,31 @@ define i64 @test_round_si64(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_round_si64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB27_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rmm
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rmm
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB27_2:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: .cfi_offset ra, -4
+; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_round_si64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rmm
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
@@ -546,6 +935,16 @@ define zeroext i8 @test_round_ui8(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_round_ui8:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rmm
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_round_ui8:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rmm
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
@@ -561,6 +960,16 @@ define zeroext i16 @test_round_ui16(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_round_ui16:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rmm
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_round_ui16:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rmm
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
@@ -576,6 +985,16 @@ define signext i32 @test_round_ui32(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rmm
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_round_ui32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rmm
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_round_ui32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.wu.s a0, a0, rmm
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
@@ -607,6 +1026,31 @@ define i64 @test_round_ui64(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_round_ui64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB31_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rmm
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rmm
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB31_2:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: .cfi_offset ra, -4
+; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_round_ui64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rmm
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
@@ -622,6 +1066,16 @@ define signext i8 @test_roundeven_si8(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_roundeven_si8:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rne
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_roundeven_si8:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rne
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
@@ -637,6 +1091,16 @@ define signext i16 @test_roundeven_si16(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_roundeven_si16:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rne
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_roundeven_si16:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rne
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
@@ -652,6 +1116,16 @@ define signext i32 @test_roundeven_si32(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rne
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_roundeven_si32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.w.s a0, a0, rne
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_roundeven_si32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rne
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
@@ -683,6 +1157,31 @@ define i64 @test_roundeven_si64(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_roundeven_si64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB35_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rne
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rne
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB35_2:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: .cfi_offset ra, -4
+; RV32IZFINX-NEXT: call __fixsfdi@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_roundeven_si64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.l.s a0, a0, rne
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
@@ -698,6 +1197,16 @@ define zeroext i8 @test_roundeven_ui8(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_roundeven_ui8:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rne
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_roundeven_ui8:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rne
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
@@ -713,6 +1222,16 @@ define zeroext i16 @test_roundeven_ui16(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_roundeven_ui16:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rne
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_roundeven_ui16:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rne
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
@@ -728,6 +1247,16 @@ define signext i32 @test_roundeven_ui32(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rne
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_roundeven_ui32:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: fcvt.wu.s a0, a0, rne
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_roundeven_ui32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.wu.s a0, a0, rne
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
@@ -759,6 +1288,31 @@ define i64 @test_roundeven_ui64(float %x) {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_roundeven_ui64:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB39_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rne
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rne
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB39_2:
+; RV32IZFINX-NEXT: addi sp, sp, -16
+; RV32IZFINX-NEXT: .cfi_def_cfa_offset 16
+; RV32IZFINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFINX-NEXT: .cfi_offset ra, -4
+; RV32IZFINX-NEXT: call __fixunssfdi@plt
+; RV32IZFINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFINX-NEXT: addi sp, sp, 16
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_roundeven_ui64:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rne
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
@@ -813,6 +1367,32 @@ define float @test_floor_float(float %x) {
; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT: .LBB40_2:
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_floor_float:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB40_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rdn
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rdn
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB40_2:
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_floor_float:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lui a1, 307200
+; RV64IZFINX-NEXT: fabs.s a2, a0
+; RV64IZFINX-NEXT: flt.s a1, a2, a1
+; RV64IZFINX-NEXT: beqz a1, .LBB40_2
+; RV64IZFINX-NEXT: # %bb.1:
+; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rdn
+; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rdn
+; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV64IZFINX-NEXT: .LBB40_2:
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
ret float %a
}
@@ -866,6 +1446,32 @@ define float @test_ceil_float(float %x) {
; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT: .LBB41_2:
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_ceil_float:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB41_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rup
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rup
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB41_2:
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_ceil_float:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lui a1, 307200
+; RV64IZFINX-NEXT: fabs.s a2, a0
+; RV64IZFINX-NEXT: flt.s a1, a2, a1
+; RV64IZFINX-NEXT: beqz a1, .LBB41_2
+; RV64IZFINX-NEXT: # %bb.1:
+; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rup
+; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rup
+; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV64IZFINX-NEXT: .LBB41_2:
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
ret float %a
}
@@ -919,6 +1525,32 @@ define float @test_trunc_float(float %x) {
; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT: .LBB42_2:
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_trunc_float:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB42_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rtz
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rtz
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB42_2:
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_trunc_float:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lui a1, 307200
+; RV64IZFINX-NEXT: fabs.s a2, a0
+; RV64IZFINX-NEXT: flt.s a1, a2, a1
+; RV64IZFINX-NEXT: beqz a1, .LBB42_2
+; RV64IZFINX-NEXT: # %bb.1:
+; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rtz
+; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rtz
+; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV64IZFINX-NEXT: .LBB42_2:
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
ret float %a
}
@@ -972,6 +1604,32 @@ define float @test_round_float(float %x) {
; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT: .LBB43_2:
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_round_float:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB43_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rmm
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rmm
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB43_2:
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_round_float:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lui a1, 307200
+; RV64IZFINX-NEXT: fabs.s a2, a0
+; RV64IZFINX-NEXT: flt.s a1, a2, a1
+; RV64IZFINX-NEXT: beqz a1, .LBB43_2
+; RV64IZFINX-NEXT: # %bb.1:
+; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rmm
+; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rmm
+; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV64IZFINX-NEXT: .LBB43_2:
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.round.f32(float %x)
ret float %a
}
@@ -1025,6 +1683,32 @@ define float @test_roundeven_float(float %x) {
; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT: .LBB44_2:
; RV64IF-NEXT: ret
+;
+; RV32IZFINX-LABEL: test_roundeven_float:
+; RV32IZFINX: # %bb.0:
+; RV32IZFINX-NEXT: lui a1, 307200
+; RV32IZFINX-NEXT: fabs.s a2, a0
+; RV32IZFINX-NEXT: flt.s a1, a2, a1
+; RV32IZFINX-NEXT: beqz a1, .LBB44_2
+; RV32IZFINX-NEXT: # %bb.1:
+; RV32IZFINX-NEXT: fcvt.w.s a1, a0, rne
+; RV32IZFINX-NEXT: fcvt.s.w a1, a1, rne
+; RV32IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV32IZFINX-NEXT: .LBB44_2:
+; RV32IZFINX-NEXT: ret
+;
+; RV64IZFINX-LABEL: test_roundeven_float:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: lui a1, 307200
+; RV64IZFINX-NEXT: fabs.s a2, a0
+; RV64IZFINX-NEXT: flt.s a1, a2, a1
+; RV64IZFINX-NEXT: beqz a1, .LBB44_2
+; RV64IZFINX-NEXT: # %bb.1:
+; RV64IZFINX-NEXT: fcvt.w.s a1, a0, rne
+; RV64IZFINX-NEXT: fcvt.s.w a1, a1, rne
+; RV64IZFINX-NEXT: fsgnj.s a0, a1, a0
+; RV64IZFINX-NEXT: .LBB44_2:
+; RV64IZFINX-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
ret float %a
}
diff --git a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
index c6e883624f5a..a2ff0d33e2d3 100644
--- a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
@@ -3,12 +3,21 @@
; RUN: -target-abi=ilp32f | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=lp64f | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=ilp32 | FileCheck --check-prefix=CHECKZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=lp64 | FileCheck --check-prefix=CHECKZFINX %s
define float @select_fcmp_false(float %a, float %b) nounwind {
; CHECK-LABEL: select_fcmp_false:
; CHECK: # %bb.0:
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_false:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: ret
%1 = fcmp false float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -23,6 +32,15 @@ define float @select_fcmp_oeq(float %a, float %b) nounwind {
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_oeq:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: feq.s a2, a0, a1
+; CHECKZFINX-NEXT: bnez a2, .LBB1_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: .LBB1_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp oeq float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -37,6 +55,15 @@ define float @select_fcmp_ogt(float %a, float %b) nounwind {
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB2_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_ogt:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: flt.s a2, a1, a0
+; CHECKZFINX-NEXT: bnez a2, .LBB2_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: .LBB2_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp ogt float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -51,6 +78,15 @@ define float @select_fcmp_oge(float %a, float %b) nounwind {
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB3_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_oge:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: fle.s a2, a1, a0
+; CHECKZFINX-NEXT: bnez a2, .LBB3_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: .LBB3_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp oge float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -65,6 +101,15 @@ define float @select_fcmp_olt(float %a, float %b) nounwind {
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB4_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_olt:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: flt.s a2, a0, a1
+; CHECKZFINX-NEXT: bnez a2, .LBB4_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: .LBB4_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp olt float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -79,6 +124,15 @@ define float @select_fcmp_ole(float %a, float %b) nounwind {
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB5_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_ole:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: fle.s a2, a0, a1
+; CHECKZFINX-NEXT: bnez a2, .LBB5_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: .LBB5_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp ole float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -95,6 +149,17 @@ define float @select_fcmp_one(float %a, float %b) nounwind {
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB6_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_one:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: flt.s a2, a0, a1
+; CHECKZFINX-NEXT: flt.s a3, a1, a0
+; CHECKZFINX-NEXT: or a2, a3, a2
+; CHECKZFINX-NEXT: bnez a2, .LBB6_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: .LBB6_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp one float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -111,6 +176,17 @@ define float @select_fcmp_ord(float %a, float %b) nounwind {
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB7_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_ord:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: feq.s a2, a1, a1
+; CHECKZFINX-NEXT: feq.s a3, a0, a0
+; CHECKZFINX-NEXT: and a2, a3, a2
+; CHECKZFINX-NEXT: bnez a2, .LBB7_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: .LBB7_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp ord float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -127,6 +203,17 @@ define float @select_fcmp_ueq(float %a, float %b) nounwind {
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB8_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_ueq:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: flt.s a2, a0, a1
+; CHECKZFINX-NEXT: flt.s a3, a1, a0
+; CHECKZFINX-NEXT: or a2, a3, a2
+; CHECKZFINX-NEXT: beqz a2, .LBB8_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: .LBB8_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp ueq float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -141,6 +228,15 @@ define float @select_fcmp_ugt(float %a, float %b) nounwind {
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB9_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_ugt:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: fle.s a2, a0, a1
+; CHECKZFINX-NEXT: beqz a2, .LBB9_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: .LBB9_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp ugt float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -155,6 +251,15 @@ define float @select_fcmp_uge(float %a, float %b) nounwind {
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB10_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_uge:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: flt.s a2, a0, a1
+; CHECKZFINX-NEXT: beqz a2, .LBB10_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: .LBB10_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp uge float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -169,6 +274,15 @@ define float @select_fcmp_ult(float %a, float %b) nounwind {
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB11_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_ult:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: fle.s a2, a1, a0
+; CHECKZFINX-NEXT: beqz a2, .LBB11_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: .LBB11_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp ult float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -183,6 +297,15 @@ define float @select_fcmp_ule(float %a, float %b) nounwind {
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB12_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_ule:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: flt.s a2, a1, a0
+; CHECKZFINX-NEXT: beqz a2, .LBB12_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: .LBB12_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp ule float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -197,6 +320,15 @@ define float @select_fcmp_une(float %a, float %b) nounwind {
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB13_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_une:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: feq.s a2, a0, a1
+; CHECKZFINX-NEXT: beqz a2, .LBB13_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: .LBB13_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp une float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -213,6 +345,17 @@ define float @select_fcmp_uno(float %a, float %b) nounwind {
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB14_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_uno:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: feq.s a2, a1, a1
+; CHECKZFINX-NEXT: feq.s a3, a0, a0
+; CHECKZFINX-NEXT: and a2, a3, a2
+; CHECKZFINX-NEXT: beqz a2, .LBB14_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a1
+; CHECKZFINX-NEXT: .LBB14_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp uno float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -222,6 +365,10 @@ define float @select_fcmp_true(float %a, float %b) nounwind {
; CHECK-LABEL: select_fcmp_true:
; CHECK: # %bb.0:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_true:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: ret
%1 = fcmp true float %a, %b
%2 = select i1 %1, float %a, float %b
ret float %2
@@ -237,6 +384,16 @@ define i32 @i32_select_fcmp_oeq(float %a, float %b, i32 %c, i32 %d) nounwind {
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB16_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: i32_select_fcmp_oeq:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: feq.s a1, a0, a1
+; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: bnez a1, .LBB16_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a0, a3
+; CHECKZFINX-NEXT: .LBB16_2:
+; CHECKZFINX-NEXT: ret
%1 = fcmp oeq float %a, %b
%2 = select i1 %1, i32 %c, i32 %d
ret i32 %2
@@ -249,6 +406,13 @@ define i32 @select_fcmp_oeq_1_2(float %a, float %b) {
; CHECK-NEXT: li a1, 2
; CHECK-NEXT: sub a0, a1, a0
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_oeq_1_2:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: feq.s a0, a0, a1
+; CHECKZFINX-NEXT: li a1, 2
+; CHECKZFINX-NEXT: sub a0, a1, a0
+; CHECKZFINX-NEXT: ret
%1 = fcmp fast oeq float %a, %b
%2 = select i1 %1, i32 1, i32 2
ret i32 %2
@@ -260,6 +424,12 @@ define signext i32 @select_fcmp_uge_negone_zero(float %a, float %b) nounwind {
; CHECK-NEXT: fle.s a0, fa0, fa1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_uge_negone_zero:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: fle.s a0, a0, a1
+; CHECKZFINX-NEXT: addi a0, a0, -1
+; CHECKZFINX-NEXT: ret
%1 = fcmp ugt float %a, %b
%2 = select i1 %1, i32 -1, i32 0
ret i32 %2
@@ -271,6 +441,12 @@ define signext i32 @select_fcmp_uge_1_2(float %a, float %b) nounwind {
; CHECK-NEXT: fle.s a0, fa0, fa1
; CHECK-NEXT: addi a0, a0, 1
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_fcmp_uge_1_2:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: fle.s a0, a0, a1
+; CHECKZFINX-NEXT: addi a0, a0, 1
+; CHECKZFINX-NEXT: ret
%1 = fcmp ugt float %a, %b
%2 = select i1 %1, i32 1, i32 2
ret i32 %2
diff --git a/llvm/test/CodeGen/RISCV/float-select-icmp.ll b/llvm/test/CodeGen/RISCV/float-select-icmp.ll
index 52fa76b1b28c..f46f25f777ca 100644
--- a/llvm/test/CodeGen/RISCV/float-select-icmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-select-icmp.ll
@@ -3,6 +3,10 @@
; RUN: -target-abi=ilp32f | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=lp64f | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=ilp32 | FileCheck --check-prefix=CHECKZFINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN: -target-abi=lp64 | FileCheck --check-prefix=CHECKZFINX %s
define float @select_icmp_eq(i32 signext %a, i32 signext %b, float %c, float %d) {
; CHECK-LABEL: select_icmp_eq:
@@ -12,6 +16,15 @@ define float @select_icmp_eq(i32 signext %a, i32 signext %b, float %c, float %d)
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB0_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_icmp_eq:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: beq a0, a1, .LBB0_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: .LBB0_2:
+; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: ret
%1 = icmp eq i32 %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -25,6 +38,15 @@ define float @select_icmp_ne(i32 signext %a, i32 signext %b, float %c, float %d)
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_icmp_ne:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: bne a0, a1, .LBB1_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: .LBB1_2:
+; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: ret
%1 = icmp ne i32 %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -38,6 +60,15 @@ define float @select_icmp_ugt(i32 signext %a, i32 signext %b, float %c, float %d
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB2_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_icmp_ugt:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: bltu a1, a0, .LBB2_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: .LBB2_2:
+; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: ret
%1 = icmp ugt i32 %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -51,6 +82,15 @@ define float @select_icmp_uge(i32 signext %a, i32 signext %b, float %c, float %d
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB3_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_icmp_uge:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: bgeu a0, a1, .LBB3_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: .LBB3_2:
+; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: ret
%1 = icmp uge i32 %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -64,6 +104,15 @@ define float @select_icmp_ult(i32 signext %a, i32 signext %b, float %c, float %d
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB4_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_icmp_ult:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: bltu a0, a1, .LBB4_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: .LBB4_2:
+; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: ret
%1 = icmp ult i32 %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -77,6 +126,15 @@ define float @select_icmp_ule(i32 signext %a, i32 signext %b, float %c, float %d
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB5_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_icmp_ule:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: bgeu a1, a0, .LBB5_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: .LBB5_2:
+; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: ret
%1 = icmp ule i32 %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -90,6 +148,15 @@ define float @select_icmp_sgt(i32 signext %a, i32 signext %b, float %c, float %d
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB6_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_icmp_sgt:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: blt a1, a0, .LBB6_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: .LBB6_2:
+; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: ret
%1 = icmp sgt i32 %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -103,6 +170,15 @@ define float @select_icmp_sge(i32 signext %a, i32 signext %b, float %c, float %d
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB7_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_icmp_sge:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: bge a0, a1, .LBB7_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: .LBB7_2:
+; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: ret
%1 = icmp sge i32 %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -116,6 +192,15 @@ define float @select_icmp_slt(i32 signext %a, i32 signext %b, float %c, float %d
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB8_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_icmp_slt:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: blt a0, a1, .LBB8_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: .LBB8_2:
+; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: ret
%1 = icmp slt i32 %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
@@ -129,6 +214,15 @@ define float @select_icmp_sle(i32 signext %a, i32 signext %b, float %c, float %d
; CHECK-NEXT: fmv.s fa0, fa1
; CHECK-NEXT: .LBB9_2:
; CHECK-NEXT: ret
+;
+; CHECKZFINX-LABEL: select_icmp_sle:
+; CHECKZFINX: # %bb.0:
+; CHECKZFINX-NEXT: bge a1, a0, .LBB9_2
+; CHECKZFINX-NEXT: # %bb.1:
+; CHECKZFINX-NEXT: mv a2, a3
+; CHECKZFINX-NEXT: .LBB9_2:
+; CHECKZFINX-NEXT: mv a0, a2
+; CHECKZFINX-NEXT: ret
%1 = icmp sle i32 %a, %b
%2 = select i1 %1, float %c, float %d
ret float %2
diff --git a/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll b/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll
index dbc864c79238..72d5454a3a69 100644
--- a/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi=lp64f -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64IF
+; RUN: llc -mtriple=riscv64 -mattr=+zfinx -target-abi=lp64 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s -check-prefix=RV64IZFINX
; This file exhaustively checks float<->i32 conversions. In general,
; fcvt.l[u].s can be selected instead of fcvt.w[u].s because poison is
@@ -12,6 +14,11 @@ define i32 @aext_fptosi(float %a) nounwind {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: aext_fptosi:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
%1 = fptosi float %a to i32
ret i32 %1
}
@@ -21,6 +28,11 @@ define signext i32 @sext_fptosi(float %a) nounwind {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: sext_fptosi:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
%1 = fptosi float %a to i32
ret i32 %1
}
@@ -32,6 +44,13 @@ define zeroext i32 @zext_fptosi(float %a) nounwind {
; RV64IF-NEXT: slli a0, a0, 32
; RV64IF-NEXT: srli a0, a0, 32
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: zext_fptosi:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.w.s a0, a0, rtz
+; RV64IZFINX-NEXT: slli a0, a0, 32
+; RV64IZFINX-NEXT: srli a0, a0, 32
+; RV64IZFINX-NEXT: ret
%1 = fptosi float %a to i32
ret i32 %1
}
@@ -41,6 +60,11 @@ define i32 @aext_fptoui(float %a) nounwind {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: aext_fptoui:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.wu.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
%1 = fptoui float %a to i32
ret i32 %1
}
@@ -50,6 +74,11 @@ define signext i32 @sext_fptoui(float %a) nounwind {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: sext_fptoui:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.wu.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
%1 = fptoui float %a to i32
ret i32 %1
}
@@ -59,6 +88,11 @@ define zeroext i32 @zext_fptoui(float %a) nounwind {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: zext_fptoui:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.lu.s a0, a0, rtz
+; RV64IZFINX-NEXT: ret
%1 = fptoui float %a to i32
ret i32 %1
}
@@ -69,6 +103,11 @@ define i32 @bcvt_f32_to_aext_i32(float %a, float %b) nounwind {
; RV64IF-NEXT: fadd.s fa5, fa0, fa1
; RV64IF-NEXT: fmv.x.w a0, fa5
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: bcvt_f32_to_aext_i32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fadd.s a0, a0, a1
+; RV64IZFINX-NEXT: ret
%1 = fadd float %a, %b
%2 = bitcast float %1 to i32
ret i32 %2
@@ -80,6 +119,12 @@ define signext i32 @bcvt_f32_to_sext_i32(float %a, float %b) nounwind {
; RV64IF-NEXT: fadd.s fa5, fa0, fa1
; RV64IF-NEXT: fmv.x.w a0, fa5
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: bcvt_f32_to_sext_i32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fadd.s a0, a0, a1
+; RV64IZFINX-NEXT: sext.w a0, a0
+; RV64IZFINX-NEXT: ret
%1 = fadd float %a, %b
%2 = bitcast float %1 to i32
ret i32 %2
@@ -93,6 +138,13 @@ define zeroext i32 @bcvt_f32_to_zext_i32(float %a, float %b) nounwind {
; RV64IF-NEXT: slli a0, a0, 32
; RV64IF-NEXT: srli a0, a0, 32
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: bcvt_f32_to_zext_i32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fadd.s a0, a0, a1
+; RV64IZFINX-NEXT: slli a0, a0, 32
+; RV64IZFINX-NEXT: srli a0, a0, 32
+; RV64IZFINX-NEXT: ret
%1 = fadd float %a, %b
%2 = bitcast float %1 to i32
ret i32 %2
@@ -105,6 +157,11 @@ define float @bcvt_i64_to_f32_via_i32(i64 %a, i64 %b) nounwind {
; RV64IF-NEXT: fmv.w.x fa4, a1
; RV64IF-NEXT: fadd.s fa0, fa5, fa4
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: bcvt_i64_to_f32_via_i32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fadd.s a0, a0, a1
+; RV64IZFINX-NEXT: ret
%1 = trunc i64 %a to i32
%2 = trunc i64 %b to i32
%3 = bitcast i32 %1 to float
@@ -118,6 +175,11 @@ define float @uitofp_aext_i32_to_f32(i32 %a) nounwind {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.s.wu fa0, a0
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: uitofp_aext_i32_to_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.s.wu a0, a0
+; RV64IZFINX-NEXT: ret
%1 = uitofp i32 %a to float
ret float %1
}
@@ -127,6 +189,11 @@ define float @uitofp_sext_i32_to_f32(i32 signext %a) nounwind {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.s.wu fa0, a0
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: uitofp_sext_i32_to_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.s.wu a0, a0
+; RV64IZFINX-NEXT: ret
%1 = uitofp i32 %a to float
ret float %1
}
@@ -136,6 +203,11 @@ define float @uitofp_zext_i32_to_f32(i32 zeroext %a) nounwind {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.s.wu fa0, a0
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: uitofp_zext_i32_to_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.s.wu a0, a0
+; RV64IZFINX-NEXT: ret
%1 = uitofp i32 %a to float
ret float %1
}
@@ -145,6 +217,11 @@ define float @sitofp_aext_i32_to_f32(i32 %a) nounwind {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.s.w fa0, a0
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: sitofp_aext_i32_to_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.s.w a0, a0
+; RV64IZFINX-NEXT: ret
%1 = sitofp i32 %a to float
ret float %1
}
@@ -154,6 +231,11 @@ define float @sitofp_sext_i32_to_f32(i32 signext %a) nounwind {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.s.w fa0, a0
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: sitofp_sext_i32_to_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.s.w a0, a0
+; RV64IZFINX-NEXT: ret
%1 = sitofp i32 %a to float
ret float %1
}
@@ -163,6 +245,11 @@ define float @sitofp_zext_i32_to_f32(i32 zeroext %a) nounwind {
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.s.w fa0, a0
; RV64IF-NEXT: ret
+;
+; RV64IZFINX-LABEL: sitofp_zext_i32_to_f32:
+; RV64IZFINX: # %bb.0:
+; RV64IZFINX-NEXT: fcvt.s.w a0, a0
+; RV64IZFINX-NEXT: ret
%1 = sitofp i32 %a to float
ret float %1
}