[llvm] 5b869f3 - [RISCV] Custom lowering of llvm.is.fpclass

via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 25 19:17:24 PDT 2023


Author: LiaoChunyu
Date: 2023-04-26T10:17:18+08:00
New Revision: 5b869f3e2af485b2b97368e36dbff1c4e28c0d7e

URL: https://github.com/llvm/llvm-project/commit/5b869f3e2af485b2b97368e36dbff1c4e28c0d7e
DIFF: https://github.com/llvm/llvm-project/commit/5b869f3e2af485b2b97368e36dbff1c4e28c0d7e.diff

LOG: [RISCV] Custom lowering of llvm.is.fpclass

This patch adds custom lowering of llvm.is.fpclass using the FCLASS.S, FCLASS.H, and FCLASS.D instructions.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D149063
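
A minimal sketch of what the new lowering produces, assuming an F-extension
target; the function name @isnan is illustrative only, and the expected
instruction sequence mirrors the CHECK lines added to
llvm/test/CodeGen/RISCV/float-intrinsics.ll below.

    declare i1 @llvm.is.fpclass.f32(float, i32)

    define i1 @isnan(float %x) {
      ; In the llvm.is.fpclass test mask, bit 0 = signaling NaN and
      ; bit 1 = quiet NaN, so 3 selects "any NaN". Expected RV32IF/RV64IF output:
      ;   fclass.s a0, fa0
      ;   andi     a0, a0, 768   ; 768 = 0x300 = FPMASK_Signaling_NaN | FPMASK_Quiet_NaN
      ;   snez     a0, a0
      %r = call i1 @llvm.is.fpclass.f32(float %x, i32 3)
      ret i1 %r
    }

The assembly can be reproduced with something like
llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs (given here as an
assumption; the authoritative RUN lines are in the modified tests).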

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h
    llvm/lib/Target/RISCV/RISCVInstrInfo.h
    llvm/lib/Target/RISCV/RISCVInstrInfoD.td
    llvm/lib/Target/RISCV/RISCVInstrInfoF.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
    llvm/test/CodeGen/RISCV/double-intrinsics.ll
    llvm/test/CodeGen/RISCV/float-intrinsics.ll
    llvm/test/CodeGen/RISCV/half-intrinsics.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 22bca318c897b..efe14933e9297 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -359,6 +359,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(FPRndMode, MVT::f16,
                          Subtarget.hasStdExtZfa() ? Legal : Custom);
       setOperationAction(ISD::SELECT, MVT::f16, Custom);
+      setOperationAction(ISD::IS_FPCLASS, MVT::f16, Custom);
     } else {
       static const unsigned ZfhminPromoteOps[] = {
           ISD::FMINNUM,      ISD::FMAXNUM,       ISD::FADD,
@@ -417,6 +418,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setOperationAction(FPOpToExpand, MVT::f32, Expand);
     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+    setOperationAction(ISD::IS_FPCLASS, MVT::f32, Custom);
 
     if (Subtarget.hasStdExtZfa())
       setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
@@ -450,6 +452,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
     setOperationAction(FPOpToExpand, MVT::f64, Expand);
     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
+    setOperationAction(ISD::IS_FPCLASS, MVT::f64, Custom);
   }
 
   if (Subtarget.is64Bit()) {
@@ -4168,6 +4171,42 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
   return Op;
 }
 
+static SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG,
+                               const RISCVSubtarget &Subtarget) {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+  MVT XLenVT = Subtarget.getXLenVT();
+  auto CNode = cast<ConstantSDNode>(Op.getOperand(1));
+  unsigned Check = CNode->getZExtValue();
+  unsigned TDCMask = 0;
+  if (Check & fcSNan)
+    TDCMask |= RISCV::FPMASK_Signaling_NaN;
+  if (Check & fcQNan)
+    TDCMask |= RISCV::FPMASK_Quiet_NaN;
+  if (Check & fcPosInf)
+    TDCMask |= RISCV::FPMASK_Positive_Infinity;
+  if (Check & fcNegInf)
+    TDCMask |= RISCV::FPMASK_Negative_Infinity;
+  if (Check & fcPosNormal)
+    TDCMask |= RISCV::FPMASK_Positive_Normal;
+  if (Check & fcNegNormal)
+    TDCMask |= RISCV::FPMASK_Negative_Normal;
+  if (Check & fcPosSubnormal)
+    TDCMask |= RISCV::FPMASK_Positive_Subnormal;
+  if (Check & fcNegSubnormal)
+    TDCMask |= RISCV::FPMASK_Negative_Subnormal;
+  if (Check & fcPosZero)
+    TDCMask |= RISCV::FPMASK_Positive_Zero;
+  if (Check & fcNegZero)
+    TDCMask |= RISCV::FPMASK_Negative_Zero;
+
+  SDValue TDCMaskV = DAG.getConstant(TDCMask, DL, XLenVT);
+  SDValue FPCLASS = DAG.getNode(RISCVISD::FPCLASS, DL, VT, Op.getOperand(0));
+  SDValue AND = DAG.getNode(ISD::AND, DL, VT, FPCLASS, TDCMaskV);
+  return DAG.getSetCC(DL, VT, AND, DAG.getConstant(0, DL, XLenVT),
+                      ISD::CondCode::SETNE);
+}
+
 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
   switch (Op.getOpcode()) {
@@ -4279,6 +4318,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     return LowerINTRINSIC_W_CHAIN(Op, DAG);
   case ISD::INTRINSIC_VOID:
     return LowerINTRINSIC_VOID(Op, DAG);
+  case ISD::IS_FPCLASS:
+    return LowerIS_FPCLASS(Op, DAG, Subtarget);
   case ISD::BITREVERSE: {
     MVT VT = Op.getSimpleValueType();
     SDLoc DL(Op);
@@ -14609,6 +14650,7 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
   NODE_NAME_CASE(FROUND)
+  NODE_NAME_CASE(FPCLASS)
   NODE_NAME_CASE(READ_CYCLE_WIDE)
   NODE_NAME_CASE(BREV8)
   NODE_NAME_CASE(ORC_B)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 3780986407601..ddfe319efea90 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -119,6 +119,7 @@ enum NodeType : unsigned {
   // inserter.
   FROUND,
 
+  FPCLASS,
   // READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
   // (returns (Lo, Hi)). It takes a chain operand.
   READ_CYCLE_WIDE,

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index 9810f73930ca6..8f0f164263be9 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -265,6 +265,17 @@ bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2);
 // Special immediate for AVL operand of V pseudo instructions to indicate VLMax.
 static constexpr int64_t VLMaxSentinel = -1LL;
 
+// Mask assignments for floating-point
+static constexpr unsigned FPMASK_Negative_Infinity = 0x001;
+static constexpr unsigned FPMASK_Negative_Normal = 0x002;
+static constexpr unsigned FPMASK_Negative_Subnormal = 0x004;
+static constexpr unsigned FPMASK_Negative_Zero = 0x008;
+static constexpr unsigned FPMASK_Positive_Zero = 0x010;
+static constexpr unsigned FPMASK_Positive_Subnormal = 0x020;
+static constexpr unsigned FPMASK_Positive_Normal = 0x040;
+static constexpr unsigned FPMASK_Positive_Infinity = 0x080;
+static constexpr unsigned FPMASK_Signaling_NaN = 0x100;
+static constexpr unsigned FPMASK_Quiet_NaN = 0x200;
 } // namespace RISCV
 
 namespace RISCVVPseudosTable {

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
index 961f5e9ebb3ae..b2fba9d620755 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -257,6 +257,8 @@ def : Pat<(any_fsqrt FPR64:$rs1), (FSQRT_D FPR64:$rs1, FRM_DYN)>;
 def : Pat<(fneg FPR64:$rs1), (FSGNJN_D $rs1, $rs1)>;
 def : Pat<(fabs FPR64:$rs1), (FSGNJX_D $rs1, $rs1)>;
 
+def : Pat<(riscv_fpclass FPR64:$rs1), (FCLASS_D $rs1)>;
+
 def : PatFprFpr<fcopysign, FSGNJ_D, FPR64>;
 def : Pat<(fcopysign FPR64:$rs1, (fneg FPR64:$rs2)), (FSGNJN_D $rs1, $rs2)>;
 def : Pat<(fcopysign FPR64:$rs1, FPR32:$rs2), (FSGNJ_D $rs1, (FCVT_D_S $rs2))>;

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
index aa5d0afd69cfe..0107c66289ff3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -29,6 +29,11 @@ def SDT_RISCVFCVT_X
 def SDT_RISCVFROUND
     : SDTypeProfile<1, 3, [SDTCisFP<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
                            SDTCisVT<3, XLenVT>]>;
+def SDT_RISCVFPCLASS
+    : SDTypeProfile<1, 1, [SDTCisVT<0, XLenVT>, SDTCisFP<1>]>;
+
+def riscv_fpclass
+    : SDNode<"RISCVISD::FPCLASS", SDT_RISCVFPCLASS>;
 
 def riscv_fround
     : SDNode<"RISCVISD::FROUND", SDT_RISCVFROUND>;
@@ -475,6 +480,8 @@ def : Pat<(any_fsqrt FPR32:$rs1), (FSQRT_S FPR32:$rs1, FRM_DYN)>;
 def : Pat<(fneg FPR32:$rs1), (FSGNJN_S $rs1, $rs1)>;
 def : Pat<(fabs FPR32:$rs1), (FSGNJX_S $rs1, $rs1)>;
 
+def : Pat<(riscv_fpclass FPR32:$rs1), (FCLASS_S $rs1)>;
+
 def : PatFprFpr<fcopysign, FSGNJ_S, FPR32>;
 def : Pat<(fcopysign FPR32:$rs1, (fneg FPR32:$rs2)), (FSGNJN_S $rs1, $rs2)>;
 

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
index 5bda568c3cd0b..8bcc1d713a902 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -252,6 +252,8 @@ def : Pat<(any_fsqrt FPR16:$rs1), (FSQRT_H FPR16:$rs1, FRM_DYN)>;
 def : Pat<(fneg FPR16:$rs1), (FSGNJN_H $rs1, $rs1)>;
 def : Pat<(fabs FPR16:$rs1), (FSGNJX_H $rs1, $rs1)>;
 
+def : Pat<(riscv_fpclass FPR16:$rs1), (FCLASS_H $rs1)>;
+
 def : PatFprFpr<fcopysign, FSGNJ_H, FPR16>;
 def : Pat<(fcopysign FPR16:$rs1, (fneg FPR16:$rs2)), (FSGNJN_H $rs1, $rs2)>;
 def : Pat<(fcopysign FPR16:$rs1, FPR32:$rs2),

diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index 74cd579e2c4f3..38aea142b9100 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -1044,3 +1044,37 @@ define i64 @llround_f64(double %a) nounwind {
   %1 = call i64 @llvm.llround.i64.f64(double %a)
   ret i64 %1
 }
+
+declare i1 @llvm.is.fpclass.f64(double, i32)
+define i1 @isnan_d_fpclass(double %x) {
+; CHECKIFD-LABEL: isnan_d_fpclass:
+; CHECKIFD:       # %bb.0:
+; CHECKIFD-NEXT:    fclass.d a0, fa0
+; CHECKIFD-NEXT:    andi a0, a0, 768
+; CHECKIFD-NEXT:    snez a0, a0
+; CHECKIFD-NEXT:    ret
+;
+; RV32I-LABEL: isnan_d_fpclass:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a1, 1
+; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    lui a2, 524032
+; RV32I-NEXT:    beq a1, a2, .LBB29_2
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    slt a0, a2, a1
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB29_2:
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: isnan_d_fpclass:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 1
+; RV64I-NEXT:    srli a0, a0, 1
+; RV64I-NEXT:    li a1, 2047
+; RV64I-NEXT:    slli a1, a1, 52
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f64(double %x, i32 3)  ; nan
+  ret i1 %1
+}

diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
index 8ae7ce4256d29..44ba6b54fe7fc 100644
--- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll
@@ -1157,3 +1157,424 @@ define i64 @llround_f32(float %a) nounwind {
   %1 = call i64 @llvm.llround.i64.f32(float %a)
   ret i64 %1
 }
+
+declare i1 @llvm.is.fpclass.f32(float, i32)
+define i1 @fpclass(float %x) {
+; RV32IF-LABEL: fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 927
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 927
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: fpclass:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 1
+; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    addi a2, a1, -1
+; RV32I-NEXT:    lui a3, 2048
+; RV32I-NEXT:    addi a3, a3, -1
+; RV32I-NEXT:    sltu a2, a2, a3
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    and a2, a2, a0
+; RV32I-NEXT:    seqz a3, a1
+; RV32I-NEXT:    lui a4, 522240
+; RV32I-NEXT:    xor a5, a1, a4
+; RV32I-NEXT:    seqz a5, a5
+; RV32I-NEXT:    or a3, a3, a5
+; RV32I-NEXT:    or a2, a3, a2
+; RV32I-NEXT:    slt a3, a4, a1
+; RV32I-NEXT:    or a2, a2, a3
+; RV32I-NEXT:    lui a3, 1046528
+; RV32I-NEXT:    add a1, a1, a3
+; RV32I-NEXT:    srli a1, a1, 24
+; RV32I-NEXT:    sltiu a1, a1, 127
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    or a0, a2, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: fpclass:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    addi a2, a0, -1
+; RV64I-NEXT:    lui a3, 2048
+; RV64I-NEXT:    addiw a3, a3, -1
+; RV64I-NEXT:    sltu a2, a2, a3
+; RV64I-NEXT:    slti a1, a1, 0
+; RV64I-NEXT:    and a2, a2, a1
+; RV64I-NEXT:    seqz a3, a0
+; RV64I-NEXT:    lui a4, 522240
+; RV64I-NEXT:    xor a5, a0, a4
+; RV64I-NEXT:    seqz a5, a5
+; RV64I-NEXT:    or a3, a3, a5
+; RV64I-NEXT:    or a2, a3, a2
+; RV64I-NEXT:    slt a3, a4, a0
+; RV64I-NEXT:    or a2, a2, a3
+; RV64I-NEXT:    lui a3, 1046528
+; RV64I-NEXT:    add a0, a0, a3
+; RV64I-NEXT:    srliw a0, a0, 24
+; RV64I-NEXT:    sltiu a0, a0, 127
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    or a0, a2, a0
+; RV64I-NEXT:    ret
+  %cmp = call i1 @llvm.is.fpclass.f32(float %x, i32 639)
+  ret i1 %cmp
+}
+
+define i1 @isnan_fpclass(float %x) {
+; RV32IF-LABEL: isnan_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 768
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isnan_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 768
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: isnan_fpclass:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    lui a1, 522240
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: isnan_fpclass:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    lui a1, 522240
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 3)  ; nan
+  ret i1 %1
+}
+
+define i1 @isqnan_fpclass(float %x) {
+; RV32IF-LABEL: isqnan_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    slli a0, a0, 22
+; RV32IF-NEXT:    srli a0, a0, 31
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isqnan_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    slli a0, a0, 54
+; RV64IF-NEXT:    srli a0, a0, 63
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: isqnan_fpclass:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    lui a1, 523264
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: isqnan_fpclass:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    lui a1, 523264
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 2)  ; qnan
+  ret i1 %1
+}
+
+define i1 @issnan_fpclass(float %x) {
+; RV32IF-LABEL: issnan_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    slli a0, a0, 23
+; RV32IF-NEXT:    srli a0, a0, 31
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: issnan_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    slli a0, a0, 55
+; RV64IF-NEXT:    srli a0, a0, 63
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: issnan_fpclass:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    lui a1, 523264
+; RV32I-NEXT:    slt a1, a0, a1
+; RV32I-NEXT:    lui a2, 522240
+; RV32I-NEXT:    slt a0, a2, a0
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: issnan_fpclass:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    lui a1, 523264
+; RV64I-NEXT:    slt a1, a0, a1
+; RV64I-NEXT:    lui a2, 522240
+; RV64I-NEXT:    slt a0, a2, a0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 1)  ; snan
+  ret i1 %1
+}
+
+define i1 @isinf_fpclass(float %x) {
+; RV32IF-LABEL: isinf_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 129
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isinf_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 129
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: isinf_fpclass:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    lui a1, 522240
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: isinf_fpclass:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    lui a1, 522240
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 516)  ; 0x204 = "inf"
+  ret i1 %1
+}
+
+define i1 @isposinf_fpclass(float %x) {
+; RV32IF-LABEL: isposinf_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    slli a0, a0, 24
+; RV32IF-NEXT:    srli a0, a0, 31
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isposinf_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    slli a0, a0, 56
+; RV64IF-NEXT:    srli a0, a0, 63
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: isposinf_fpclass:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a1, 522240
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: isposinf_fpclass:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    lui a1, 522240
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 512)  ; 0x200 = "+inf"
+  ret i1 %1
+}
+
+define i1 @isneginf_fpclass(float %x) {
+; RV32IF-LABEL: isneginf_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 1
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isneginf_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 1
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: isneginf_fpclass:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    lui a1, 1046528
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    seqz a0, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: isneginf_fpclass:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a0, a0
+; RV64I-NEXT:    lui a1, 1046528
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    seqz a0, a0
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 4)  ; "-inf"
+  ret i1 %1
+}
+
+define i1 @isfinite_fpclass(float %x) {
+; RV32IF-LABEL: isfinite_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 126
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isfinite_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 126
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: isfinite_fpclass:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    lui a1, 522240
+; RV32I-NEXT:    slt a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: isfinite_fpclass:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    lui a1, 522240
+; RV64I-NEXT:    slt a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 504)  ; 0x1f8 = "finite"
+  ret i1 %1
+}
+
+define i1 @isposfinite_fpclass(float %x) {
+; RV32IF-LABEL: isposfinite_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 112
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isposfinite_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 112
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: isposfinite_fpclass:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srli a0, a0, 23
+; RV32I-NEXT:    sltiu a0, a0, 255
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: isposfinite_fpclass:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srliw a0, a0, 23
+; RV64I-NEXT:    sltiu a0, a0, 255
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 448)  ; 0x1c0 = "+finite"
+  ret i1 %1
+}
+
+define i1 @isnegfinite_fpclass(float %x) {
+; RV32IF-LABEL: isnegfinite_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 14
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isnegfinite_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 14
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: isnegfinite_fpclass:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a1, a0, 1
+; RV32I-NEXT:    srli a1, a1, 1
+; RV32I-NEXT:    lui a2, 522240
+; RV32I-NEXT:    slt a1, a1, a2
+; RV32I-NEXT:    slti a0, a0, 0
+; RV32I-NEXT:    and a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: isnegfinite_fpclass:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sext.w a1, a0
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    lui a2, 522240
+; RV64I-NEXT:    slt a0, a0, a2
+; RV64I-NEXT:    slti a1, a1, 0
+; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 56)  ; 0x38 = "-finite"
+  ret i1 %1
+}
+
+define i1 @isnotfinite_fpclass(float %x) {
+; RV32IF-LABEL: isnotfinite_fpclass:
+; RV32IF:       # %bb.0:
+; RV32IF-NEXT:    fclass.s a0, fa0
+; RV32IF-NEXT:    andi a0, a0, 897
+; RV32IF-NEXT:    snez a0, a0
+; RV32IF-NEXT:    ret
+;
+; RV64IF-LABEL: isnotfinite_fpclass:
+; RV64IF:       # %bb.0:
+; RV64IF-NEXT:    fclass.s a0, fa0
+; RV64IF-NEXT:    andi a0, a0, 897
+; RV64IF-NEXT:    snez a0, a0
+; RV64IF-NEXT:    ret
+;
+; RV32I-LABEL: isnotfinite_fpclass:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 1
+; RV32I-NEXT:    srli a0, a0, 1
+; RV32I-NEXT:    lui a1, 522240
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: isnotfinite_fpclass:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 33
+; RV64I-NEXT:    srli a0, a0, 33
+; RV64I-NEXT:    lui a1, 522240
+; RV64I-NEXT:    addiw a1, a1, -1
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f32(float %x, i32 519)  ; 0x207 = "inf|nan"
+  ret i1 %1
+}

diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
index 7183e8aa93144..654bfe094c502 100644
--- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
@@ -1928,3 +1928,53 @@ define half @roundeven_f16(half %a) nounwind {
   %1 = call half @llvm.roundeven.f16(half %a)
   ret half %1
 }
+
+declare i1 @llvm.is.fpclass.f16(half, i32)
+define i1 @isnan_d_fpclass(half %x) {
+; CHECKIZFH-LABEL: isnan_d_fpclass:
+; CHECKIZFH:       # %bb.0:
+; CHECKIZFH-NEXT:    fclass.h a0, fa0
+; CHECKIZFH-NEXT:    andi a0, a0, 768
+; CHECKIZFH-NEXT:    snez a0, a0
+; CHECKIZFH-NEXT:    ret
+;
+; RV32I-LABEL: isnan_d_fpclass:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 17
+; RV32I-NEXT:    srli a0, a0, 17
+; RV32I-NEXT:    li a1, 31
+; RV32I-NEXT:    slli a1, a1, 10
+; RV32I-NEXT:    slt a0, a1, a0
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: isnan_d_fpclass:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 49
+; RV64I-NEXT:    srli a0, a0, 49
+; RV64I-NEXT:    li a1, 31
+; RV64I-NEXT:    slli a1, a1, 10
+; RV64I-NEXT:    slt a0, a1, a0
+; RV64I-NEXT:    ret
+;
+; RV32IZFHMIN-LABEL: isnan_d_fpclass:
+; RV32IZFHMIN:       # %bb.0:
+; RV32IZFHMIN-NEXT:    fmv.x.h a0, fa0
+; RV32IZFHMIN-NEXT:    slli a0, a0, 17
+; RV32IZFHMIN-NEXT:    srli a0, a0, 17
+; RV32IZFHMIN-NEXT:    li a1, 31
+; RV32IZFHMIN-NEXT:    slli a1, a1, 10
+; RV32IZFHMIN-NEXT:    slt a0, a1, a0
+; RV32IZFHMIN-NEXT:    ret
+;
+; RV64IZFHMIN-LABEL: isnan_d_fpclass:
+; RV64IZFHMIN:       # %bb.0:
+; RV64IZFHMIN-NEXT:    fmv.x.h a0, fa0
+; RV64IZFHMIN-NEXT:    slli a0, a0, 49
+; RV64IZFHMIN-NEXT:    srli a0, a0, 49
+; RV64IZFHMIN-NEXT:    li a1, 31
+; RV64IZFHMIN-NEXT:    slli a1, a1, 10
+; RV64IZFHMIN-NEXT:    slt a0, a1, a0
+; RV64IZFHMIN-NEXT:    ret
+  %1 = call i1 @llvm.is.fpclass.f16(half %x, i32 3)  ; nan
+  ret i1 %1
+}


        

