[llvm] 1b12427 - [VP][RISCV] Add vp.is.fpclass and RISC-V support
via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 25 00:41:04 PDT 2023
Author: LiaoChunyu
Date: 2023-08-25T15:40:55+08:00
New Revision: 1b12427c0133679e3034f572cac353184f90960b
URL: https://github.com/llvm/llvm-project/commit/1b12427c0133679e3034f572cac353184f90960b
DIFF: https://github.com/llvm/llvm-project/commit/1b12427c0133679e3034f572cac353184f90960b.diff
LOG: [VP][RISCV] Add vp.is.fpclass and RISC-V support
D151176 added FCLASS_VL support for llvm.is.fpclass, but no VP counterpart existed; this patch adds vp.is.fpclass.
Reviewed By: arsenm
Differential Revision: https://reviews.llvm.org/D152993
Added:
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll
llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll
llvm/test/Verifier/llvm.vp.is.fpclass.ll
Modified:
llvm/docs/LangRef.rst
llvm/include/llvm/IR/Intrinsics.td
llvm/include/llvm/IR/VPIntrinsics.def
llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
llvm/lib/IR/IntrinsicInst.cpp
llvm/lib/IR/Verifier.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
llvm/test/Verifier/vp-intrinsics.ll
llvm/unittests/IR/VPIntrinsicTest.cpp
Removed:
################################################################################
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index f680c82cea0701..2be943c51133d5 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -23126,6 +23126,46 @@ Examples:
%t = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c)
%also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
+'``llvm.vp.is.fpclass.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+ declare <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f32(<vscale x 2 x float> <op>, i32 <test>, <vscale x 2 x i1> <mask>, i32 <vector_length>)
+ declare <2 x i1> @llvm.vp.is.fpclass.v2f16(<2 x half> <op>, i32 <test>, <2 x i1> <mask>, i32 <vector_length>)
+
+Overview:
+"""""""""
+
+Predicated version of the :ref:`llvm.is.fpclass <llvm.is.fpclass>` intrinsic.
+
+Arguments:
+""""""""""
+
+The first operand is a floating-point vector, the result type is a vector of
+boolean with the same number of elements as the first argument. The second
+operand specifies which tests to perform :ref:`llvm.is.fpclass <llvm.is.fpclass>`.
+The third operand is the vector mask and has the same number of elements as the
+result vector type. The fourth operand is the explicit vector length of the
+operation.
+
+Semantics:
+""""""""""
+
+The '``llvm.vp.is.fpclass``' intrinsic performs the :ref:`llvm.is.fpclass <llvm.is.fpclass>` test on each enabled lane.
+
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+ %r = call <2 x i1> @llvm.vp.is.fpclass.v2f16(<2 x half> %x, i32 3, <2 x i1> %m, i32 %evl)
+ %t = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f16(<vscale x 2 x half> %x, i32 3, <vscale x 2 x i1> %m, i32 %evl)
.. _int_mload_mstore:
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 696b17901a0fcf..4cca6908991f89 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2180,6 +2180,14 @@ def int_experimental_vp_splice:
llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
+def int_vp_is_fpclass:
+ DefaultAttrsIntrinsic<[ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+ [ llvm_anyvector_ty,
+ llvm_i32_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<1>>]>;
+
//===-------------------------- Masked Intrinsics -------------------------===//
//
def int_masked_load:
diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
index 61c7a3e5012980..c82f80747288a6 100644
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -490,6 +490,10 @@ END_REGISTER_VP_INTRINSIC(vp_icmp)
///// } Comparisons
+// llvm.vp.is.fpclass(x,fpclass_test,mask,vlen)
+BEGIN_REGISTER_VP(vp_is_fpclass, 2, 3, VP_IS_FPCLASS, 0)
+END_REGISTER_VP(vp_is_fpclass, VP_IS_FPCLASS)
+
///// Memory Operations {
// llvm.vp.store(val,ptr,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_store, 2, 3)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 00c2d09aa44601..4ca50f2a553af0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7975,6 +7975,16 @@ void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
}
break;
}
+ case ISD::VP_IS_FPCLASS: {
+ const DataLayout DLayout = DAG.getDataLayout();
+ EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
+ auto Constant = cast<ConstantSDNode>(OpValues[1])->getZExtValue();
+ SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
+ SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
+ {OpValues[0], Check, OpValues[2], OpValues[3]});
+ setValue(&VPIntrin, V);
+ return;
+ }
case ISD::VP_INTTOPTR: {
SDValue N = OpValues[0];
EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType());
diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp
index 36d56699c64e93..61be167ebaa28d 100644
--- a/llvm/lib/IR/IntrinsicInst.cpp
+++ b/llvm/lib/IR/IntrinsicInst.cpp
@@ -621,6 +621,9 @@ Function *VPIntrinsic::getDeclarationForParams(Module *M, Intrinsic::ID VPID,
VPFunc =
Intrinsic::getDeclaration(M, VPID, {ReturnType, Params[0]->getType()});
break;
+ case Intrinsic::vp_is_fpclass:
+ VPFunc = Intrinsic::getDeclaration(M, VPID, {Params[0]->getType()});
+ break;
case Intrinsic::vp_merge:
case Intrinsic::vp_select:
VPFunc = Intrinsic::getDeclaration(M, VPID, {Params[1]->getType()});
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index a855b7c8fa6577..c36d45fd55729f 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -6078,6 +6078,11 @@ void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
Check(CmpInst::isIntPredicate(Pred),
"invalid predicate for VP integer comparison intrinsic", &VPI);
}
+ if (VPI.getIntrinsicID() == Intrinsic::vp_is_fpclass) {
+ auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
+ Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
+ "unsupported bits for llvm.vp.is.fpclass test mask");
+ }
}
void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 3e42dbfba4d099..778b3824eda147 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -606,7 +606,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::VP_SQRT, ISD::VP_FMINNUM, ISD::VP_FMAXNUM,
ISD::VP_FCEIL, ISD::VP_FFLOOR, ISD::VP_FROUND,
ISD::VP_FROUNDEVEN, ISD::VP_FCOPYSIGN, ISD::VP_FROUNDTOZERO,
- ISD::VP_FRINT, ISD::VP_FNEARBYINT};
+ ISD::VP_FRINT, ISD::VP_FNEARBYINT, ISD::VP_IS_FPCLASS};
static const unsigned IntegerVecReduceOps[] = {
ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
@@ -4864,6 +4864,10 @@ SDValue RISCVTargetLowering::LowerIS_FPCLASS(SDValue Op,
if (VT.isScalableVector()) {
MVT DstVT = VT0.changeVectorElementTypeToInteger();
auto [Mask, VL] = getDefaultScalableVLOps(VT0, DL, DAG, Subtarget);
+ if (Op.getOpcode() == ISD::VP_IS_FPCLASS) {
+ Mask = Op.getOperand(2);
+ VL = Op.getOperand(3);
+ }
SDValue FPCLASS = DAG.getNode(RISCVISD::FCLASS_VL, DL, DstVT, Op0, Mask,
VL, Op->getFlags());
if (IsOneBitMask)
@@ -4880,7 +4884,13 @@ SDValue RISCVTargetLowering::LowerIS_FPCLASS(SDValue Op,
MVT ContainerVT = getContainerForFixedLengthVector(VT);
MVT ContainerDstVT = ContainerVT0.changeVectorElementTypeToInteger();
auto [Mask, VL] = getDefaultVLOps(VT0, ContainerVT0, DL, DAG, Subtarget);
-
+ if (Op.getOpcode() == ISD::VP_IS_FPCLASS) {
+ Mask = Op.getOperand(2);
+ MVT MaskContainerVT =
+ getContainerForFixedLengthVector(Mask.getSimpleValueType());
+ Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
+ VL = Op.getOperand(3);
+ }
Op0 = convertToScalableVector(ContainerVT0, Op0, DAG, Subtarget);
SDValue FPCLASS = DAG.getNode(RISCVISD::FCLASS_VL, DL, ContainerDstVT, Op0,
@@ -6027,6 +6037,8 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
case ISD::VP_LSHR:
case ISD::VP_SHL:
return lowerVPOp(Op, DAG);
+ case ISD::VP_IS_FPCLASS:
+ return LowerIS_FPCLASS(Op, DAG);
case ISD::VP_SIGN_EXTEND:
case ISD::VP_ZERO_EXTEND:
if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index c221881267e391..4f7789a4f24f6e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -2466,9 +2466,10 @@ foreach vti = AllFloatVectors in {
// 14.14. Vector Floating-Point Classify Instruction
def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2),
- (vti.Mask true_mask), VLOpFrag),
- (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX)
- (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
+ (vti.Mask V0), VLOpFrag),
+ (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX #"_MASK")
+ (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
+ (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
}
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll
new file mode 100644
index 00000000000000..beba05719e4429
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass-vp.ll
@@ -0,0 +1,319 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
+; RUN: -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
+; RUN: -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+
+define <2 x i1> @isnan_v2f16(<2 x half> %x, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: li a0, 768
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
+; CHECK-NEXT: ret
+ %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f16(<2 x half> %x, i32 3, <2 x i1> %m, i32 %evl) ; nan
+ ret <2 x i1> %1
+}
+
+define <2 x i1> @isnan_v2f16_unmasked(<2 x half> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v2f16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 768
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f16(<2 x half> %x, i32 3, <2 x i1> %m, i32 %evl) ; nan
+ ret <2 x i1> %1
+}
+
+define <2 x i1> @isnan_v2f32(<2 x float> %x, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: li a0, 927
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
+; CHECK-NEXT: ret
+ %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f32(<2 x float> %x, i32 639, <2 x i1> %m, i32 %evl)
+ ret <2 x i1> %1
+}
+
+define <2 x i1> @isnan_v2f32_unmasked(<2 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v2f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 927
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f32(<2 x float> %x, i32 639, <2 x i1> %m, i32 %evl)
+ ret <2 x i1> %1
+}
+
+define <4 x i1> @isnan_v4f32(<4 x float> %x, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: li a0, 768
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
+; CHECK-NEXT: ret
+ %1 = call <4 x i1> @llvm.vp.is.fpclass.v4f32(<4 x float> %x, i32 3, <4 x i1> %m, i32 %evl) ; nan
+ ret <4 x i1> %1
+}
+
+define <4 x i1> @isnan_v4f32_unmasked(<4 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v4f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 768
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %1 = call <4 x i1> @llvm.vp.is.fpclass.v4f32(<4 x float> %x, i32 3, <4 x i1> %m, i32 %evl) ; nan
+ ret <4 x i1> %1
+}
+
+define <8 x i1> @isnan_v8f32(<8 x float> %x, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vfclass.v v10, v8, v0.t
+; CHECK-NEXT: li a0, 512
+; CHECK-NEXT: vmseq.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: ret
+ %1 = call <8 x i1> @llvm.vp.is.fpclass.v8f32(<8 x float> %x, i32 2, <8 x i1> %m, i32 %evl)
+ ret <8 x i1> %1
+}
+
+define <8 x i1> @isnan_v8f32_unmasked(<8 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v8f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 512
+; CHECK-NEXT: vmseq.vx v0, v8, a0
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %1 = call <8 x i1> @llvm.vp.is.fpclass.v8f32(<8 x float> %x, i32 2, <8 x i1> %m, i32 %evl)
+ ret <8 x i1> %1
+}
+
+define <16 x i1> @isnan_v16f32(<16 x float> %x, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vfclass.v v12, v8, v0.t
+; CHECK-NEXT: li a0, 256
+; CHECK-NEXT: vmseq.vx v8, v12, a0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: ret
+ %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f32(<16 x float> %x, i32 1, <16 x i1> %m, i32 %evl)
+ ret <16 x i1> %1
+}
+
+define <16 x i1> @isnan_v16f32_unmasked(<16 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_v16f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 256
+; CHECK-NEXT: vmseq.vx v0, v8, a0
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f32(<16 x float> %x, i32 1, <16 x i1> %m, i32 %evl)
+ ret <16 x i1> %1
+}
+
+define <2 x i1> @isnormal_v2f64(<2 x double> %x, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnormal_v2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: li a0, 129
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
+; CHECK-NEXT: ret
+ %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f64(<2 x double> %x, i32 516, <2 x i1> %m, i32 %evl) ; 0x204 = "inf"
+ ret <2 x i1> %1
+}
+
+define <2 x i1> @isnormal_v2f64_unmasked(<2 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnormal_v2f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 129
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %1 = call <2 x i1> @llvm.vp.is.fpclass.v2f64(<2 x double> %x, i32 516, <2 x i1> %m, i32 %evl) ; 0x204 = "inf"
+ ret <2 x i1> %1
+}
+
+define <4 x i1> @isposinf_v4f64(<4 x double> %x, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isposinf_v4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vfclass.v v10, v8, v0.t
+; CHECK-NEXT: li a0, 128
+; CHECK-NEXT: vmseq.vx v8, v10, a0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: ret
+ %1 = call <4 x i1> @llvm.vp.is.fpclass.v4f64(<4 x double> %x, i32 512, <4 x i1> %m, i32 %evl) ; 0x200 = "+inf"
+ ret <4 x i1> %1
+}
+
+define <4 x i1> @isposinf_v4f64_unmasked(<4 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isposinf_v4f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 128
+; CHECK-NEXT: vmseq.vx v0, v8, a0
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %1 = call <4 x i1> @llvm.vp.is.fpclass.v4f64(<4 x double> %x, i32 512, <4 x i1> %m, i32 %evl) ; 0x200 = "+inf"
+ ret <4 x i1> %1
+}
+
+define <8 x i1> @isneginf_v8f64(<8 x double> %x, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isneginf_v8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vfclass.v v12, v8, v0.t
+; CHECK-NEXT: vmseq.vi v8, v12, 1, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: ret
+ %1 = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %x, i32 4, <8 x i1> %m, i32 %evl) ; "-inf"
+ ret <8 x i1> %1
+}
+
+define <8 x i1> @isneginf_v8f64_unmasked(<8 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isneginf_v8f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: vmseq.vi v0, v8, 1
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %1 = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %x, i32 4, <8 x i1> %m, i32 %evl) ; "-inf"
+ ret <8 x i1> %1
+}
+
+define <16 x i1> @isfinite_v16f64(<16 x double> %x, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isfinite_v16f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: li a0, 126
+; CHECK-NEXT: vand.vx v16, v8, a0, v0.t
+; CHECK-NEXT: vmsne.vi v8, v16, 0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: ret
+ %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 504, <16 x i1> %m, i32 %evl) ; 0x1f8 = "finite"
+ ret <16 x i1> %1
+}
+
+define <16 x i1> @isfinite_v16f64_unmasked(<16 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isfinite_v16f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 126
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 504, <16 x i1> %m, i32 %evl) ; 0x1f8 = "finite"
+ ret <16 x i1> %1
+}
+
+define <16 x i1> @isposfinite_v16f64(<16 x double> %x, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isposfinite_v16f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: li a0, 112
+; CHECK-NEXT: vand.vx v16, v8, a0, v0.t
+; CHECK-NEXT: vmsne.vi v8, v16, 0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: ret
+ %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 448, <16 x i1> %m, i32 %evl) ; 0x1c0 = "+finite"
+ ret <16 x i1> %1
+}
+
+define <16 x i1> @isnegfinite_v16f64_unmasked(<16 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnegfinite_v16f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: vand.vi v8, v8, 14
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 56, <16 x i1> %m, i32 %evl) ; 0x38 = "-finite"
+ ret <16 x i1> %1
+}
+
+define <16 x i1> @isnotfinite_v16f64(<16 x double> %x, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnotfinite_v16f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: li a0, 897
+; CHECK-NEXT: vand.vx v16, v8, a0, v0.t
+; CHECK-NEXT: vmsne.vi v8, v16, 0, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: ret
+ %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 519, <16 x i1> %m, i32 %evl) ; 0x207 = "inf|nan"
+ ret <16 x i1> %1
+}
+
+define <16 x i1> @isnotfinite_v16f64_unmasked(<16 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnotfinite_v16f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 897
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %1 = call <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double> %x, i32 519, <16 x i1> %m, i32 %evl) ; 0x207 = "inf|nan"
+ ret <16 x i1> %1
+}
+
+declare <2 x i1> @llvm.vp.is.fpclass.v2f16(<2 x half>, i32, <2 x i1>, i32)
+declare <2 x i1> @llvm.vp.is.fpclass.v2f32(<2 x float>, i32, <2 x i1>, i32)
+declare <4 x i1> @llvm.vp.is.fpclass.v4f32(<4 x float>, i32, <4 x i1>, i32)
+declare <8 x i1> @llvm.vp.is.fpclass.v8f32(<8 x float>, i32, <8 x i1>, i32)
+declare <16 x i1> @llvm.vp.is.fpclass.v16f32(<16 x float>, i32, <16 x i1>, i32)
+declare <2 x i1> @llvm.vp.is.fpclass.v2f64(<2 x double>, i32, <2 x i1>, i32)
+declare <4 x i1> @llvm.vp.is.fpclass.v4f64(<4 x double>, i32, <4 x i1>, i32)
+declare <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double>, i32, <8 x i1>, i32)
+declare <16 x i1> @llvm.vp.is.fpclass.v16f64(<16 x double>, i32, <16 x i1>, i32)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll
new file mode 100644
index 00000000000000..12bfc235c0c6d6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-vp.ll
@@ -0,0 +1,245 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 2 x i1> @isnan_nxv2f16(<vscale x 2 x half> %x, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: li a0, 768
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f16(<vscale x 2 x half> %x, i32 3, <vscale x 2 x i1> %m, i32 %evl) ; nan
+ ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 2 x i1> @isnan_nxv2f16_unmasked(<vscale x 2 x half> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv2f16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 768
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f16(<vscale x 2 x half> %x, i32 3, <vscale x 2 x i1> %m, i32 %evl) ; nan
+ ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 2 x i1> @isnan_nxv2f32(<vscale x 2 x float> %x, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: li a0, 927
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f32(<vscale x 2 x float> %x, i32 639, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 2 x i1> @isnan_nxv2f32_unmasked(<vscale x 2 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv2f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 927
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f32(<vscale x 2 x float> %x, i32 639, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 4 x i1> @isnan_nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: li a0, 768
+; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f32(<vscale x 4 x float> %x, i32 3, <vscale x 4 x i1> %m, i32 %evl) ; nan
+ ret <vscale x 4 x i1> %1
+}
+
+define <vscale x 4 x i1> @isnan_nxv4f32_unmasked(<vscale x 4 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv4f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 768
+; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f32(<vscale x 4 x float> %x, i32 3, <vscale x 4 x i1> %m, i32 %evl) ; nan
+ ret <vscale x 4 x i1> %1
+}
+
+define <vscale x 8 x i1> @isnan_nxv8f32(<vscale x 8 x float> %x, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: li a0, 512
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmseq.vx v0, v8, a0
+; CHECK-NEXT: ret
+ %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f32(<vscale x 8 x float> %x, i32 2, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i1> %1
+}
+
+define <vscale x 8 x i1> @isnan_nxv8f32_unmasked(<vscale x 8 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv8f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 512
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vmseq.vx v0, v8, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f32(<vscale x 8 x float> %x, i32 2, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i1> %1
+}
+
+define <vscale x 16 x i1> @isnan_nxv16f32(<vscale x 16 x float> %x, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: li a0, 256
+; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmseq.vx v0, v8, a0
+; CHECK-NEXT: ret
+ %1 = call <vscale x 16 x i1> @llvm.vp.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 1, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i1> %1
+}
+
+define <vscale x 16 x i1> @isnan_nxv16f32_unmasked(<vscale x 16 x float> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnan_nxv16f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 256
+; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
+; CHECK-NEXT: vmseq.vx v0, v8, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %1 = call <vscale x 16 x i1> @llvm.vp.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 1, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i1> %1
+}
+
+define <vscale x 2 x i1> @isnormal_nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isnormal_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: li a0, 129
+; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f64(<vscale x 2 x double> %x, i32 516, <vscale x 2 x i1> %m, i32 %evl) ; 0x204 = "inf"
+ ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 2 x i1> @isnormal_nxv2f64_unmasked(<vscale x 2 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isnormal_nxv2f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 129
+; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %1 = call <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f64(<vscale x 2 x double> %x, i32 516, <vscale x 2 x i1> %m, i32 %evl) ; 0x204 = "inf"
+ ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 4 x i1> @isposinf_nxv4f64(<vscale x 4 x double> %x, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isposinf_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: li a0, 128
+; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-NEXT: vmseq.vx v0, v8, a0
+; CHECK-NEXT: ret
+ %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f64(<vscale x 4 x double> %x, i32 512, <vscale x 4 x i1> %m, i32 %evl) ; 0x200 = "+inf"
+ ret <vscale x 4 x i1> %1
+}
+
+define <vscale x 4 x i1> @isposinf_nxv4f64_unmasked(<vscale x 4 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isposinf_nxv4f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: li a0, 128
+; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-NEXT: vmseq.vx v0, v8, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %1 = call <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f64(<vscale x 4 x double> %x, i32 512, <vscale x 4 x i1> %m, i32 %evl) ; 0x200 = "+inf"
+ ret <vscale x 4 x i1> %1
+}
+
+define <vscale x 8 x i1> @isneginf_nxv8f64(<vscale x 8 x double> %x, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: isneginf_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmseq.vi v0, v8, 1
+; CHECK-NEXT: ret
+ %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f64(<vscale x 8 x double> %x, i32 4, <vscale x 8 x i1> %m, i32 %evl) ; "-inf"
+ ret <vscale x 8 x i1> %1
+}
+
+define <vscale x 8 x i1> @isneginf_nxv8f64_unmasked(<vscale x 8 x double> %x, i32 zeroext %evl) {
+; CHECK-LABEL: isneginf_nxv8f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfclass.v v8, v8
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vmseq.vi v0, v8, 1
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %1 = call <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f64(<vscale x 8 x double> %x, i32 4, <vscale x 8 x i1> %m, i32 %evl) ; "-inf"
+ ret <vscale x 8 x i1> %1
+}
+
+
+declare <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f16(<vscale x 2 x half>, i32, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f32(<vscale x 2 x float>, i32, <vscale x 2 x i1>, i32)
+declare <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f32(<vscale x 4 x float>, i32, <vscale x 4 x i1>, i32)
+declare <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f32(<vscale x 8 x float>, i32, <vscale x 8 x i1>, i32)
+declare <vscale x 16 x i1> @llvm.vp.is.fpclass.nxv16f32(<vscale x 16 x float>, i32, <vscale x 16 x i1>, i32)
+declare <vscale x 2 x i1> @llvm.vp.is.fpclass.nxv2f64(<vscale x 2 x double>, i32, <vscale x 2 x i1>, i32)
+declare <vscale x 4 x i1> @llvm.vp.is.fpclass.nxv4f64(<vscale x 4 x double>, i32, <vscale x 4 x i1>, i32)
+declare <vscale x 8 x i1> @llvm.vp.is.fpclass.nxv8f64(<vscale x 8 x double>, i32, <vscale x 8 x i1>, i32)
diff --git a/llvm/test/Verifier/llvm.vp.is.fpclass.ll b/llvm/test/Verifier/llvm.vp.is.fpclass.ll
new file mode 100644
index 00000000000000..e5d2e1a5de4fdb
--- /dev/null
+++ b/llvm/test/Verifier/llvm.vp.is.fpclass.ll
@@ -0,0 +1,23 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+
+; CHECK: immarg operand has non-immediate parameter
+; CHECK-NEXT: i32 %variable
+; CHECK-NEXT: %ret = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %x, i32 %variable, <8 x i1> %m, i32 %evl)
+define <8 x i1> @test_mask_variable(<8 x double> %x, i32 %variable, <8 x i1> %m, i32 zeroext %evl) {
+ %ret = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %x, i32 %variable, <8 x i1> %m, i32 %evl)
+ ret <8 x i1> %ret
+}
+
+; CHECK: unsupported bits for llvm.vp.is.fpclass test mask
+define <8 x i1> @test_mask_neg1(<8 x double> %x, <8 x i1> %m, i32 zeroext %evl) {
+ %ret = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %x, i32 -1, <8 x i1> %m, i32 %evl)
+ ret <8 x i1> %ret
+}
+
+; CHECK: unsupported bits for llvm.vp.is.fpclass test mask
+define <8 x i1> @test_mask_bit11(<8 x double> %x, <8 x i1> %m, i32 zeroext %evl) {
+ %ret = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %x, i32 2048, <8 x i1> %m, i32 %evl)
+ ret <8 x i1> %ret
+}
+
+declare <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double>, i32, <8 x i1>, i32)
diff --git a/llvm/test/Verifier/vp-intrinsics.ll b/llvm/test/Verifier/vp-intrinsics.ll
index 96d3c6b218b210..765d67356c2b90 100644
--- a/llvm/test/Verifier/vp-intrinsics.ll
+++ b/llvm/test/Verifier/vp-intrinsics.ll
@@ -24,6 +24,7 @@ define void @test_vp_fp(<8 x double> %f0, <8 x double> %f1, <8 x i1> %m, i32 %n)
%r2 = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %f0, <8 x double> %f1, <8 x i1> %m, i32 %n)
%r3 = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %f0, <8 x double> %f1, <8 x i1> %m, i32 %n)
%r4 = call <8 x double> @llvm.vp.frem.v8f64(<8 x double> %f0, <8 x double> %f1, <8 x i1> %m, i32 %n)
+ %r5 = call <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double> %f0, i32 639, <8 x i1> %m, i32 %n)
ret void
}
@@ -99,6 +100,7 @@ declare <8 x double> @llvm.vp.fsub.v8f64(<8 x double>, <8 x double>, <8 x i1>, i
declare <8 x double> @llvm.vp.fmul.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32)
declare <8 x double> @llvm.vp.fdiv.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32)
declare <8 x double> @llvm.vp.frem.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32)
+declare <8 x i1> @llvm.vp.is.fpclass.v8f64(<8 x double>, i32, <8 x i1>, i32)
; reductions
declare i32 @llvm.vp.reduce.add.v8i32(i32, <8 x i32>, <8 x i1>, i32)
declare i32 @llvm.vp.reduce.mul.v8i32(i32, <8 x i32>, <8 x i1>, i32)
diff --git a/llvm/unittests/IR/VPIntrinsicTest.cpp b/llvm/unittests/IR/VPIntrinsicTest.cpp
index e03d2e0ad125f6..9590d9b068ecfb 100644
--- a/llvm/unittests/IR/VPIntrinsicTest.cpp
+++ b/llvm/unittests/IR/VPIntrinsicTest.cpp
@@ -115,6 +115,8 @@ class VPIntrinsicTest : public testing::Test {
"i32>, i32)";
Str << " declare <8 x i32> @llvm.vp.select.v8i32(<8 x i1>, <8 x i32>, <8 x "
"i32>, i32)";
+ Str << " declare <8 x i1> @llvm.vp.is.fpclass.v8f32(<8 x float>, i32, <8 x "
+ "i1>, i32)";
Str << " declare <8 x i32> @llvm.experimental.vp.splice.v8i32(<8 x "
"i32>, <8 x i32>, i32, <8 x i1>, i32, i32) ";
More information about the llvm-commits
mailing list