[llvm] 76fc871 - [RISCV] Support fptoi-like ops for fp16 vector inputs when we only have Zvfhmin (#67532)

via llvm-commits llvm-commits at lists.llvm.org
Sat Oct 7 19:44:23 PDT 2023


Author: Jianjian Guan
Date: 2023-10-08T10:44:19+08:00
New Revision: 76fc871552c28b37b81c67f411a90065077816a0

URL: https://github.com/llvm/llvm-project/commit/76fc871552c28b37b81c67f411a90065077816a0
DIFF: https://github.com/llvm/llvm-project/commit/76fc871552c28b37b81c67f411a90065077816a0.diff

LOG: [RISCV] Support fptoi-like ops for fp16 vector inputs when we only have Zvfhmin (#67532)

This patch supports FP_TO_SINT, FP_TO_UINT, VP_FP_TO_SINT and
VP_FP_TO_UINT for fp16 vector inputs when we only have Zvfhmin but not
Zvfh.
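
With only Zvfhmin, a conversion such as

    %evec = fptosi <vscale x 1 x half> %va to <vscale x 1 x i16>

now lowers to a widen-then-convert pair (this summarizes the
vfptoi-sdnode.ll update below):

    vsetvli a0, zero, e16, mf4, ta, ma
    vfwcvt.f.f.v v9, v8        # f16 -> f32 (FP_EXTEND)
    vfncvt.rtz.x.f.w v8, v9    # f32 -> i16 (FP_TO_SINT)

while with Zvfh it remains a single vfcvt.rtz.x.f.v. nxv32f16 inputs
are split first, since the widened nxv32f32 type is not legal.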

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
    llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
    llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f9344f6f6b096e3..b66a596f1cbe692 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5751,6 +5751,22 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     [[fallthrough]];
   case ISD::FP_TO_SINT:
   case ISD::FP_TO_UINT:
+    if (SDValue Op1 = Op.getOperand(0);
+        Op1.getValueType().isVector() &&
+        Op1.getValueType().getScalarType() == MVT::f16 &&
+        (Subtarget.hasVInstructionsF16Minimal() &&
+         !Subtarget.hasVInstructionsF16())) {
+      if (Op1.getValueType() == MVT::nxv32f16)
+        return SplitVectorOp(Op, DAG);
+      // f16 -> f32
+      SDLoc DL(Op);
+      MVT NVT = MVT::getVectorVT(MVT::f32,
+                                 Op1.getValueType().getVectorElementCount());
+      SDValue WidenVec = DAG.getNode(ISD::FP_EXTEND, DL, NVT, Op1);
+      // f32 -> int
+      return DAG.getNode(Op.getOpcode(), DL, Op.getValueType(), WidenVec);
+    }
+    [[fallthrough]];
   case ISD::STRICT_FP_TO_SINT:
   case ISD::STRICT_FP_TO_UINT:
   case ISD::STRICT_SINT_TO_FP:
@@ -6297,6 +6313,22 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     [[fallthrough]];
   case ISD::VP_FP_TO_SINT:
   case ISD::VP_FP_TO_UINT:
+    if (SDValue Op1 = Op.getOperand(0);
+        Op1.getValueType().isVector() &&
+        Op1.getValueType().getScalarType() == MVT::f16 &&
+        (Subtarget.hasVInstructionsF16Minimal() &&
+         !Subtarget.hasVInstructionsF16())) {
+      if (Op1.getValueType() == MVT::nxv32f16)
+        return SplitVPOp(Op, DAG);
+      // f16 -> f32
+      SDLoc DL(Op);
+      MVT NVT = MVT::getVectorVT(MVT::f32,
+                                 Op1.getValueType().getVectorElementCount());
+      SDValue WidenVec = DAG.getNode(ISD::FP_EXTEND, DL, NVT, Op1);
+      // f32 -> int
+      return DAG.getNode(Op.getOpcode(), DL, Op.getValueType(),
+                         {WidenVec, Op.getOperand(1), Op.getOperand(2)});
+    }
     return lowerVPFPIntConvOp(Op, DAG);
   case ISD::VP_SETCC:
     if (Op.getOperand(0).getSimpleValueType() == MVT::nxv32f16 &&

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
index 4e44abc2c0b7fa4..44b96d076df4552 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -1,8 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV64
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV64
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV32,LMULMAX8RV32ZVFH
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV64,LMULMAX8RV64ZVFH
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV32,LMULMAX1RV32ZVFH
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV64,LMULMAX1RV64ZVFH
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV32,LMULMAX8RV32ZVFHMIN
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV64,LMULMAX8RV64ZVFHMIN
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV32,LMULMAX1RV32ZVFHMIN
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+f,+d -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV64,LMULMAX1RV64ZVFHMIN
 
 define void @fp2si_v2f32_v2i32(ptr %x, ptr %y) {
 ; CHECK-LABEL: fp2si_v2f32_v2i32:
@@ -589,25 +593,145 @@ define void @fp2ui_v2f16_v2i64(ptr %x, ptr %y) {
 }
 
 define <2 x i1> @fp2si_v2f16_v2i1(<2 x half> %x) {
-; CHECK-LABEL: fp2si_v2f16_v2i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vand.vi v8, v9, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; LMULMAX8RV32ZVFH-LABEL: fp2si_v2f16_v2i1:
+; LMULMAX8RV32ZVFH:       # %bb.0:
+; LMULMAX8RV32ZVFH-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; LMULMAX8RV32ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; LMULMAX8RV32ZVFH-NEXT:    vand.vi v8, v9, 1
+; LMULMAX8RV32ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX8RV32ZVFH-NEXT:    ret
+;
+; LMULMAX8RV64ZVFH-LABEL: fp2si_v2f16_v2i1:
+; LMULMAX8RV64ZVFH:       # %bb.0:
+; LMULMAX8RV64ZVFH-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; LMULMAX8RV64ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; LMULMAX8RV64ZVFH-NEXT:    vand.vi v8, v9, 1
+; LMULMAX8RV64ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX8RV64ZVFH-NEXT:    ret
+;
+; LMULMAX1RV32ZVFH-LABEL: fp2si_v2f16_v2i1:
+; LMULMAX1RV32ZVFH:       # %bb.0:
+; LMULMAX1RV32ZVFH-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; LMULMAX1RV32ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; LMULMAX1RV32ZVFH-NEXT:    vand.vi v8, v9, 1
+; LMULMAX1RV32ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX1RV32ZVFH-NEXT:    ret
+;
+; LMULMAX1RV64ZVFH-LABEL: fp2si_v2f16_v2i1:
+; LMULMAX1RV64ZVFH:       # %bb.0:
+; LMULMAX1RV64ZVFH-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; LMULMAX1RV64ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; LMULMAX1RV64ZVFH-NEXT:    vand.vi v8, v9, 1
+; LMULMAX1RV64ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX1RV64ZVFH-NEXT:    ret
+;
+; LMULMAX8RV32ZVFHMIN-LABEL: fp2si_v2f16_v2i1:
+; LMULMAX8RV32ZVFHMIN:       # %bb.0:
+; LMULMAX8RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; LMULMAX8RV32ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; LMULMAX8RV32ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; LMULMAX8RV32ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; LMULMAX8RV32ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX8RV32ZVFHMIN-NEXT:    ret
+;
+; LMULMAX8RV64ZVFHMIN-LABEL: fp2si_v2f16_v2i1:
+; LMULMAX8RV64ZVFHMIN:       # %bb.0:
+; LMULMAX8RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; LMULMAX8RV64ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; LMULMAX8RV64ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; LMULMAX8RV64ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; LMULMAX8RV64ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX8RV64ZVFHMIN-NEXT:    ret
+;
+; LMULMAX1RV32ZVFHMIN-LABEL: fp2si_v2f16_v2i1:
+; LMULMAX1RV32ZVFHMIN:       # %bb.0:
+; LMULMAX1RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; LMULMAX1RV32ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; LMULMAX1RV32ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; LMULMAX1RV32ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; LMULMAX1RV32ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX1RV32ZVFHMIN-NEXT:    ret
+;
+; LMULMAX1RV64ZVFHMIN-LABEL: fp2si_v2f16_v2i1:
+; LMULMAX1RV64ZVFHMIN:       # %bb.0:
+; LMULMAX1RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; LMULMAX1RV64ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; LMULMAX1RV64ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; LMULMAX1RV64ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; LMULMAX1RV64ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX1RV64ZVFHMIN-NEXT:    ret
   %z = fptosi <2 x half> %x to <2 x i1>
   ret <2 x i1> %z
 }
 
 define <2 x i1> @fp2ui_v2f16_v2i1(<2 x half> %x) {
-; CHECK-LABEL: fp2ui_v2f16_v2i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vand.vi v8, v9, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; LMULMAX8RV32ZVFH-LABEL: fp2ui_v2f16_v2i1:
+; LMULMAX8RV32ZVFH:       # %bb.0:
+; LMULMAX8RV32ZVFH-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; LMULMAX8RV32ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
+; LMULMAX8RV32ZVFH-NEXT:    vand.vi v8, v9, 1
+; LMULMAX8RV32ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX8RV32ZVFH-NEXT:    ret
+;
+; LMULMAX8RV64ZVFH-LABEL: fp2ui_v2f16_v2i1:
+; LMULMAX8RV64ZVFH:       # %bb.0:
+; LMULMAX8RV64ZVFH-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; LMULMAX8RV64ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
+; LMULMAX8RV64ZVFH-NEXT:    vand.vi v8, v9, 1
+; LMULMAX8RV64ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX8RV64ZVFH-NEXT:    ret
+;
+; LMULMAX1RV32ZVFH-LABEL: fp2ui_v2f16_v2i1:
+; LMULMAX1RV32ZVFH:       # %bb.0:
+; LMULMAX1RV32ZVFH-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; LMULMAX1RV32ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
+; LMULMAX1RV32ZVFH-NEXT:    vand.vi v8, v9, 1
+; LMULMAX1RV32ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX1RV32ZVFH-NEXT:    ret
+;
+; LMULMAX1RV64ZVFH-LABEL: fp2ui_v2f16_v2i1:
+; LMULMAX1RV64ZVFH:       # %bb.0:
+; LMULMAX1RV64ZVFH-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; LMULMAX1RV64ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
+; LMULMAX1RV64ZVFH-NEXT:    vand.vi v8, v9, 1
+; LMULMAX1RV64ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX1RV64ZVFH-NEXT:    ret
+;
+; LMULMAX8RV32ZVFHMIN-LABEL: fp2ui_v2f16_v2i1:
+; LMULMAX8RV32ZVFHMIN:       # %bb.0:
+; LMULMAX8RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; LMULMAX8RV32ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; LMULMAX8RV32ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; LMULMAX8RV32ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; LMULMAX8RV32ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX8RV32ZVFHMIN-NEXT:    ret
+;
+; LMULMAX8RV64ZVFHMIN-LABEL: fp2ui_v2f16_v2i1:
+; LMULMAX8RV64ZVFHMIN:       # %bb.0:
+; LMULMAX8RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; LMULMAX8RV64ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; LMULMAX8RV64ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; LMULMAX8RV64ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; LMULMAX8RV64ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX8RV64ZVFHMIN-NEXT:    ret
+;
+; LMULMAX1RV32ZVFHMIN-LABEL: fp2ui_v2f16_v2i1:
+; LMULMAX1RV32ZVFHMIN:       # %bb.0:
+; LMULMAX1RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; LMULMAX1RV32ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; LMULMAX1RV32ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; LMULMAX1RV32ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; LMULMAX1RV32ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX1RV32ZVFHMIN-NEXT:    ret
+;
+; LMULMAX1RV64ZVFHMIN-LABEL: fp2ui_v2f16_v2i1:
+; LMULMAX1RV64ZVFHMIN:       # %bb.0:
+; LMULMAX1RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; LMULMAX1RV64ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; LMULMAX1RV64ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; LMULMAX1RV64ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; LMULMAX1RV64ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; LMULMAX1RV64ZVFHMIN-NEXT:    ret
   %z = fptoui <2 x half> %x to <2 x i1>
   ret <2 x i1> %z
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll
index 92799ea39136562..dab4b4d9926e1db 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp-mask.ll
@@ -1,27 +1,47 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <4 x i1> @llvm.vp.fptosi.v4i1.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x i1> @vfptosi_v4i1_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_v4i1_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
-; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_v4i1_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_v4i1_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i1> %v
 }
 
 define <4 x i1> @vfptosi_v4i1_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_v4i1_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_v4i1_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_v4i1_v4f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i1> @llvm.vp.fptosi.v4i1.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
   ret <4 x i1> %v
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
index 9a78c4daeb88f92..c673e396914bf39 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
@@ -1,16 +1,28 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <4 x i7> @llvm.vp.fptosi.v4i7.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x i7> @vfptosi_v4i7_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_v4i7_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_v4i7_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_v4i7_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i7> @llvm.vp.fptosi.v4i7.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i7> %v
 }
@@ -18,23 +30,43 @@ define <4 x i7> @vfptosi_v4i7_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %ev
 declare <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x i8> @vfptosi_v4i8_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_v4i8_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_v4i8_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_v4i8_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
 define <4 x i8> @vfptosi_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_v4i8_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_v4i8_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_v4i8_v4f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i8> @llvm.vp.fptosi.v4i8.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
   ret <4 x i8> %v
 }
@@ -42,21 +74,37 @@ define <4 x i8> @vfptosi_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 declare <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x i16> @vfptosi_v4i16_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_v4i16_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_v4i16_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_v4i16_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
 define <4 x i16> @vfptosi_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_v4i16_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_v4i16_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_v4i16_v4f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i16> @llvm.vp.fptosi.v4i16.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
   ret <4 x i16> %v
 }
@@ -64,23 +112,39 @@ define <4 x i16> @vfptosi_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 declare <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x i32> @vfptosi_v4i32_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_v4i32_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_v4i32_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_v4i32_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
 define <4 x i32> @vfptosi_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_v4i32_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_v4i32_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_v4i32_v4f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i32> @llvm.vp.fptosi.v4i32.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
   ret <4 x i32> %v
 }
@@ -88,25 +152,41 @@ define <4 x i32> @vfptosi_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 declare <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x i64> @vfptosi_v4i64_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_v4i64_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_v4i64_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_v4i64_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
 define <4 x i64> @vfptosi_v4i64_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_v4i64_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_v4i64_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v8, v10
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_v4i64_v4f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.rtz.x.f.v v8, v10
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i64> @llvm.vp.fptosi.v4i64.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
   ret <4 x i64> %v
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll
index 7e8f7ae8cd71d10..f1a78b25e186202 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp-mask.ll
@@ -1,27 +1,47 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <4 x i1> @llvm.vp.fptoui.v4i1.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x i1> @vfptoui_v4i1_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_v4i1_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
-; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_v4i1_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_v4i1_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i1> %v
 }
 
 define <4 x i1> @vfptoui_v4i1_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_v4i1_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_v4i1_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_v4i1_v4f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i1> @llvm.vp.fptoui.v4i1.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
   ret <4 x i1> %v
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
index 7672540c235c13d..0a19dcb550b58ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
@@ -1,16 +1,28 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <4 x i7> @llvm.vp.fptoui.v4i7.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x i7> @vfptoui_v4i7_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_v4i7_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_v4i7_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_v4i7_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i7> @llvm.vp.fptoui.v4i7.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i7> %v
 }
@@ -18,23 +30,43 @@ define <4 x i7> @vfptoui_v4i7_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %ev
 declare <4 x i8> @llvm.vp.fptoui.v4i8.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x i8> @vfptoui_v4i8_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_v4i8_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_v4i8_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8, v0.t
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_v4i8_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
 
 define <4 x i8> @vfptoui_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_v4i8_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_v4i8_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_v4i8_v4f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i8> @llvm.vp.fptoui.v4i8.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
   ret <4 x i8> %v
 }
@@ -42,21 +74,37 @@ define <4 x i8> @vfptoui_v4i8_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 declare <4 x i16> @llvm.vp.fptoui.v4i16.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x i16> @vfptoui_v4i16_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_v4i16_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_v4i16_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_v4i16_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
 
 define <4 x i16> @vfptoui_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_v4i16_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_v4i16_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_v4i16_v4f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i16> @llvm.vp.fptoui.v4i16.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
   ret <4 x i16> %v
 }
@@ -64,23 +112,39 @@ define <4 x i16> @vfptoui_v4i16_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 declare <4 x i32> @llvm.vp.fptoui.v4i32.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x i32> @vfptoui_v4i32_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_v4i32_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v9, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_v4i32_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v9, v8, v0.t
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_v4i32_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
 
 define <4 x i32> @vfptoui_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_v4i32_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_v4i32_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_v4i32_v4f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i32> @llvm.vp.fptoui.v4i32.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
   ret <4 x i32> %v
 }
@@ -88,25 +152,41 @@ define <4 x i32> @vfptoui_v4i32_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 declare <4 x i64> @llvm.vp.fptoui.v4i64.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x i64> @vfptoui_v4i64_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_v4i64_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_v4i64_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_v4i64_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
 
 define <4 x i64> @vfptoui_v4i64_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_v4i64_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_v4i64_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v8, v10
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_v4i64_v4f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.rtz.xu.f.v v8, v10
+; ZVFHMIN-NEXT:    ret
   %v = call <4 x i64> @llvm.vp.fptoui.v4i64.v4f16(<4 x half> %va, <4 x i1> shufflevector (<4 x i1> insertelement (<4 x i1> undef, i1 true, i32 0), <4 x i1> undef, <4 x i32> zeroinitializer), i32 %evl)
   ret <4 x i64> %v
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
index 3430d6991236fd1..8e983f63428a6a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
@@ -1,115 +1,203 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
-; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 define <vscale x 1 x i1> @vfptosi_nxv1f16_nxv1i1(<vscale x 1 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv1f16_nxv1i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vand.vi v8, v9, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv1f16_nxv1i1:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; ZVFH-NEXT:    vand.vi v8, v9, 1
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv1f16_nxv1i1:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 1 x half> %va to <vscale x 1 x i1>
   ret <vscale x 1 x i1> %evec
 }
 
 define <vscale x 1 x i7> @vfptosi_nxv1f16_nxv1i7(<vscale x 1 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv1f16_nxv1i7:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv1f16_nxv1i7:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv1f16_nxv1i7:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 1 x half> %va to <vscale x 1 x i7>
   ret <vscale x 1 x i7> %evec
 }
 
 define <vscale x 1 x i7> @vfptoui_nxv1f16_nxv1i7(<vscale x 1 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv1f16_nxv1i7:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv1f16_nxv1i7:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv1f16_nxv1i7:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 1 x half> %va to <vscale x 1 x i7>
   ret <vscale x 1 x i7> %evec
 }
 
 define <vscale x 1 x i1> @vfptoui_nxv1f16_nxv1i1(<vscale x 1 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv1f16_nxv1i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vand.vi v8, v9, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv1f16_nxv1i1:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
+; ZVFH-NEXT:    vand.vi v8, v9, 1
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv1f16_nxv1i1:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 1 x half> %va to <vscale x 1 x i1>
   ret <vscale x 1 x i1> %evec
 }
 
 define <vscale x 1 x i8> @vfptosi_nxv1f16_nxv1i8(<vscale x 1 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv1f16_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv1f16_nxv1i8:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv1f16_nxv1i8:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 1 x half> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %evec
 }
 
 define <vscale x 1 x i8> @vfptoui_nxv1f16_nxv1i8(<vscale x 1 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv1f16_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv1f16_nxv1i8:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv1f16_nxv1i8:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 1 x half> %va to <vscale x 1 x i8>
   ret <vscale x 1 x i8> %evec
 }
 
 define <vscale x 1 x i16> @vfptosi_nxv1f16_nxv1i16(<vscale x 1 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv1f16_nxv1i16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv1f16_nxv1i16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 1 x half> %va to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %evec
 }
 
 define <vscale x 1 x i16> @vfptoui_nxv1f16_nxv1i16(<vscale x 1 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv1f16_nxv1i16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv1f16_nxv1i16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 1 x half> %va to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %evec
 }
 
 define <vscale x 1 x i32> @vfptosi_nxv1f16_nxv1i32(<vscale x 1 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv1f16_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv1f16_nxv1i32:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv1f16_nxv1i32:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 1 x half> %va to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %evec
 }
 
 define <vscale x 1 x i32> @vfptoui_nxv1f16_nxv1i32(<vscale x 1 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv1f16_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv1f16_nxv1i32:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv1f16_nxv1i32:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 1 x half> %va to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %evec
 }
@@ -139,89 +227,155 @@ define <vscale x 1 x i64> @vfptoui_nxv1f16_nxv1i64(<vscale x 1 x half> %va) {
 }
 
 define <vscale x 2 x i1> @vfptosi_nxv2f16_nxv2i1(<vscale x 2 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv2f16_nxv2i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vand.vi v8, v9, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv2f16_nxv2i1:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; ZVFH-NEXT:    vand.vi v8, v9, 1
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv2f16_nxv2i1:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 2 x half> %va to <vscale x 2 x i1>
   ret <vscale x 2 x i1> %evec
 }
 
 define <vscale x 2 x i1> @vfptoui_nxv2f16_nxv2i1(<vscale x 2 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv2f16_nxv2i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vand.vi v8, v9, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv2f16_nxv2i1:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
+; ZVFH-NEXT:    vand.vi v8, v9, 1
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv2f16_nxv2i1:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 2 x half> %va to <vscale x 2 x i1>
   ret <vscale x 2 x i1> %evec
 }
 
 define <vscale x 2 x i8> @vfptosi_nxv2f16_nxv2i8(<vscale x 2 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv2f16_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv2f16_nxv2i8:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv2f16_nxv2i8:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 2 x half> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %evec
 }
 
 define <vscale x 2 x i8> @vfptoui_nxv2f16_nxv2i8(<vscale x 2 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv2f16_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv2f16_nxv2i8:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv2f16_nxv2i8:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 2 x half> %va to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %evec
 }
 
 define <vscale x 2 x i16> @vfptosi_nxv2f16_nxv2i16(<vscale x 2 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv2f16_nxv2i16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv2f16_nxv2i16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 2 x half> %va to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %evec
 }
 
 define <vscale x 2 x i16> @vfptoui_nxv2f16_nxv2i16(<vscale x 2 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv2f16_nxv2i16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv2f16_nxv2i16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 2 x half> %va to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %evec
 }
 
 define <vscale x 2 x i32> @vfptosi_nxv2f16_nxv2i32(<vscale x 2 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv2f16_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv2f16_nxv2i32:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv2f16_nxv2i32:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 2 x half> %va to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %evec
 }
 
 define <vscale x 2 x i32> @vfptoui_nxv2f16_nxv2i32(<vscale x 2 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv2f16_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv2f16_nxv2i32:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv2f16_nxv2i32:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 2 x half> %va to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %evec
 }
@@ -251,89 +405,155 @@ define <vscale x 2 x i64> @vfptoui_nxv2f16_nxv2i64(<vscale x 2 x half> %va) {
 }
 
 define <vscale x 4 x i1> @vfptosi_nxv4f16_nxv4i1(<vscale x 4 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv4f16_nxv4i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vand.vi v8, v9, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv4f16_nxv4i1:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; ZVFH-NEXT:    vand.vi v8, v9, 1
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv4f16_nxv4i1:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v10
+; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 4 x half> %va to <vscale x 4 x i1>
   ret <vscale x 4 x i1> %evec
 }
 
 define <vscale x 4 x i1> @vfptoui_nxv4f16_nxv4i1(<vscale x 4 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv4f16_nxv4i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vand.vi v8, v9, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv4f16_nxv4i1:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
+; ZVFH-NEXT:    vand.vi v8, v9, 1
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv4f16_nxv4i1:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v10
+; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 4 x half> %va to <vscale x 4 x i1>
   ret <vscale x 4 x i1> %evec
 }
 
 define <vscale x 4 x i8> @vfptosi_nxv4f16_nxv4i8(<vscale x 4 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv4f16_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv4f16_nxv4i8:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv4f16_nxv4i8:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 4 x half> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %evec
 }
 
 define <vscale x 4 x i8> @vfptoui_nxv4f16_nxv4i8(<vscale x 4 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv4f16_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv4f16_nxv4i8:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv4f16_nxv4i8:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 4 x half> %va to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %evec
 }
 
 define <vscale x 4 x i16> @vfptosi_nxv4f16_nxv4i16(<vscale x 4 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv4f16_nxv4i16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv4f16_nxv4i16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v10
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 4 x half> %va to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %evec
 }
 
 define <vscale x 4 x i16> @vfptoui_nxv4f16_nxv4i16(<vscale x 4 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv4f16_nxv4i16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv4f16_nxv4i16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v10
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 4 x half> %va to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %evec
 }
 
 define <vscale x 4 x i32> @vfptosi_nxv4f16_nxv4i32(<vscale x 4 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv4f16_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv4f16_nxv4i32:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v10, v8
+; ZVFH-NEXT:    vmv2r.v v8, v10
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv4f16_nxv4i32:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v10
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 4 x half> %va to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %evec
 }
 
 define <vscale x 4 x i32> @vfptoui_nxv4f16_nxv4i32(<vscale x 4 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv4f16_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v10, v8
-; CHECK-NEXT:    vmv2r.v v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv4f16_nxv4i32:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v10, v8
+; ZVFH-NEXT:    vmv2r.v v8, v10
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv4f16_nxv4i32:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v10
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 4 x half> %va to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %evec
 }
@@ -363,89 +583,155 @@ define <vscale x 4 x i64> @vfptoui_nxv4f16_nxv4i64(<vscale x 4 x half> %va) {
 }
 
 define <vscale x 8 x i1> @vfptosi_nxv8f16_nxv8i1(<vscale x 8 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv8f16_nxv8i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
-; CHECK-NEXT:    vand.vi v8, v10, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv8f16_nxv8i1:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v10, v8
+; ZVFH-NEXT:    vand.vi v8, v10, 1
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv8f16_nxv8i1:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v12
+; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 8 x half> %va to <vscale x 8 x i1>
   ret <vscale x 8 x i1> %evec
 }
 
 define <vscale x 8 x i1> @vfptoui_nxv8f16_nxv8i1(<vscale x 8 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv8f16_nxv8i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
-; CHECK-NEXT:    vand.vi v8, v10, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv8f16_nxv8i1:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v10, v8
+; ZVFH-NEXT:    vand.vi v8, v10, 1
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv8f16_nxv8i1:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v12
+; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 8 x half> %va to <vscale x 8 x i1>
   ret <vscale x 8 x i1> %evec
 }
 
 define <vscale x 8 x i8> @vfptosi_nxv8f16_nxv8i8(<vscale x 8 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv8f16_nxv8i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv8f16_nxv8i8:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v10, v8
+; ZVFH-NEXT:    vmv.v.v v8, v10
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv8f16_nxv8i8:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v10, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v10, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 8 x half> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %evec
 }
 
 define <vscale x 8 x i8> @vfptoui_nxv8f16_nxv8i8(<vscale x 8 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv8f16_nxv8i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv8f16_nxv8i8:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v10, v8
+; ZVFH-NEXT:    vmv.v.v v8, v10
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv8f16_nxv8i8:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v10, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v10, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 8 x half> %va to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %evec
 }
 
 define <vscale x 8 x i16> @vfptosi_nxv8f16_nxv8i16(<vscale x 8 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv8f16_nxv8i16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv8f16_nxv8i16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v12
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 8 x half> %va to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %evec
 }
 
 define <vscale x 8 x i16> @vfptoui_nxv8f16_nxv8i16(<vscale x 8 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv8f16_nxv8i16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv8f16_nxv8i16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v12
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 8 x half> %va to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %evec
 }
 
 define <vscale x 8 x i32> @vfptosi_nxv8f16_nxv8i32(<vscale x 8 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv8f16_nxv8i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv8f16_nxv8i32:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v12, v8
+; ZVFH-NEXT:    vmv4r.v v8, v12
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv8f16_nxv8i32:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v12
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 8 x half> %va to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %evec
 }
 
 define <vscale x 8 x i32> @vfptoui_nxv8f16_nxv8i32(<vscale x 8 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv8f16_nxv8i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v12, v8
-; CHECK-NEXT:    vmv4r.v v8, v12
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv8f16_nxv8i32:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v12, v8
+; ZVFH-NEXT:    vmv4r.v v8, v12
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv8f16_nxv8i32:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v12
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 8 x half> %va to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %evec
 }
@@ -475,155 +761,303 @@ define <vscale x 8 x i64> @vfptoui_nxv8f16_nxv8i64(<vscale x 8 x half> %va) {
 }
 
 define <vscale x 16 x i1> @vfptosi_nxv16f16_nxv16i1(<vscale x 16 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv16f16_nxv16i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
-; CHECK-NEXT:    vand.vi v8, v12, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv16f16_nxv16i1:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v12, v8
+; ZVFH-NEXT:    vand.vi v8, v12, 1
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv16f16_nxv16i1:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v16
+; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 16 x half> %va to <vscale x 16 x i1>
   ret <vscale x 16 x i1> %evec
 }
 
 define <vscale x 16 x i1> @vfptoui_nxv16f16_nxv16i1(<vscale x 16 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv16f16_nxv16i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
-; CHECK-NEXT:    vand.vi v8, v12, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv16f16_nxv16i1:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v12, v8
+; ZVFH-NEXT:    vand.vi v8, v12, 1
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv16f16_nxv16i1:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v16
+; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 16 x half> %va to <vscale x 16 x i1>
   ret <vscale x 16 x i1> %evec
 }
 
 define <vscale x 16 x i8> @vfptosi_nxv16f16_nxv16i8(<vscale x 16 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv16f16_nxv16i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv16f16_nxv16i8:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v12, v8
+; ZVFH-NEXT:    vmv.v.v v8, v12
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv16f16_nxv16i8:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v12, v16
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v12, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 16 x half> %va to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %evec
 }
 
 define <vscale x 16 x i8> @vfptoui_nxv16f16_nxv16i8(<vscale x 16 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv16f16_nxv16i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv16f16_nxv16i8:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v12, v8
+; ZVFH-NEXT:    vmv.v.v v8, v12
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv16f16_nxv16i8:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v12, v16
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v12, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 16 x half> %va to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %evec
 }
 
 define <vscale x 16 x i16> @vfptosi_nxv16f16_nxv16i16(<vscale x 16 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv16f16_nxv16i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv16f16_nxv16i16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv16f16_nxv16i16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v16
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 16 x half> %va to <vscale x 16 x i16>
   ret <vscale x 16 x i16> %evec
 }
 
 define <vscale x 16 x i16> @vfptoui_nxv16f16_nxv16i16(<vscale x 16 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv16f16_nxv16i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv16f16_nxv16i16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv16f16_nxv16i16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v16
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 16 x half> %va to <vscale x 16 x i16>
   ret <vscale x 16 x i16> %evec
 }
 
 define <vscale x 16 x i32> @vfptosi_nxv16f16_nxv16i32(<vscale x 16 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv16f16_nxv16i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv16f16_nxv16i32:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v16, v8
+; ZVFH-NEXT:    vmv8r.v v8, v16
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv16f16_nxv16i32:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v16
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 16 x half> %va to <vscale x 16 x i32>
   ret <vscale x 16 x i32> %evec
 }
 
 define <vscale x 16 x i32> @vfptoui_nxv16f16_nxv16i32(<vscale x 16 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv16f16_nxv16i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v16, v8
-; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv16f16_nxv16i32:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v16, v8
+; ZVFH-NEXT:    vmv8r.v v8, v16
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv16f16_nxv16i32:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v16
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 16 x half> %va to <vscale x 16 x i32>
   ret <vscale x 16 x i32> %evec
 }
 
 define <vscale x 32 x i1> @vfptosi_nxv32f16_nxv32i1(<vscale x 32 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv32f16_nxv32i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8
-; CHECK-NEXT:    vand.vi v8, v16, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv32f16_nxv32i1:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v16, v8
+; ZVFH-NEXT:    vand.vi v8, v16, 1
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv32f16_nxv32i1:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    csrr a0, vlenb
+; ZVFHMIN-NEXT:    srli a0, a0, 2
+; ZVFHMIN-NEXT:    add a1, a0, a0
+; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v12, v16
+; ZVFHMIN-NEXT:    vand.vi v12, v12, 1
+; ZVFHMIN-NEXT:    vmsne.vi v16, v12, 0
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v24
+; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
+; ZVFHMIN-NEXT:    vslideup.vx v0, v16, a0
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 32 x half> %va to <vscale x 32 x i1>
   ret <vscale x 32 x i1> %evec
 }
 
 define <vscale x 32 x i1> @vfptoui_nxv32f16_nxv32i1(<vscale x 32 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv32f16_nxv32i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v16, v8
-; CHECK-NEXT:    vand.vi v8, v16, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv32f16_nxv32i1:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v16, v8
+; ZVFH-NEXT:    vand.vi v8, v16, 1
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv32f16_nxv32i1:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    csrr a0, vlenb
+; ZVFHMIN-NEXT:    srli a0, a0, 2
+; ZVFHMIN-NEXT:    add a1, a0, a0
+; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v12, v16
+; ZVFHMIN-NEXT:    vand.vi v12, v12, 1
+; ZVFHMIN-NEXT:    vmsne.vi v16, v12, 0
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v24
+; ZVFHMIN-NEXT:    vand.vi v8, v8, 1
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
+; ZVFHMIN-NEXT:    vslideup.vx v0, v16, a0
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 32 x half> %va to <vscale x 32 x i1>
   ret <vscale x 32 x i1> %evec
 }
 
 define <vscale x 32 x i8> @vfptosi_nxv32f16_nxv32i8(<vscale x 32 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv32f16_nxv32i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv32f16_nxv32i8:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v16, v8
+; ZVFH-NEXT:    vmv.v.v v8, v16
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv32f16_nxv32i8:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v24, v16
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v24, 0
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v12, v16
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v10, v12, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 32 x half> %va to <vscale x 32 x i8>
   ret <vscale x 32 x i8> %evec
 }
 
 define <vscale x 32 x i8> @vfptoui_nxv32f16_nxv32i8(<vscale x 32 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv32f16_nxv32i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv32f16_nxv32i8:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v16, v8
+; ZVFH-NEXT:    vmv.v.v v8, v16
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv32f16_nxv32i8:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v24, v16
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v24, 0
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v12, v16
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v10, v12, 0
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 32 x half> %va to <vscale x 32 x i8>
   ret <vscale x 32 x i8> %evec
 }
 
 define <vscale x 32 x i16> @vfptosi_nxv32f16_nxv32i16(<vscale x 32 x half> %va) {
-; CHECK-LABEL: vfptosi_nxv32f16_nxv32i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv32f16_nxv32i16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv32f16_nxv32i16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v16
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v12, v16
+; ZVFHMIN-NEXT:    ret
   %evec = fptosi <vscale x 32 x half> %va to <vscale x 32 x i16>
   ret <vscale x 32 x i16> %evec
 }
 
 define <vscale x 32 x i16> @vfptoui_nxv32f16_nxv32i16(<vscale x 32 x half> %va) {
-; CHECK-LABEL: vfptoui_nxv32f16_nxv32i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv32f16_nxv32i16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv32f16_nxv32i16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v16
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v12, v16
+; ZVFHMIN-NEXT:    ret
   %evec = fptoui <vscale x 32 x half> %va to <vscale x 32 x i16>
   ret <vscale x 32 x i16> %evec
 }
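
The ZVFHMIN check lines above all follow one shape: the fp16 source is widened to f32 with
vfwcvt.f.f.v, and the round-towards-zero integer conversion then runs at the f32 SEW
(vfcvt.rtz.x[u].f.v for i32 results, vfncvt.rtz.x[u].f.w for i16). Narrower results need one
more step, since a narrowing conversion can only halve the element width: i8 adds a vnsrl.wi,
and i1 adds the usual vand.vi/vmsne.vi pair. nxv32f16 is the exception because the widened
nxv32f32 type is not legal, so the input is split in half first, as the paired vfwcvt/vfncvt
sequences in the nxv32 tests show. In generic IR terms, the f16 -> i32 and f16 -> i8 cases
amount to roughly the following (an illustrative sketch of the end result, not the actual
SelectionDAG legalization steps):

  ; Sketch only: f16 -> i32 becomes widen-to-f32 plus convert-at-e32
  ; (vfwcvt.f.f.v + vfcvt.rtz.x.f.v in the checks above).
  %wide = fpext <vscale x 2 x half> %va to <vscale x 2 x float>
  %i32v = fptosi <vscale x 2 x float> %wide to <vscale x 2 x i32>

  ; Sketch only: f16 -> i8 narrows f32 -> i16 (vfncvt.rtz.x.f.w), then
  ; truncates i16 -> i8 (the vnsrl.wi ..., 0 in the checks above).
  %i16v = fptosi <vscale x 2 x float> %wide to <vscale x 2 x i16>
  %i8v  = trunc <vscale x 2 x i16> %i16v to <vscale x 2 x i8>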

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
index 908cb0b15360d16..9061c38975e2835 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
@@ -1,27 +1,47 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <vscale x 2 x i1> @llvm.vp.fptosi.nxv2i1.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i1> @vfptosi_nxv2i1_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_nxv2i1_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
-; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv2i1_nxv2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv2i1_nxv2f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i1> @llvm.vp.fptosi.nxv2i1.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i1> %v
 }
 
 define <vscale x 2 x i1> @vfptosi_nxv2i1_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_nxv2i1_nxv2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv2i1_nxv2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv2i1_nxv2f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i1> @llvm.vp.fptosi.nxv2i1.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
   ret <vscale x 2 x i1> %v
 }
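
Note how masking splits across the two vsetvli uses in the ZVFHMIN output just above: the
widening vfwcvt.f.f.v runs unmasked at VLMAX (vsetvli a1, zero), and only the conversion and
the vmsne.vi compare carry the mask (v0.t) and the EVL from a0. Conceptually, what remains
after the widen is an f32 VP conversion (sketch only; the plain fpext here stands in for the
unmasked VLMAX widen):

  ; Sketch only, not the actual lowering sequence.
  %wide = fpext <vscale x 2 x half> %va to <vscale x 2 x float>
  %v = call <vscale x 2 x i1> @llvm.vp.fptosi.nxv2i1.nxv2f32(<vscale x 2 x float> %wide, <vscale x 2 x i1> %m, i32 %evl)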

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
index 9b0416b4032430a..9e7d6f92d84e936 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -1,16 +1,28 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <vscale x 2 x i7> @llvm.vp.fptosi.v4i7.v4f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i7> @vfptosi_v4i7_v4f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_v4i7_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_v4i7_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_v4i7_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i7> @llvm.vp.fptosi.v4i7.v4f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i7> %v
 }
@@ -18,23 +30,43 @@ define <vscale x 2 x i7> @vfptosi_v4i7_v4f16(<vscale x 2 x half> %va, <vscale x
 declare <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_nxv2i8_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv2i8_nxv2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv2i8_nxv2f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
 define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_nxv2i8_nxv2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv2i8_nxv2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv2i8_nxv2f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
@@ -42,21 +74,37 @@ define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f16_unmasked(<vscale x 2 x half> %v
 declare <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_nxv2i16_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv2i16_nxv2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv2i16_nxv2f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
 define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_nxv2i16_nxv2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv2i16_nxv2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv2i16_nxv2f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
@@ -64,23 +112,39 @@ define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f16_unmasked(<vscale x 2 x half>
 declare <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_nxv2i32_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv2i32_nxv2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv2i32_nxv2f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
 define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_nxv2i32_nxv2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv2i32_nxv2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv2i32_nxv2f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
@@ -88,25 +152,41 @@ define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f16_unmasked(<vscale x 2 x half>
 declare <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_nxv2i64_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv2i64_nxv2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv2i64_nxv2f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.rtz.x.f.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
 define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptosi_nxv2i64_nxv2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.x.f.v v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptosi_nxv2i64_nxv2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.x.f.v v8, v10
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptosi_nxv2i64_nxv2f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.rtz.x.f.v v8, v10
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
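
For these nxv2i64 cases ZVFH and ZVFHMIN end up nearly identical: f16 -> i64 always goes
through f32 (vfwcvt.f.f.v followed by vfwcvt.rtz.x.f.v), so the only difference is that with
Zvfh the widen itself is masked (v0.t) while the Zvfhmin path widens unmasked at VLMAX.
Written as VP intrinsics, the masked two-step form looks roughly like this (illustrative
only; @llvm.vp.fpext models the ZVFH-style masked widen):

  ; Sketch only, not the actual lowering sequence.
  %f32 = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
  %res = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float> %f32, <vscale x 2 x i1> %m, i32 %evl)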

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
index 1f07c3d1159c9ab..6646171fcd15eb3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
@@ -1,27 +1,47 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_nxv2i1_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
-; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv2i1_nxv2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv2i1_nxv2f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i1> %v
 }
 
 define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_nxv2i1_nxv2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv2i1_nxv2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8
+; ZVFH-NEXT:    vmsne.vi v0, v8, 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv2i1_nxv2f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9
+; ZVFHMIN-NEXT:    vmsne.vi v0, v8, 0
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i1> @llvm.vp.fptoui.nxv2i1.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
   ret <vscale x 2 x i1> %v
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
index 00f8fb1db5ab213..486efbe66a6fea0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
@@ -1,16 +1,28 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfh < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+zfh,+zvfhmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <vscale x 2 x i7> @llvm.vp.fptoui.v4i7.v4f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i7> @vfptoui_v4i7_v4f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_v4i7_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_v4i7_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.x.f.w v9, v8, v0.t
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_v4i7_v4f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.x.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i7> @llvm.vp.fptoui.v4i7.v4f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i7> %v
 }
@@ -18,23 +30,43 @@ define <vscale x 2 x i7> @vfptoui_v4i7_v4f16(<vscale x 2 x half> %va, <vscale x
 declare <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_nxv2i8_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv2i8_nxv2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8, v0.t
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv2i8_nxv2f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
 
 define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_nxv2i8_nxv2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv2i8_nxv2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVFH-NEXT:    vfncvt.rtz.xu.f.w v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv2i8_nxv2f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVFHMIN-NEXT:    vnsrl.wi v8, v8, 0
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
@@ -42,21 +74,37 @@ define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f16_unmasked(<vscale x 2 x half> %v
 declare <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_nxv2i16_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv2i16_nxv2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv2i16_nxv2f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
 
 define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_nxv2i16_nxv2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.rtz.xu.f.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv2i16_nxv2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.rtz.xu.f.v v8, v8
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv2i16_nxv2f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.rtz.xu.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
@@ -64,23 +112,39 @@ define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f16_unmasked(<vscale x 2 x half>
 declare <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_nxv2i32_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v9, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv2i32_nxv2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v9, v8, v0.t
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv2i32_nxv2f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
 
 define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_nxv2i32_nxv2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv2i32_nxv2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v9, v8
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv2i32_nxv2f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.xu.f.v v8, v9
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
@@ -88,25 +152,41 @@ define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2f16_unmasked(<vscale x 2 x half>
 declare <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_nxv2i64_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv2i64_nxv2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv2i64_nxv2f16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.rtz.xu.f.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
 
 define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfptoui_nxv2i64_nxv2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfwcvt.rtz.xu.f.v v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfptoui_nxv2i64_nxv2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT:    vfwcvt.rtz.xu.f.v v8, v10
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfptoui_nxv2i64_nxv2f16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.rtz.xu.f.v v8, v10
+; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
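A note for anyone reading the ZVFHMIN check lines above: Zvfhmin provides only the f16<->f32 widening/narrowing conversions, so every ZVFHMIN block begins with a vfwcvt.f.f.v that widens the f16 source to f32, and the rtz conversion then runs at the wider element type (with a trailing vfncvt, plus a vnsrl.wi for the i8 results, to narrow back down). A minimal IR-level sketch of that two-step shape for the i32 case, using a hypothetical function name chosen purely for illustration:

define <vscale x 2 x i32> @widen_then_fptoui(<vscale x 2 x half> %va) {
  ; Widen the f16 source to f32 first (the vfwcvt.f.f.v in the output) ...
  %ext = fpext <vscale x 2 x half> %va to <vscale x 2 x float>
  ; ... then convert f32 -> i32 (the vfcvt.rtz.xu.f.v in the output).
  %v = fptoui <vscale x 2 x float> %ext to <vscale x 2 x i32>
  ret <vscale x 2 x i32> %v
}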