[llvm] 57aee4e - [RISCV] Add check-prefixes ZVFH for zvfh and CHECK for zvfhmin. NFC.

Jim Lin via llvm-commits llvm-commits at lists.llvm.org
Sun Aug 27 20:16:11 PDT 2023


Author: Jim Lin
Date: 2023-08-28T11:14:00+08:00
New Revision: 57aee4ea69c73c5be8023d1ba829532a2cdfaa0e

URL: https://github.com/llvm/llvm-project/commit/57aee4ea69c73c5be8023d1ba829532a2cdfaa0e
DIFF: https://github.com/llvm/llvm-project/commit/57aee4ea69c73c5be8023d1ba829532a2cdfaa0e.diff

LOG: [RISCV] Add check-prefixes ZVFH for zvfh and CHECK for zvfhmin. NFC.

This improves readability by making it clear where codegen for Zvfh and Zvfhmin is identical and where it differs.
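
For context, FileCheck matches a check line for any prefix named in that run's
--check-prefixes list, so output shared by both extensions can be checked once
under CHECK while diverging output gets a ZVFH or ZVFHMIN block. A minimal
sketch of the resulting layout (the RUN lines are taken from the patch; the
function names are illustrative, not from the test files):

    ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
    ; RUN:   -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
    ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d \
    ; RUN:   -riscv-v-vector-bits-min=128 < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

    ; f32/f64 codegen is the same under both extensions, so a single
    ; CHECK block is verified by both runs:
    ; CHECK-LABEL: f32_case:

    ; f16 codegen differs, so each run verifies only its own block:
    ; ZVFH-LABEL: f16_case:
    ; ZVFHMIN-LABEL: f16_case: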

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
index 5c98de31079dd2..ce31c1014f9510 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -1,32 +1,32 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.ceil.v2f16(<2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vp_ceil_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_ceil_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI0_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 3
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -53,20 +53,20 @@ define <2 x half> @vp_ceil_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 }
 
 define <2 x half> @vp_ceil_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_ceil_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI1_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 3
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -95,22 +95,22 @@ define <2 x half> @vp_ceil_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 declare <4 x half> @llvm.vp.ceil.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vp_ceil_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_ceil_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI2_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 3
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -137,20 +137,20 @@ define <4 x half> @vp_ceil_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 }
 
 define <4 x half> @vp_ceil_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_ceil_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI3_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 3
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -179,22 +179,22 @@ define <4 x half> @vp_ceil_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 declare <8 x half> @llvm.vp.ceil.v8f16(<8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_ceil_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI4_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 3
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -223,20 +223,20 @@ define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 }
 
 define <8 x half> @vp_ceil_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_ceil_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI5_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 3
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -265,24 +265,24 @@ define <8 x half> @vp_ceil_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 declare <16 x half> @llvm.vp.ceil.v16f16(<16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_ceil_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vmv1r.v v10, v0
+; ZVFH-NEXT:    lui a1, %hi(.LCPI6_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vmflt.vf v10, v12, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 3
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vmv1r.v v0, v10
+; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -311,20 +311,20 @@ define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
 }
 
 define <16 x half> @vp_ceil_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, fa5
-; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_ceil_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI7_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
+; ZVFH-NEXT:    fsrmi a0, 3
+; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -369,23 +369,6 @@ define <2 x float> @vp_ceil_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.ceil.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -405,21 +388,6 @@ define <2 x float> @vp_ceil_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.ceil.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -445,23 +413,6 @@ define <4 x float> @vp_ceil_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.ceil.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -481,21 +432,6 @@ define <4 x float> @vp_ceil_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.ceil.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
@@ -523,25 +459,6 @@ define <8 x float> @vp_ceil_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v10, v0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v10
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.ceil.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -561,21 +478,6 @@ define <8 x float> @vp_ceil_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v10, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.ceil.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
@@ -603,25 +505,6 @@ define <16 x float> @vp_ceil_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v12, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.ceil.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -641,21 +524,6 @@ define <16 x float> @vp_ceil_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.ceil.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
@@ -681,23 +549,6 @@ define <2 x double> @vp_ceil_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI16_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.ceil.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -717,21 +568,6 @@ define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI17_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.ceil.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
@@ -759,25 +595,6 @@ define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v10, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI18_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI18_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v10
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.ceil.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -797,21 +614,6 @@ define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI19_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v10, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.ceil.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
@@ -839,25 +641,6 @@ define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI20_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI20_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v12, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.ceil.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -877,21 +660,6 @@ define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI21_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.ceil.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
@@ -919,25 +687,6 @@ define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v15f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI22_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v16, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <15 x double> @llvm.vp.ceil.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
   ret <15 x double> %v
 }
@@ -957,21 +706,6 @@ define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v15f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI23_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <15 x i1> poison, i1 true, i32 0
   %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
   %v = call <15 x double> @llvm.vp.ceil.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
@@ -999,25 +733,6 @@ define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI24_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v16, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.ceil.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -1037,21 +752,6 @@ define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI25_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.ceil.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
@@ -1129,75 +829,6 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v32f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 4
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT:    vmv1r.v v25, v0
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    vslidedown.vi v1, v0, 2
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB26_2:
-; ZVFHMIN-NEXT:    lui a2, %hi(.LCPI26_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a2)
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v25, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a1, 3
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vmflt.vf v1, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    ret
   %v = call <32 x double> @llvm.vp.ceil.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
 }
@@ -1236,40 +867,6 @@ define <32 x double> @vp_ceil_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_ceil_v32f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB27_2:
-; ZVFHMIN-NEXT:    lui a2, %hi(.LCPI27_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; ZVFHMIN-NEXT:    fsrmi a1, 3
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v16
-; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <32 x i1> poison, i1 true, i32 0
   %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
   %v = call <32 x double> @llvm.vp.ceil.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
index 934349f22ebfc0..f1d67e77a00b5d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -1,32 +1,32 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.floor.v2f16(<2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vp_floor_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_floor_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI0_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 2
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_floor_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -53,20 +53,20 @@ define <2 x half> @vp_floor_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 }
 
 define <2 x half> @vp_floor_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_floor_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI1_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 2
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_floor_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -95,22 +95,22 @@ define <2 x half> @vp_floor_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 declare <4 x half> @llvm.vp.floor.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vp_floor_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_floor_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI2_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 2
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_floor_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -137,20 +137,20 @@ define <4 x half> @vp_floor_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 }
 
 define <4 x half> @vp_floor_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_floor_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI3_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 2
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_floor_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -179,22 +179,22 @@ define <4 x half> @vp_floor_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 declare <8 x half> @llvm.vp.floor.v8f16(<8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_floor_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI4_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 2
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_floor_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -223,20 +223,20 @@ define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 }
 
 define <8 x half> @vp_floor_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_floor_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI5_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 2
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_floor_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -265,24 +265,24 @@ define <8 x half> @vp_floor_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 declare <16 x half> @llvm.vp.floor.v16f16(<16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_floor_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vmv1r.v v10, v0
+; ZVFH-NEXT:    lui a1, %hi(.LCPI6_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vmflt.vf v10, v12, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 2
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vmv1r.v v0, v10
+; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_floor_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -311,20 +311,20 @@ define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
 }
 
 define <16 x half> @vp_floor_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, fa5
-; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_floor_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI7_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
+; ZVFH-NEXT:    fsrmi a0, 2
+; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_floor_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -369,23 +369,6 @@ define <2 x float> @vp_floor_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.floor.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -405,21 +388,6 @@ define <2 x float> @vp_floor_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.floor.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -445,23 +413,6 @@ define <4 x float> @vp_floor_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.floor.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -481,21 +432,6 @@ define <4 x float> @vp_floor_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.floor.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
@@ -523,25 +459,6 @@ define <8 x float> @vp_floor_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v10, v0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v10
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.floor.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -561,21 +478,6 @@ define <8 x float> @vp_floor_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v10, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.floor.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
@@ -603,25 +505,6 @@ define <16 x float> @vp_floor_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v12, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.floor.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -641,21 +524,6 @@ define <16 x float> @vp_floor_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.floor.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
@@ -681,23 +549,6 @@ define <2 x double> @vp_floor_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI16_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.floor.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -717,21 +568,6 @@ define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI17_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.floor.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
@@ -759,25 +595,6 @@ define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v10, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI18_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI18_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v10
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.floor.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -797,21 +614,6 @@ define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI19_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v10, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.floor.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
@@ -839,25 +641,6 @@ define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI20_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI20_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v12, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.floor.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -877,21 +660,6 @@ define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI21_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.floor.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
@@ -919,25 +687,6 @@ define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v15f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI22_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v16, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <15 x double> @llvm.vp.floor.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
   ret <15 x double> %v
 }
@@ -957,21 +706,6 @@ define <15 x double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v15f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI23_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <15 x i1> poison, i1 true, i32 0
   %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
   %v = call <15 x double> @llvm.vp.floor.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
@@ -999,25 +733,6 @@ define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI24_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v16, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.floor.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -1037,21 +752,6 @@ define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI25_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.floor.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
@@ -1129,75 +829,6 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v32f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 4
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT:    vmv1r.v v25, v0
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    vslidedown.vi v1, v0, 2
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB26_2:
-; ZVFHMIN-NEXT:    lui a2, %hi(.LCPI26_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a2)
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v25, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a1, 2
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vmflt.vf v1, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    ret
   %v = call <32 x double> @llvm.vp.floor.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
 }
@@ -1236,40 +867,6 @@ define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_floor_v32f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB27_2:
-; ZVFHMIN-NEXT:    lui a2, %hi(.LCPI27_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; ZVFHMIN-NEXT:    fsrmi a1, 2
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v16
-; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <32 x i1> poison, i1 true, i32 0
   %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
   %v = call <32 x double> @llvm.vp.floor.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)

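A note on the recurring constant in the f32 hunks above: 307200 is the single-precision threshold 2^23 in disguise.

    lui a0, 307200      # a0 = 307200 << 12 = 0x4B000000
    fmv.w.x fa5, a0     # 0x4B000000 reinterpreted as a float is 2^23 = 8388608.0

Every f32 value with |x| >= 2^23 is already an integer, so vmflt.vf masks off exactly the lanes that still have a fractional part; only those lanes go through the vfcvt.x.f.v / vfcvt.f.x.v round trip, and vfsgnj.vv copies the original sign back so -0.0 survives. For f64 the analogous threshold is 2^52 (presumably the value held by the .LCPI*_0 constant pools, which are not visible in the diff itself); that constant has no short lui/fmv.w.x encoding, hence the %hi/%lo fld loads in the double-precision checks.
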
diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
index 1abcd6ee47f455..e5333be703d931 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
@@ -1,30 +1,30 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 ; This file tests the code generation for `llvm.round.*` on fixed vector types.
 
 define <1 x half> @round_v1f16(<1 x half> %x) {
-; CHECK-LABEL: round_v1f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a0)
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: round_v1f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a0, %hi(.LCPI0_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI0_0)(a0)
+; ZVFH-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 4
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: round_v1f16:
 ; ZVFHMIN:       # %bb.0:
@@ -50,20 +50,20 @@ define <1 x half> @round_v1f16(<1 x half> %x) {
 declare <1 x half> @llvm.round.v1f16(<1 x half>)
 
 define <2 x half> @round_v2f16(<2 x half> %x) {
-; CHECK-LABEL: round_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: round_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a0, %hi(.LCPI1_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 4
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: round_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -89,20 +89,20 @@ define <2 x half> @round_v2f16(<2 x half> %x) {
 declare <2 x half> @llvm.round.v2f16(<2 x half>)
 
 define <4 x half> @round_v4f16(<4 x half> %x) {
-; CHECK-LABEL: round_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a0)
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: round_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a0, %hi(.LCPI2_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI2_0)(a0)
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 4
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: round_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -128,20 +128,20 @@ define <4 x half> @round_v4f16(<4 x half> %x) {
 declare <4 x half> @llvm.round.v4f16(<4 x half>)
 
 define <8 x half> @round_v8f16(<8 x half> %x) {
-; CHECK-LABEL: round_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: round_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a0, %hi(.LCPI3_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 4
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: round_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -167,20 +167,20 @@ define <8 x half> @round_v8f16(<8 x half> %x) {
 declare <8 x half> @llvm.round.v8f16(<8 x half>)
 
 define <16 x half> @round_v16f16(<16 x half> %x) {
-; CHECK-LABEL: round_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a0)
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, fa5
-; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: round_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a0, %hi(.LCPI4_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI4_0)(a0)
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
+; ZVFH-NEXT:    fsrmi a0, 4
+; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: round_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -206,21 +206,21 @@ define <16 x half> @round_v16f16(<16 x half> %x) {
 declare <16 x half> @llvm.round.v16f16(<16 x half>)
 
 define <32 x half> @round_v32f16(<32 x half> %x) {
-; CHECK-LABEL: round_v32f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
-; CHECK-NEXT:    li a0, 32
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, fa5
-; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: round_v32f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a0, %hi(.LCPI5_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
+; ZVFH-NEXT:    li a0, 32
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT:    vfabs.v v12, v8
+; ZVFH-NEXT:    vmflt.vf v0, v12, fa5
+; ZVFH-NEXT:    fsrmi a0, 4
+; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: round_v32f16:
 ; ZVFHMIN:       # %bb.0:
@@ -261,21 +261,6 @@ define <1 x float> @round_v1f32(<1 x float> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: round_v1f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <1 x float> @llvm.round.v1f32(<1 x float> %x)
   ret <1 x float> %a
 }
@@ -296,21 +281,6 @@ define <2 x float> @round_v2f32(<2 x float> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: round_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <2 x float> @llvm.round.v2f32(<2 x float> %x)
   ret <2 x float> %a
 }
@@ -331,21 +301,6 @@ define <4 x float> @round_v4f32(<4 x float> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: round_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <4 x float> @llvm.round.v4f32(<4 x float> %x)
   ret <4 x float> %a
 }
@@ -366,21 +321,6 @@ define <8 x float> @round_v8f32(<8 x float> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: round_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v10, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <8 x float> @llvm.round.v8f32(<8 x float> %x)
   ret <8 x float> %a
 }
@@ -401,21 +341,6 @@ define <16 x float> @round_v16f32(<16 x float> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: round_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <16 x float> @llvm.round.v16f32(<16 x float> %x)
   ret <16 x float> %a
 }
@@ -436,21 +361,6 @@ define <1 x double> @round_v1f64(<1 x double> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: round_v1f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a0, %hi(.LCPI11_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <1 x double> @llvm.round.v1f64(<1 x double> %x)
   ret <1 x double> %a
 }
@@ -471,21 +381,6 @@ define <2 x double> @round_v2f64(<2 x double> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: round_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a0, %hi(.LCPI12_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <2 x double> @llvm.round.v2f64(<2 x double> %x)
   ret <2 x double> %a
 }
@@ -506,21 +401,6 @@ define <4 x double> @round_v4f64(<4 x double> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: round_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a0, %hi(.LCPI13_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v10, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <4 x double> @llvm.round.v4f64(<4 x double> %x)
   ret <4 x double> %a
 }
@@ -541,21 +421,6 @@ define <8 x double> @round_v8f64(<8 x double> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: round_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a0, %hi(.LCPI14_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI14_0)(a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <8 x double> @llvm.round.v8f64(<8 x double> %x)
   ret <8 x double> %a
 }

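For readers unfamiliar with the check-prefix mechanics: when FileCheck is invoked with --check-prefixes=CHECK,ZVFH, an assertion line may use either prefix, so checks shared by the zvfh and zvfhmin runs can be written once under CHECK while ZVFH and ZVFHMIN are reserved for the places where codegen diverges. update_llc_test_checks.py merges identical per-run bodies under the shared prefix automatically, which is what produces the deletions above. A minimal sketch of the scheme (the function, triple details, and instructions here are illustrative, not taken from this patch):

    ; RUN: llc -mtriple=riscv64 -mattr=+zfh,+zvfh,+v < %s \
    ; RUN:   | FileCheck %s --check-prefixes=CHECK,ZVFH
    ; RUN: llc -mtriple=riscv64 -mattr=+zfh,+zvfhmin,+v < %s \
    ; RUN:   | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

    define <2 x half> @example(<2 x half> %a, <2 x half> %b) {
    ; CHECK-LABEL: example:
    ; ZVFH:        vfadd.vv
    ; ZVFHMIN:     vfwcvt.f.f.v
      %r = fadd <2 x half> %a, %b
      ret <2 x half> %r
    }

The CHECK-LABEL line is verified by both runs; the ZVFH and ZVFHMIN lines each bind to a single run. That is why the identical f32/f64 bodies in these files collapse to CHECK-only checks while the f16 bodies stay split.
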
diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
index ce25febe1ee80c..0f8f298acf4670 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
@@ -1,30 +1,30 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 ; This file tests the code generation for `llvm.roundeven.*` on fixed vector types.
 
 define <1 x half> @roundeven_v1f16(<1 x half> %x) {
-; CHECK-LABEL: roundeven_v1f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a0)
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: roundeven_v1f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a0, %hi(.LCPI0_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI0_0)(a0)
+; ZVFH-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 0
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: roundeven_v1f16:
 ; ZVFHMIN:       # %bb.0:
@@ -50,20 +50,20 @@ define <1 x half> @roundeven_v1f16(<1 x half> %x) {
 declare <1 x half> @llvm.roundeven.v1f16(<1 x half>)
 
 define <2 x half> @roundeven_v2f16(<2 x half> %x) {
-; CHECK-LABEL: roundeven_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: roundeven_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a0, %hi(.LCPI1_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI1_0)(a0)
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 0
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: roundeven_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -89,20 +89,20 @@ define <2 x half> @roundeven_v2f16(<2 x half> %x) {
 declare <2 x half> @llvm.roundeven.v2f16(<2 x half>)
 
 define <4 x half> @roundeven_v4f16(<4 x half> %x) {
-; CHECK-LABEL: roundeven_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a0)
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: roundeven_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a0, %hi(.LCPI2_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI2_0)(a0)
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 0
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: roundeven_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -128,20 +128,20 @@ define <4 x half> @roundeven_v4f16(<4 x half> %x) {
 declare <4 x half> @llvm.roundeven.v4f16(<4 x half>)
 
 define <8 x half> @roundeven_v8f16(<8 x half> %x) {
-; CHECK-LABEL: roundeven_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: roundeven_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a0, %hi(.LCPI3_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI3_0)(a0)
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 0
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: roundeven_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -167,20 +167,20 @@ define <8 x half> @roundeven_v8f16(<8 x half> %x) {
 declare <8 x half> @llvm.roundeven.v8f16(<8 x half>)
 
 define <16 x half> @roundeven_v16f16(<16 x half> %x) {
-; CHECK-LABEL: roundeven_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a0)
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, fa5
-; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: roundeven_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a0, %hi(.LCPI4_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI4_0)(a0)
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
+; ZVFH-NEXT:    fsrmi a0, 0
+; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: roundeven_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -206,21 +206,21 @@ define <16 x half> @roundeven_v16f16(<16 x half> %x) {
 declare <16 x half> @llvm.roundeven.v16f16(<16 x half>)
 
 define <32 x half> @roundeven_v32f16(<32 x half> %x) {
-; CHECK-LABEL: roundeven_v32f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
-; CHECK-NEXT:    li a0, 32
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vfabs.v v12, v8
-; CHECK-NEXT:    vmflt.vf v0, v12, fa5
-; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: roundeven_v32f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a0, %hi(.LCPI5_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI5_0)(a0)
+; ZVFH-NEXT:    li a0, 32
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT:    vfabs.v v12, v8
+; ZVFH-NEXT:    vmflt.vf v0, v12, fa5
+; ZVFH-NEXT:    fsrmi a0, 0
+; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: roundeven_v32f16:
 ; ZVFHMIN:       # %bb.0:
@@ -261,21 +261,6 @@ define <1 x float> @roundeven_v1f32(<1 x float> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: roundeven_v1f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <1 x float> @llvm.roundeven.v1f32(<1 x float> %x)
   ret <1 x float> %a
 }
@@ -296,21 +281,6 @@ define <2 x float> @roundeven_v2f32(<2 x float> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: roundeven_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <2 x float> @llvm.roundeven.v2f32(<2 x float> %x)
   ret <2 x float> %a
 }
@@ -331,21 +301,6 @@ define <4 x float> @roundeven_v4f32(<4 x float> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: roundeven_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %x)
   ret <4 x float> %a
 }
@@ -366,21 +321,6 @@ define <8 x float> @roundeven_v8f32(<8 x float> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: roundeven_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v10, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %x)
   ret <8 x float> %a
 }
@@ -401,21 +341,6 @@ define <16 x float> @roundeven_v16f32(<16 x float> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: roundeven_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <16 x float> @llvm.roundeven.v16f32(<16 x float> %x)
   ret <16 x float> %a
 }
@@ -436,21 +361,6 @@ define <1 x double> @roundeven_v1f64(<1 x double> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: roundeven_v1f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a0, %hi(.LCPI11_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <1 x double> @llvm.roundeven.v1f64(<1 x double> %x)
   ret <1 x double> %a
 }
@@ -471,21 +381,6 @@ define <2 x double> @roundeven_v2f64(<2 x double> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: roundeven_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a0, %hi(.LCPI12_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI12_0)(a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %x)
   ret <2 x double> %a
 }
@@ -506,21 +401,6 @@ define <4 x double> @roundeven_v4f64(<4 x double> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: roundeven_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a0, %hi(.LCPI13_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v10, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %x)
   ret <4 x double> %a
 }
@@ -541,21 +421,6 @@ define <8 x double> @roundeven_v8f64(<8 x double> %x) {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: roundeven_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a0, %hi(.LCPI14_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI14_0)(a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %a = call <8 x double> @llvm.roundeven.v8f64(<8 x double> %x)
   ret <8 x double> %a
 }

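The only thing that differs between the floor, round, and roundeven variants of this sequence is the immediate written to frm: fsrmi swaps in a static rounding mode and hands the previous one back in a0, and fsrm restores it after the conversions. Per the RISC-V F-extension frm encoding (the three lines below are alternatives, one per intrinsic, not a runnable sequence):

    fsrmi a0, 2   # floor:     2 = RDN, round toward -inf
    fsrmi a0, 4   # round:     4 = RMM, round to nearest, ties away from zero
    fsrmi a0, 0   # roundeven: 0 = RNE, round to nearest, ties to even
    ...
    fsrm a0       # restore the caller's rounding mode

So the same masked vfcvt.x.f.v round trip implements all three intrinsics, parameterized only by the rounding mode.
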
diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
index c466c875fad37a..63005e6ff440b2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
@@ -1,24 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare half @llvm.vp.reduce.fadd.v2f16(half, <2 x half>, <2 x i1>, i32)
 
 define half @vpreduce_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_fadd_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
-; CHECK-NEXT:    vfmv.f.s fa0, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vpreduce_fadd_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmv.s.f v9, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfredusum.vs v9, v8, v9, v0.t
+; ZVFH-NEXT:    vfmv.f.s fa0, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vpreduce_fadd_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -37,14 +37,14 @@ define half @vpreduce_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroex
 }
 
 define half @vpreduce_ord_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_ord_fadd_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
-; CHECK-NEXT:    vfmv.f.s fa0, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vpreduce_ord_fadd_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmv.s.f v9, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfredosum.vs v9, v8, v9, v0.t
+; ZVFH-NEXT:    vfmv.f.s fa0, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vpreduce_ord_fadd_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -65,14 +65,14 @@ define half @vpreduce_ord_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 ze
 declare half @llvm.vp.reduce.fadd.v4f16(half, <4 x half>, <4 x i1>, i32)
 
 define half @vpreduce_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_fadd_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
-; CHECK-NEXT:    vfmv.f.s fa0, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vpreduce_fadd_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmv.s.f v9, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfredusum.vs v9, v8, v9, v0.t
+; ZVFH-NEXT:    vfmv.f.s fa0, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vpreduce_fadd_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -91,14 +91,14 @@ define half @vpreduce_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroex
 }
 
 define half @vpreduce_ord_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_ord_fadd_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmv.s.f v9, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
-; CHECK-NEXT:    vfmv.f.s fa0, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vpreduce_ord_fadd_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmv.s.f v9, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfredosum.vs v9, v8, v9, v0.t
+; ZVFH-NEXT:    vfmv.f.s fa0, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vpreduce_ord_fadd_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -127,15 +127,6 @@ define float @vpreduce_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 zer
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vpreduce_fadd_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmv.s.f v9, fa0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfredusum.vs v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vfmv.f.s fa0, v9
-; ZVFHMIN-NEXT:    ret
   %r = call reassoc float @llvm.vp.reduce.fadd.v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 %evl)
   ret float %r
 }
@@ -149,15 +140,6 @@ define float @vpreduce_ord_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vpreduce_ord_fadd_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmv.s.f v9, fa0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfredosum.vs v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vfmv.f.s fa0, v9
-; ZVFHMIN-NEXT:    ret
   %r = call float @llvm.vp.reduce.fadd.v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 %evl)
   ret float %r
 }
@@ -173,15 +155,6 @@ define float @vpreduce_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 zer
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vpreduce_fadd_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmv.s.f v9, fa0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfredusum.vs v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vfmv.f.s fa0, v9
-; ZVFHMIN-NEXT:    ret
   %r = call reassoc float @llvm.vp.reduce.fadd.v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 %evl)
   ret float %r
 }
@@ -195,15 +168,6 @@ define float @vpreduce_ord_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vpreduce_ord_fadd_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmv.s.f v9, fa0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfredosum.vs v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vfmv.f.s fa0, v9
-; ZVFHMIN-NEXT:    ret
   %r = call float @llvm.vp.reduce.fadd.v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 %evl)
   ret float %r
 }
@@ -234,30 +198,6 @@ define float @vpreduce_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32
 ; CHECK-NEXT:    vfredusum.vs v25, v16, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vpreduce_fadd_v64f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    li a2, 32
-; ZVFHMIN-NEXT:    vslidedown.vi v24, v0, 4
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB8_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 32
-; ZVFHMIN-NEXT:  .LBB8_2:
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmv.s.f v25, fa0
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfredusum.vs v25, v8, v25, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -32
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; ZVFHMIN-NEXT:    vfredusum.vs v25, v16, v25, v0.t
-; ZVFHMIN-NEXT:    vfmv.f.s fa0, v25
-; ZVFHMIN-NEXT:    ret
   %r = call reassoc float @llvm.vp.reduce.fadd.v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32 %evl)
   ret float %r
 }
@@ -286,30 +226,6 @@ define float @vpreduce_ord_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m,
 ; CHECK-NEXT:    vfredosum.vs v25, v16, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vpreduce_ord_fadd_v64f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    li a2, 32
-; ZVFHMIN-NEXT:    vslidedown.vi v24, v0, 4
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB9_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 32
-; ZVFHMIN-NEXT:  .LBB9_2:
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmv.s.f v25, fa0
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfredosum.vs v25, v8, v25, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -32
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; ZVFHMIN-NEXT:    vfredosum.vs v25, v16, v25, v0.t
-; ZVFHMIN-NEXT:    vfmv.f.s fa0, v25
-; ZVFHMIN-NEXT:    ret
   %r = call float @llvm.vp.reduce.fadd.v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32 %evl)
   ret float %r
 }
@@ -325,15 +241,6 @@ define double @vpreduce_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32
 ; CHECK-NEXT:    vfredusum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vpreduce_fadd_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmv.s.f v9, fa0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfredusum.vs v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vfmv.f.s fa0, v9
-; ZVFHMIN-NEXT:    ret
   %r = call reassoc double @llvm.vp.reduce.fadd.v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32 %evl)
   ret double %r
 }
@@ -347,15 +254,6 @@ define double @vpreduce_ord_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m,
 ; CHECK-NEXT:    vfredosum.vs v9, v8, v9, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vpreduce_ord_fadd_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmv.s.f v9, fa0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfredosum.vs v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vfmv.f.s fa0, v9
-; ZVFHMIN-NEXT:    ret
   %r = call double @llvm.vp.reduce.fadd.v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32 %evl)
   ret double %r
 }
@@ -371,15 +269,6 @@ define double @vpreduce_fadd_v3f64(double %s, <3 x double> %v, <3 x i1> %m, i32
 ; CHECK-NEXT:    vfredusum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vpreduce_fadd_v3f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmv.s.f v10, fa0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfredusum.vs v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vfmv.f.s fa0, v10
-; ZVFHMIN-NEXT:    ret
   %r = call reassoc double @llvm.vp.reduce.fadd.v3f64(double %s, <3 x double> %v, <3 x i1> %m, i32 %evl)
   ret double %r
 }
@@ -393,15 +282,6 @@ define double @vpreduce_ord_fadd_v3f64(double %s, <3 x double> %v, <3 x i1> %m,
 ; CHECK-NEXT:    vfredosum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vpreduce_ord_fadd_v3f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmv.s.f v10, fa0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfredosum.vs v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vfmv.f.s fa0, v10
-; ZVFHMIN-NEXT:    ret
   %r = call double @llvm.vp.reduce.fadd.v3f64(double %s, <3 x double> %v, <3 x i1> %m, i32 %evl)
   ret double %r
 }
@@ -417,15 +297,6 @@ define double @vpreduce_fadd_v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32
 ; CHECK-NEXT:    vfredusum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vpreduce_fadd_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmv.s.f v10, fa0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfredusum.vs v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vfmv.f.s fa0, v10
-; ZVFHMIN-NEXT:    ret
   %r = call reassoc double @llvm.vp.reduce.fadd.v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32 %evl)
   ret double %r
 }
@@ -439,15 +310,6 @@ define double @vpreduce_ord_fadd_v4f64(double %s, <4 x double> %v, <4 x i1> %m,
 ; CHECK-NEXT:    vfredosum.vs v10, v8, v10, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vpreduce_ord_fadd_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmv.s.f v10, fa0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfredosum.vs v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vfmv.f.s fa0, v10
-; ZVFHMIN-NEXT:    ret
   %r = call double @llvm.vp.reduce.fadd.v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32 %evl)
   ret double %r
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
index 4310893ea43e24..69646c2af6a5bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -1,32 +1,32 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.round.v2f16(<2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vp_round_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_round_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI0_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 4
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_round_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -53,20 +53,20 @@ define <2 x half> @vp_round_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 }
 
 define <2 x half> @vp_round_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_round_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI1_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 4
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_round_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -95,22 +95,22 @@ define <2 x half> @vp_round_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 declare <4 x half> @llvm.vp.round.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vp_round_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_round_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI2_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 4
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_round_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -137,20 +137,20 @@ define <4 x half> @vp_round_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 }
 
 define <4 x half> @vp_round_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_round_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI3_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 4
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_round_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -179,22 +179,22 @@ define <4 x half> @vp_round_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 declare <8 x half> @llvm.vp.round.v8f16(<8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_round_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI4_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 4
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_round_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -223,20 +223,20 @@ define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 }
 
 define <8 x half> @vp_round_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_round_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI5_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 4
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_round_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -265,24 +265,24 @@ define <8 x half> @vp_round_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 declare <16 x half> @llvm.vp.round.v16f16(<16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_round_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vmv1r.v v10, v0
+; ZVFH-NEXT:    lui a1, %hi(.LCPI6_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vmflt.vf v10, v12, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 4
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vmv1r.v v0, v10
+; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_round_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -311,20 +311,20 @@ define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
 }
 
 define <16 x half> @vp_round_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, fa5
-; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_round_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI7_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
+; ZVFH-NEXT:    fsrmi a0, 4
+; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_round_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -369,23 +369,6 @@ define <2 x float> @vp_round_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.round.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -405,21 +388,6 @@ define <2 x float> @vp_round_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.round.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -445,23 +413,6 @@ define <4 x float> @vp_round_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.round.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -481,21 +432,6 @@ define <4 x float> @vp_round_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.round.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
@@ -523,25 +459,6 @@ define <8 x float> @vp_round_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v10, v0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v10
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.round.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -561,21 +478,6 @@ define <8 x float> @vp_round_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v10, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.round.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
@@ -603,25 +505,6 @@ define <16 x float> @vp_round_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v12, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.round.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -641,21 +524,6 @@ define <16 x float> @vp_round_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.round.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
@@ -681,23 +549,6 @@ define <2 x double> @vp_round_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI16_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.round.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -717,21 +568,6 @@ define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI17_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.round.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
@@ -759,25 +595,6 @@ define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v10, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI18_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI18_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v10
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.round.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -797,21 +614,6 @@ define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI19_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v10, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.round.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
@@ -839,25 +641,6 @@ define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI20_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI20_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v12, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.round.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -877,21 +660,6 @@ define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI21_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.round.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
@@ -919,25 +687,6 @@ define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v15f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI22_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v16, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <15 x double> @llvm.vp.round.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
   ret <15 x double> %v
 }
@@ -957,21 +706,6 @@ define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v15f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI23_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <15 x i1> poison, i1 true, i32 0
   %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
   %v = call <15 x double> @llvm.vp.round.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
@@ -999,25 +733,6 @@ define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI24_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v16, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.round.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -1037,21 +752,6 @@ define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI25_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.round.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
@@ -1129,75 +829,6 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v32f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 4
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT:    vmv1r.v v25, v0
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    vslidedown.vi v1, v0, 2
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB26_2:
-; ZVFHMIN-NEXT:    lui a2, %hi(.LCPI26_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a2)
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v25, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a1, 4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vmflt.vf v1, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    ret
   %v = call <32 x double> @llvm.vp.round.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
 }
@@ -1236,40 +867,6 @@ define <32 x double> @vp_round_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_round_v32f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB27_2:
-; ZVFHMIN-NEXT:    lui a2, %hi(.LCPI27_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; ZVFHMIN-NEXT:    fsrmi a1, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v16
-; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <32 x i1> poison, i1 true, i32 0
   %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
   %v = call <32 x double> @llvm.vp.round.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
index eb890edb3ced4a..b73ab8eee5c89d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -1,32 +1,32 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.roundeven.v2f16(<2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vp_roundeven_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundeven_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI0_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 0
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -53,20 +53,20 @@ define <2 x half> @vp_roundeven_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %
 }
 
 define <2 x half> @vp_roundeven_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundeven_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI1_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 0
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -95,22 +95,22 @@ define <2 x half> @vp_roundeven_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl)
 declare <4 x half> @llvm.vp.roundeven.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vp_roundeven_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundeven_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI2_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 0
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -137,20 +137,20 @@ define <4 x half> @vp_roundeven_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %
 }
 
 define <4 x half> @vp_roundeven_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundeven_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI3_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 0
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -179,22 +179,22 @@ define <4 x half> @vp_roundeven_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 declare <8 x half> @llvm.vp.roundeven.v8f16(<8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundeven_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI4_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 0
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -223,20 +223,20 @@ define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %
 }
 
 define <8 x half> @vp_roundeven_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundeven_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI5_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 0
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -265,24 +265,24 @@ define <8 x half> @vp_roundeven_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl)
 declare <16 x half> @llvm.vp.roundeven.v16f16(<16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundeven_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vmv1r.v v10, v0
+; ZVFH-NEXT:    lui a1, %hi(.LCPI6_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vmflt.vf v10, v12, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 0
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vmv1r.v v0, v10
+; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -311,20 +311,20 @@ define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
 }
 
 define <16 x half> @vp_roundeven_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, fa5
-; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundeven_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI7_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
+; ZVFH-NEXT:    fsrmi a0, 0
+; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -369,23 +369,6 @@ define <2 x float> @vp_roundeven_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.roundeven.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -405,21 +388,6 @@ define <2 x float> @vp_roundeven_v2f32_unmasked(<2 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.roundeven.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -445,23 +413,6 @@ define <4 x float> @vp_roundeven_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.roundeven.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -481,21 +432,6 @@ define <4 x float> @vp_roundeven_v4f32_unmasked(<4 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.roundeven.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
@@ -523,25 +459,6 @@ define <8 x float> @vp_roundeven_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v10, v0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v10
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.roundeven.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -561,21 +478,6 @@ define <8 x float> @vp_roundeven_v8f32_unmasked(<8 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v10, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.roundeven.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
@@ -603,25 +505,6 @@ define <16 x float> @vp_roundeven_v16f32(<16 x float> %va, <16 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v12, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.roundeven.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -641,21 +524,6 @@ define <16 x float> @vp_roundeven_v16f32_unmasked(<16 x float> %va, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.roundeven.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
@@ -681,23 +549,6 @@ define <2 x double> @vp_roundeven_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI16_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.roundeven.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -717,21 +568,6 @@ define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI17_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.roundeven.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
@@ -759,25 +595,6 @@ define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v10, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI18_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI18_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v10
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.roundeven.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -797,21 +614,6 @@ define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI19_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v10, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.roundeven.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
@@ -839,25 +641,6 @@ define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI20_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI20_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v12, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.roundeven.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -877,21 +660,6 @@ define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI21_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.roundeven.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
@@ -919,25 +687,6 @@ define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v15f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI22_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v16, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <15 x double> @llvm.vp.roundeven.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
   ret <15 x double> %v
 }
@@ -957,21 +706,6 @@ define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v15f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI23_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <15 x i1> poison, i1 true, i32 0
   %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
   %v = call <15 x double> @llvm.vp.roundeven.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
@@ -999,25 +733,6 @@ define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI24_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v16, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.roundeven.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -1037,21 +752,6 @@ define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI25_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.roundeven.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
@@ -1129,75 +829,6 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v32f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 4
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT:    vmv1r.v v25, v0
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    vslidedown.vi v1, v0, 2
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB26_2:
-; ZVFHMIN-NEXT:    lui a2, %hi(.LCPI26_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a2)
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v25, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a1, 0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vmflt.vf v1, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    ret
   %v = call <32 x double> @llvm.vp.roundeven.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
 }
@@ -1236,40 +867,6 @@ define <32 x double> @vp_roundeven_v32f64_unmasked(<32 x double> %va, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundeven_v32f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB27_2:
-; ZVFHMIN-NEXT:    lui a2, %hi(.LCPI27_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; ZVFHMIN-NEXT:    fsrmi a1, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v16
-; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <32 x i1> poison, i1 true, i32 0
   %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
   %v = call <32 x double> @llvm.vp.roundeven.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)

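The mechanics behind this change: FileCheck accepts several prefixes at once via --check-prefixes, and a directive fires if it carries any of the listed prefixes. Running the zvfh configurations with --check-prefixes=CHECK,ZVFH and the zvfhmin configurations with --check-prefixes=CHECK,ZVFHMIN therefore lets assembly that is identical across all four RUN lines be asserted once under CHECK, while assembly that differs between Zvfh and Zvfhmin is asserted under the extension-specific prefix. A minimal sketch of the resulting pattern (the function @f and the exact instruction sequences are illustrative, not taken from these tests):

; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

define <2 x half> @f(<2 x half> %x) {
; Zvfh can add halves directly; Zvfhmin only converts, so it widens to f32.
; ZVFH-LABEL: f:
; ZVFH:         vfadd.vv
;
; ZVFHMIN-LABEL: f:
; ZVFHMIN:       vfwcvt.f.f.v
; ZVFHMIN:       vfadd.vv
; ZVFHMIN:       vfncvt.f.f.w
  %r = fadd <2 x half> %x, %x
  ret <2 x half> %r
}

update_llc_test_checks.py emits a shared CHECK block automatically wherever both runs produce the same output, which is why the f32 and f64 functions stay under CHECK while the f16 functions split into ZVFH and ZVFHMIN blocks.
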
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
index 9088ec2bb7aa71..23d996da971a33 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -1,32 +1,32 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vp_roundtozero_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundtozero_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI0_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI0_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 1
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -53,20 +53,20 @@ define <2 x half> @vp_roundtozero_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext
 }
 
 define <2 x half> @vp_roundtozero_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundtozero_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI1_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI1_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 1
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -95,22 +95,22 @@ define <2 x half> @vp_roundtozero_v2f16_unmasked(<2 x half> %va, i32 zeroext %ev
 declare <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vp_roundtozero_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundtozero_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI2_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI2_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 1
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -137,20 +137,20 @@ define <4 x half> @vp_roundtozero_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext
 }
 
 define <4 x half> @vp_roundtozero_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundtozero_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI3_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI3_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 1
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -179,22 +179,22 @@ define <4 x half> @vp_roundtozero_v4f16_unmasked(<4 x half> %va, i32 zeroext %ev
 declare <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundtozero_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI4_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI4_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 1
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -223,20 +223,20 @@ define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext
 }
 
 define <8 x half> @vp_roundtozero_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundtozero_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI5_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI5_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    fsrmi a0, 1
+; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -265,24 +265,24 @@ define <8 x half> @vp_roundtozero_v8f16_unmasked(<8 x half> %va, i32 zeroext %ev
 declare <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    lui a1, %hi(.LCPI6_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundtozero_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vmv1r.v v10, v0
+; ZVFH-NEXT:    lui a1, %hi(.LCPI6_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vmflt.vf v10, v12, fa5, v0.t
+; ZVFH-NEXT:    fsrmi a0, 1
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vmv1r.v v0, v10
+; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -311,20 +311,20 @@ define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zer
 }
 
 define <16 x half> @vp_roundtozero_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v10, v8
-; CHECK-NEXT:    vmflt.vf v0, v10, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vp_roundtozero_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    lui a1, %hi(.LCPI7_0)
+; ZVFH-NEXT:    flh fa5, %lo(.LCPI7_0)(a1)
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
+; ZVFH-NEXT:    fsrmi a0, 1
+; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
+; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -369,23 +369,6 @@ define <2 x float> @vp_roundtozero_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.roundtozero.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -405,21 +388,6 @@ define <2 x float> @vp_roundtozero_v2f32_unmasked(<2 x float> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.roundtozero.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -445,23 +413,6 @@ define <4 x float> @vp_roundtozero_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.roundtozero.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -481,21 +432,6 @@ define <4 x float> @vp_roundtozero_v4f32_unmasked(<4 x float> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.roundtozero.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
@@ -523,25 +459,6 @@ define <8 x float> @vp_roundtozero_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v10, v0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v10
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.roundtozero.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -561,21 +478,6 @@ define <8 x float> @vp_roundtozero_v8f32_unmasked(<8 x float> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v10, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.roundtozero.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
@@ -603,25 +505,6 @@ define <16 x float> @vp_roundtozero_v16f32(<16 x float> %va, <16 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v12, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.roundtozero.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -641,21 +524,6 @@ define <16 x float> @vp_roundtozero_v16f32_unmasked(<16 x float> %va, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8
-; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.roundtozero.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
@@ -681,23 +549,6 @@ define <2 x double> @vp_roundtozero_v2f64(<2 x double> %va, <2 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI16_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -717,21 +568,6 @@ define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI17_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
@@ -759,25 +595,6 @@ define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v10, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI18_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI18_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v10, v12, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v10
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -797,21 +614,6 @@ define <4 x double> @vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI19_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v10, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
@@ -839,25 +641,6 @@ define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI20_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI20_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v12, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -877,21 +660,6 @@ define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI21_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v12, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
@@ -919,25 +687,6 @@ define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v15f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI22_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI22_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v16, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
   ret <15 x double> %v
 }
@@ -957,21 +706,6 @@ define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zero
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v15f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI23_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <15 x i1> poison, i1 true, i32 0
   %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
   %v = call <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
@@ -999,25 +733,6 @@ define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI24_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI24_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v16, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -1037,21 +752,6 @@ define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zero
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    lui a1, %hi(.LCPI25_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
@@ -1129,75 +829,6 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v32f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 4
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT:    vmv1r.v v25, v0
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    vslidedown.vi v1, v0, 2
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB26_2:
-; ZVFHMIN-NEXT:    lui a2, %hi(.LCPI26_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a2)
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v25, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a1, 1
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vmflt.vf v1, v24, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    ret
   %v = call <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
 }
@@ -1236,40 +867,6 @@ define <32 x double> @vp_roundtozero_v32f64_unmasked(<32 x double> %va, i32 zero
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vp_roundtozero_v32f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB27_2:
-; ZVFHMIN-NEXT:    lui a2, %hi(.LCPI27_0)
-; ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v8
-; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; ZVFHMIN-NEXT:    fsrmi a1, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v24, v16
-; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <32 x i1> poison, i1 true, i32 0
   %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
   %v = call <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
index c4d97dd5b2ad6a..e5375843f76d19 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
@@ -1,21 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.fabs.v2f16(<2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vfabs_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfabs_vv_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfabs_vv_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfabs_vv_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -31,11 +31,11 @@ define <2 x half> @vfabs_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 }
 
 define <2 x half> @vfabs_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfabs_vv_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfabs_vv_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v8, v8
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfabs_vv_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -55,11 +55,11 @@ define <2 x half> @vfabs_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 declare <4 x half> @llvm.vp.fabs.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vfabs_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfabs_vv_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfabs_vv_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfabs_vv_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -75,11 +75,11 @@ define <4 x half> @vfabs_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 }
 
 define <4 x half> @vfabs_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfabs_vv_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfabs_vv_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v8, v8
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfabs_vv_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -99,11 +99,11 @@ define <4 x half> @vfabs_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 declare <8 x half> @llvm.vp.fabs.v8f16(<8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vfabs_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfabs_vv_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfabs_vv_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfabs_vv_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -119,11 +119,11 @@ define <8 x half> @vfabs_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 }
 
 define <8 x half> @vfabs_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfabs_vv_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfabs_vv_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v8, v8
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfabs_vv_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -143,11 +143,11 @@ define <8 x half> @vfabs_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 declare <16 x half> @llvm.vp.fabs.v16f16(<16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vfabs_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfabs_vv_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfabs_vv_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfabs_vv_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -163,11 +163,11 @@ define <16 x half> @vfabs_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
 }
 
 define <16 x half> @vfabs_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfabs_vv_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfabs_vv_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v8, v8
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfabs_vv_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -192,12 +192,6 @@ define <2 x float> @vfabs_vv_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.fabs.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -208,12 +202,6 @@ define <2 x float> @vfabs_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.fabs.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -228,12 +216,6 @@ define <4 x float> @vfabs_vv_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.fabs.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -244,12 +226,6 @@ define <4 x float> @vfabs_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.fabs.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
@@ -264,12 +240,6 @@ define <8 x float> @vfabs_vv_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.fabs.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -280,12 +250,6 @@ define <8 x float> @vfabs_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.fabs.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
@@ -300,12 +264,6 @@ define <16 x float> @vfabs_vv_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.fabs.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -316,12 +274,6 @@ define <16 x float> @vfabs_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.fabs.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
@@ -336,12 +288,6 @@ define <2 x double> @vfabs_vv_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.fabs.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -352,12 +298,6 @@ define <2 x double> @vfabs_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.fabs.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
@@ -372,12 +312,6 @@ define <4 x double> @vfabs_vv_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.fabs.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -388,12 +322,6 @@ define <4 x double> @vfabs_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.fabs.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
@@ -408,12 +336,6 @@ define <8 x double> @vfabs_vv_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.fabs.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -424,12 +346,6 @@ define <8 x double> @vfabs_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.fabs.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
@@ -444,12 +360,6 @@ define <15 x double> @vfabs_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v15f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <15 x double> @llvm.vp.fabs.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
   ret <15 x double> %v
 }
@@ -460,12 +370,6 @@ define <15 x double> @vfabs_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v15f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <15 x i1> poison, i1 true, i32 0
   %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
   %v = call <15 x double> @llvm.vp.fabs.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
@@ -480,12 +384,6 @@ define <16 x double> @vfabs_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.fabs.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -496,12 +394,6 @@ define <16 x double> @vfabs_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.fabs.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
@@ -531,27 +423,6 @@ define <32 x double> @vfabs_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vfabs.v v16, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v32f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    vslidedown.vi v24, v0, 2
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB26_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; ZVFHMIN-NEXT:    vfabs.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <32 x double> @llvm.vp.fabs.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
 }
@@ -574,24 +445,6 @@ define <32 x double> @vfabs_vv_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfabs_vv_v32f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB27_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v8
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v16
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <32 x i1> poison, i1 true, i32 0
   %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
   %v = call <32 x double> @llvm.vp.fabs.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll
index 516d9788df4f2c..038c103023469d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll
@@ -1,21 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.fadd.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vfadd_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfadd.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfadd.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vv_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -32,11 +32,11 @@ define <2 x half> @vfadd_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i3
 }
 
 define <2 x half> @vfadd_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfadd.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vv_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -55,11 +55,11 @@ define <2 x half> @vfadd_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 ze
 }
 
 define <2 x half> @vfadd_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfadd.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vf_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -83,11 +83,11 @@ define <2 x half> @vfadd_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zero
 }
 
 define <2 x half> @vfadd_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfadd.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vf_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -115,11 +115,11 @@ define <2 x half> @vfadd_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext
 declare <3 x half> @llvm.vp.fadd.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32)
 
 define <3 x half> @vfadd_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_v3f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfadd.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_v3f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vv_v3f16:
 ; ZVFHMIN:       # %bb.0:
@@ -138,11 +138,11 @@ define <3 x half> @vfadd_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i3
 declare <4 x half> @llvm.vp.fadd.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfadd.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vv_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -159,11 +159,11 @@ define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i3
 }
 
 define <4 x half> @vfadd_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vv_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -182,11 +182,11 @@ define <4 x half> @vfadd_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 ze
 }
 
 define <4 x half> @vfadd_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfadd.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vf_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -210,11 +210,11 @@ define <4 x half> @vfadd_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zero
 }
 
 define <4 x half> @vfadd_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfadd.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vf_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -242,11 +242,11 @@ define <4 x half> @vfadd_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext
 declare <8 x half> @llvm.vp.fadd.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vfadd_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfadd.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfadd.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vv_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -263,11 +263,11 @@ define <8 x half> @vfadd_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i3
 }
 
 define <8 x half> @vfadd_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfadd.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfadd.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vv_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -286,11 +286,11 @@ define <8 x half> @vfadd_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 ze
 }
 
 define <8 x half> @vfadd_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfadd.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vf_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -314,11 +314,11 @@ define <8 x half> @vfadd_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zero
 }
 
 define <8 x half> @vfadd_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfadd.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vf_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -346,11 +346,11 @@ define <8 x half> @vfadd_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext
 declare <16 x half> @llvm.vp.fadd.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vfadd_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfadd.vv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v8, v8, v10, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vv_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -367,11 +367,11 @@ define <16 x half> @vfadd_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %
 }
 
 define <16 x half> @vfadd_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfadd.vv v8, v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v8, v8, v10
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vv_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -390,11 +390,11 @@ define <16 x half> @vfadd_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i3
 }
 
 define <16 x half> @vfadd_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfadd.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vf_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -418,11 +418,11 @@ define <16 x half> @vfadd_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32
 }
 
 define <16 x half> @vfadd_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfadd.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfadd.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfadd_vf_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -455,12 +455,6 @@ define <2 x float> @vfadd_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -471,12 +465,6 @@ define <2 x float> @vfadd_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
@@ -489,12 +477,6 @@ define <2 x float> @vfadd_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
@@ -507,12 +489,6 @@ define <2 x float> @vfadd_vf_v2f32_commute(<2 x float> %va, float %b, <2 x i1> %
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v2f32_commute:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -525,12 +501,6 @@ define <2 x float> @vfadd_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
   %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -545,12 +515,6 @@ define <2 x float> @vfadd_vf_v2f32_unmasked_commute(<2 x float> %va, float %b, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v2f32_unmasked_commute:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
   %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -567,12 +531,6 @@ define <4 x float> @vfadd_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -583,12 +541,6 @@ define <4 x float> @vfadd_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
@@ -601,12 +553,6 @@ define <4 x float> @vfadd_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
@@ -619,12 +565,6 @@ define <4 x float> @vfadd_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
   %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -641,12 +581,6 @@ define <8 x float> @vfadd_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v10, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -657,12 +591,6 @@ define <8 x float> @vfadd_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
@@ -675,12 +603,6 @@ define <8 x float> @vfadd_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
@@ -693,12 +615,6 @@ define <8 x float> @vfadd_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
   %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -715,12 +631,6 @@ define <16 x float> @vfadd_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v12, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -731,12 +641,6 @@ define <16 x float> @vfadd_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
@@ -749,12 +653,6 @@ define <16 x float> @vfadd_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
@@ -767,12 +665,6 @@ define <16 x float> @vfadd_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
   %head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -789,12 +681,6 @@ define <2 x double> @vfadd_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -805,12 +691,6 @@ define <2 x double> @vfadd_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
@@ -823,12 +703,6 @@ define <2 x double> @vfadd_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
@@ -841,12 +715,6 @@ define <2 x double> @vfadd_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
   %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -863,12 +731,6 @@ define <4 x double> @vfadd_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v10, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -879,12 +741,6 @@ define <4 x double> @vfadd_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
@@ -897,12 +753,6 @@ define <4 x double> @vfadd_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
@@ -915,12 +765,6 @@ define <4 x double> @vfadd_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
   %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -937,12 +781,6 @@ define <8 x double> @vfadd_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v12, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -953,12 +791,6 @@ define <8 x double> @vfadd_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
@@ -971,12 +803,6 @@ define <8 x double> @vfadd_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
@@ -989,12 +815,6 @@ define <8 x double> @vfadd_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
   %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -1011,12 +831,6 @@ define <16 x double> @vfadd_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -1027,12 +841,6 @@ define <16 x double> @vfadd_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vv_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v16
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
@@ -1045,12 +853,6 @@ define <16 x double> @vfadd_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
@@ -1063,12 +865,6 @@ define <16 x double> @vfadd_vf_v16f64_unmasked(<16 x double> %va, double %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfadd.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfadd_vf_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
   %head = insertelement <16 x i1> poison, i1 true, i32 0
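
A sketch of what the new prefix scheme buys (hypothetical test names, for
illustration only): each zvfh run is now matched against both CHECK and
ZVFH, and each zvfhmin run against both CHECK and ZVFHMIN, so a function
keeps one shared CHECK block where the two configurations lower
identically and splits into ZVFH/ZVFHMIN blocks only where they diverge:

; Divergent f16 lowering: one block per configuration.
; ZVFH-LABEL: some_f16_test:
; ZVFH:         vfdiv.vv v8, v8, v9, v0.t
; ZVFHMIN-LABEL: some_f16_test:
; ZVFHMIN:       <f16-to-f32 widening sequence, elided here>
;
; Identical f32/f64 lowering: a single shared CHECK block.
; CHECK-LABEL: some_f32_test:
; CHECK:         vfdiv.vv v8, v8, v9, v0.t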

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll
index fa3e5f29d0e301..d6907c0b075cbe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll
@@ -1,21 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.fdiv.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vfdiv_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vv_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfdiv.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vv_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfdiv.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vv_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -32,11 +32,11 @@ define <2 x half> @vfdiv_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i3
 }
 
 define <2 x half> @vfdiv_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vv_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vv_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfdiv.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vv_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -55,11 +55,11 @@ define <2 x half> @vfdiv_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 ze
 }
 
 define <2 x half> @vfdiv_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vf_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vf_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vf_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -83,11 +83,11 @@ define <2 x half> @vfdiv_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zero
 }
 
 define <2 x half> @vfdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vf_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vf_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfdiv.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vf_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -115,11 +115,11 @@ define <2 x half> @vfdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext
 declare <3 x half> @llvm.vp.fdiv.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32)
 
 define <3 x half> @vfdiv_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vv_v3f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfdiv.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vv_v3f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfdiv.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vv_v3f16:
 ; ZVFHMIN:       # %bb.0:
@@ -138,11 +138,11 @@ define <3 x half> @vfdiv_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i3
 declare <4 x half> @llvm.vp.fdiv.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vv_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfdiv.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vv_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfdiv.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vv_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -159,11 +159,11 @@ define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i3
 }
 
 define <4 x half> @vfdiv_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vv_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vv_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfdiv.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vv_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -182,11 +182,11 @@ define <4 x half> @vfdiv_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 ze
 }
 
 define <4 x half> @vfdiv_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vf_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vf_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vf_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -210,11 +210,11 @@ define <4 x half> @vfdiv_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zero
 }
 
 define <4 x half> @vfdiv_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vf_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vf_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfdiv.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vf_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -242,11 +242,11 @@ define <4 x half> @vfdiv_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext
 declare <8 x half> @llvm.vp.fdiv.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vfdiv_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vv_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfdiv.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vv_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfdiv.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vv_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -263,11 +263,11 @@ define <8 x half> @vfdiv_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i3
 }
 
 define <8 x half> @vfdiv_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vv_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfdiv.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vv_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfdiv.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vv_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -286,11 +286,11 @@ define <8 x half> @vfdiv_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 ze
 }
 
 define <8 x half> @vfdiv_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vf_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vf_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vf_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -314,11 +314,11 @@ define <8 x half> @vfdiv_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zero
 }
 
 define <8 x half> @vfdiv_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vf_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vf_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfdiv.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vf_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -346,11 +346,11 @@ define <8 x half> @vfdiv_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext
 declare <16 x half> @llvm.vp.fdiv.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vfdiv_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vv_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfdiv.vv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vv_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfdiv.vv v8, v8, v10, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vv_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -367,11 +367,11 @@ define <16 x half> @vfdiv_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %
 }
 
 define <16 x half> @vfdiv_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vv_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfdiv.vv v8, v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vv_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfdiv.vv v8, v8, v10
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vv_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -390,11 +390,11 @@ define <16 x half> @vfdiv_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i3
 }
 
 define <16 x half> @vfdiv_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vf_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vf_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vf_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -418,11 +418,11 @@ define <16 x half> @vfdiv_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32
 }
 
 define <16 x half> @vfdiv_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfdiv_vf_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfdiv_vf_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfdiv.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfdiv_vf_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -455,12 +455,6 @@ define <2 x float> @vfdiv_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -471,12 +465,6 @@ define <2 x float> @vfdiv_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
@@ -489,12 +477,6 @@ define <2 x float> @vfdiv_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
@@ -507,12 +489,6 @@ define <2 x float> @vfdiv_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
   %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -529,12 +505,6 @@ define <4 x float> @vfdiv_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -545,12 +515,6 @@ define <4 x float> @vfdiv_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
@@ -563,12 +527,6 @@ define <4 x float> @vfdiv_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
@@ -581,12 +539,6 @@ define <4 x float> @vfdiv_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
   %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -603,12 +555,6 @@ define <8 x float> @vfdiv_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v10, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -619,12 +565,6 @@ define <8 x float> @vfdiv_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
@@ -637,12 +577,6 @@ define <8 x float> @vfdiv_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
@@ -655,12 +589,6 @@ define <8 x float> @vfdiv_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
   %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -677,12 +605,6 @@ define <16 x float> @vfdiv_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v12, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -693,12 +615,6 @@ define <16 x float> @vfdiv_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
@@ -711,12 +627,6 @@ define <16 x float> @vfdiv_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
@@ -729,12 +639,6 @@ define <16 x float> @vfdiv_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
   %head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -751,12 +655,6 @@ define <2 x double> @vfdiv_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -767,12 +665,6 @@ define <2 x double> @vfdiv_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
@@ -785,12 +677,6 @@ define <2 x double> @vfdiv_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
@@ -803,12 +689,6 @@ define <2 x double> @vfdiv_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
   %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -825,12 +705,6 @@ define <4 x double> @vfdiv_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v10, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -841,12 +715,6 @@ define <4 x double> @vfdiv_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
@@ -859,12 +727,6 @@ define <4 x double> @vfdiv_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
@@ -877,12 +739,6 @@ define <4 x double> @vfdiv_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
   %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -899,12 +755,6 @@ define <8 x double> @vfdiv_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v12, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -915,12 +765,6 @@ define <8 x double> @vfdiv_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
@@ -933,12 +777,6 @@ define <8 x double> @vfdiv_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
@@ -951,12 +789,6 @@ define <8 x double> @vfdiv_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
   %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -973,12 +805,6 @@ define <16 x double> @vfdiv_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -989,12 +815,6 @@ define <16 x double> @vfdiv_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfdiv.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vv_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v16
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
@@ -1007,12 +827,6 @@ define <16 x double> @vfdiv_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
@@ -1025,12 +839,6 @@ define <16 x double> @vfdiv_vf_v16f64_unmasked(<16 x double> %va, double %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfdiv_vf_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
   %head = insertelement <16 x i1> poison, i1 true, i32 0
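
As a usage sketch (test.ll standing in for %s, remaining llc flags
abbreviated), the pipeline in the RUN lines works because FileCheck
treats every name given to --check-prefixes as active, so a single zvfh
run verifies both the shared CHECK directives and the ZVFH-specific ones:

  llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+zvfh,+v ... < test.ll \
    | FileCheck test.ll --check-prefixes=CHECK,ZVFH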

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
index c142c6752582a1..3a437b46b912a0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
@@ -1,22 +1,22 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vfma_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vv_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vv_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmadd.vv v9, v8, v10, v0.t
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vv_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -34,11 +34,11 @@ define <2 x half> @vfma_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <
 }
 
 define <2 x half> @vfma_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, <2 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vv_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vv_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmadd.vv v8, v9, v10
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vv_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -58,11 +58,11 @@ define <2 x half> @vfma_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, <2 x ha
 }
 
 define <2 x half> @vfma_vf_v2f16(<2 x half> %va, half %b, <2 x half> %vc, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vf_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vf_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vf_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -87,11 +87,11 @@ define <2 x half> @vfma_vf_v2f16(<2 x half> %va, half %b, <2 x half> %vc, <2 x i
 }
 
 define <2 x half> @vfma_vf_v2f16_unmasked(<2 x half> %va, half %b, <2 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vf_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vf_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmadd.vf v8, fa0, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vf_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -120,12 +120,12 @@ define <2 x half> @vfma_vf_v2f16_unmasked(<2 x half> %va, half %b, <2 x half> %v
 declare <4 x half> @llvm.vp.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vfma_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vv_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vv_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmadd.vv v9, v8, v10, v0.t
+; ZVFH-NEXT:    vmv1r.v v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vv_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -143,11 +143,11 @@ define <4 x half> @vfma_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <
 }
 
 define <4 x half> @vfma_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, <4 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vv_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vv_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmadd.vv v8, v9, v10
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vv_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -167,11 +167,11 @@ define <4 x half> @vfma_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, <4 x ha
 }
 
 define <4 x half> @vfma_vf_v4f16(<4 x half> %va, half %b, <4 x half> %vc, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vf_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vf_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vf_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -196,11 +196,11 @@ define <4 x half> @vfma_vf_v4f16(<4 x half> %va, half %b, <4 x half> %vc, <4 x i
 }
 
 define <4 x half> @vfma_vf_v4f16_unmasked(<4 x half> %va, half %b, <4 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vf_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vf_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmadd.vf v8, fa0, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vf_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -229,12 +229,12 @@ define <4 x half> @vfma_vf_v4f16_unmasked(<4 x half> %va, half %b, <4 x half> %v
 declare <8 x half> @llvm.vp.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vfma_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vv_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vv_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmadd.vv v9, v8, v10, v0.t
+; ZVFH-NEXT:    vmv.v.v v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vv_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -252,11 +252,11 @@ define <8 x half> @vfma_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <
 }
 
 define <8 x half> @vfma_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, <8 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vv_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfmadd.vv v8, v9, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vv_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmadd.vv v8, v9, v10
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vv_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -276,11 +276,11 @@ define <8 x half> @vfma_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, <8 x ha
 }
 
 define <8 x half> @vfma_vf_v8f16(<8 x half> %va, half %b, <8 x half> %vc, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vf_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vf_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vf_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -305,11 +305,11 @@ define <8 x half> @vfma_vf_v8f16(<8 x half> %va, half %b, <8 x half> %vc, <8 x i
 }
 
 define <8 x half> @vfma_vf_v8f16_unmasked(<8 x half> %va, half %b, <8 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vf_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vf_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmadd.vf v8, fa0, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vf_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -338,12 +338,12 @@ define <8 x half> @vfma_vf_v8f16_unmasked(<8 x half> %va, half %b, <8 x half> %v
 declare <16 x half> @llvm.vp.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vfma_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vv_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfmadd.vv v10, v8, v12, v0.t
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vv_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmadd.vv v10, v8, v12, v0.t
+; ZVFH-NEXT:    vmv.v.v v8, v10
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vv_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -361,11 +361,11 @@ define <16 x half> @vfma_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x half>
 }
 
 define <16 x half> @vfma_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, <16 x half> %c, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vv_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfmadd.vv v8, v10, v12
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vv_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmadd.vv v8, v10, v12
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vv_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -385,11 +385,11 @@ define <16 x half> @vfma_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, <16
 }
 
 define <16 x half> @vfma_vf_v16f16(<16 x half> %va, half %b, <16 x half> %vc, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vf_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v10, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vf_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmadd.vf v8, fa0, v10, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vf_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -414,11 +414,11 @@ define <16 x half> @vfma_vf_v16f16(<16 x half> %va, half %b, <16 x half> %vc, <1
 }
 
 define <16 x half> @vfma_vf_v16f16_unmasked(<16 x half> %va, half %b, <16 x half> %vc, i32 zeroext %evl) {
-; CHECK-LABEL: vfma_vf_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfmadd.vf v8, fa0, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfma_vf_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmadd.vf v8, fa0, v10
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfma_vf_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -453,13 +453,6 @@ define <2 x float> @vfma_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %
 ; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v9, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v8, v9
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -470,12 +463,6 @@ define <2 x float> @vfma_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, <2 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v9, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 %evl)
@@ -488,12 +475,6 @@ define <2 x float> @vfma_vf_v2f32(<2 x float> %va, float %b, <2 x float> %vc, <2
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.fma.v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc, <2 x i1> %m, i32 %evl)
@@ -506,12 +487,6 @@ define <2 x float> @vfma_vf_v2f32_unmasked(<2 x float> %va, float %b, <2 x float
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v9
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
   %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -529,13 +504,6 @@ define <4 x float> @vfma_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %
 ; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v9, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv.v.v v8, v9
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -546,12 +514,6 @@ define <4 x float> @vfma_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, <4 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v9, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 %evl)
@@ -564,12 +526,6 @@ define <4 x float> @vfma_vf_v4f32(<4 x float> %va, float %b, <4 x float> %vc, <4
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.fma.v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc, <4 x i1> %m, i32 %evl)
@@ -582,12 +538,6 @@ define <4 x float> @vfma_vf_v4f32_unmasked(<4 x float> %va, float %b, <4 x float
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v9
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
   %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -605,13 +555,6 @@ define <8 x float> @vfma_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %
 ; CHECK-NEXT:    vfmadd.vv v10, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v10, v8, v12, v0.t
-; ZVFHMIN-NEXT:    vmv.v.v v8, v10
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -622,12 +565,6 @@ define <8 x float> @vfma_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, <8 x
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v10, v12
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 %evl)
@@ -640,12 +577,6 @@ define <8 x float> @vfma_vf_v8f32(<8 x float> %va, float %b, <8 x float> %vc, <8
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v10, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v10, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.fma.v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc, <8 x i1> %m, i32 %evl)
@@ -658,12 +589,6 @@ define <8 x float> @vfma_vf_v8f32_unmasked(<8 x float> %va, float %b, <8 x float
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v10
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
   %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -681,13 +606,6 @@ define <16 x float> @vfma_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x flo
 ; CHECK-NEXT:    vfmadd.vv v12, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v12, v8, v16, v0.t
-; ZVFHMIN-NEXT:    vmv.v.v v8, v12
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -698,12 +616,6 @@ define <16 x float> @vfma_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v12, v16
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 %evl)
@@ -716,12 +628,6 @@ define <16 x float> @vfma_vf_v16f32(<16 x float> %va, float %b, <16 x float> %vc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v12, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v12, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.fma.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc, <16 x i1> %m, i32 %evl)
@@ -734,12 +640,6 @@ define <16 x float> @vfma_vf_v16f32_unmasked(<16 x float> %va, float %b, <16 x f
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v12
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
   %head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -757,13 +657,6 @@ define <2 x double> @vfma_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x doubl
 ; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v9, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv.v.v v8, v9
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -774,12 +667,6 @@ define <2 x double> @vfma_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, <
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v9, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 %evl)
@@ -792,12 +679,6 @@ define <2 x double> @vfma_vf_v2f64(<2 x double> %va, double %b, <2 x double> %vc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.fma.v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc, <2 x i1> %m, i32 %evl)
@@ -810,12 +691,6 @@ define <2 x double> @vfma_vf_v2f64_unmasked(<2 x double> %va, double %b, <2 x do
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v9
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
   %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -833,13 +708,6 @@ define <4 x double> @vfma_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x doubl
 ; CHECK-NEXT:    vfmadd.vv v10, v8, v12, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v10, v8, v12, v0.t
-; ZVFHMIN-NEXT:    vmv.v.v v8, v10
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -850,12 +718,6 @@ define <4 x double> @vfma_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, <
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v10, v12
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 %evl)
@@ -868,12 +730,6 @@ define <4 x double> @vfma_vf_v4f64(<4 x double> %va, double %b, <4 x double> %vc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v10, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v10, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.fma.v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc, <4 x i1> %m, i32 %evl)
@@ -886,12 +742,6 @@ define <4 x double> @vfma_vf_v4f64_unmasked(<4 x double> %va, double %b, <4 x do
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v10
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
   %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -909,13 +759,6 @@ define <8 x double> @vfma_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x doubl
 ; CHECK-NEXT:    vfmadd.vv v12, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v12, v8, v16, v0.t
-; ZVFHMIN-NEXT:    vmv.v.v v8, v12
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -926,12 +769,6 @@ define <8 x double> @vfma_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, <
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v12, v16
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 %evl)
@@ -944,12 +781,6 @@ define <8 x double> @vfma_vf_v8f64(<8 x double> %va, double %b, <8 x double> %vc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v12, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v12, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.fma.v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc, <8 x i1> %m, i32 %evl)
@@ -962,12 +793,6 @@ define <8 x double> @vfma_vf_v8f64_unmasked(<8 x double> %va, double %b, <8 x do
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v12
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
   %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -987,15 +812,6 @@ define <15 x double> @vfma_vv_v15f64(<15 x double> %va, <15 x double> %b, <15 x
 ; CHECK-NEXT:    vfmadd.vv v16, v8, v24, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v15f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v24, (a0)
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v16, v8, v24, v0.t
-; ZVFHMIN-NEXT:    vmv.v.v v8, v16
-; ZVFHMIN-NEXT:    ret
   %v = call <15 x double> @llvm.vp.fma.v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 %evl)
   ret <15 x double> %v
 }
@@ -1008,14 +824,6 @@ define <15 x double> @vfma_vv_v15f64_unmasked(<15 x double> %va, <15 x double> %
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v15f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v24, (a0)
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v16, v24
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <15 x i1> poison, i1 true, i32 0
   %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
   %v = call <15 x double> @llvm.vp.fma.v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 %evl)
@@ -1033,15 +841,6 @@ define <16 x double> @vfma_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x
 ; CHECK-NEXT:    vfmadd.vv v16, v8, v24, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v24, (a0)
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v16, v8, v24, v0.t
-; ZVFHMIN-NEXT:    vmv.v.v v8, v16
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.fma.v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -1054,14 +853,6 @@ define <16 x double> @vfma_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v24, (a0)
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v16, v24
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.fma.v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 %evl)
@@ -1074,12 +865,6 @@ define <16 x double> @vfma_vf_v16f64(<16 x double> %va, double %b, <16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.fma.v16f64(<16 x double> %va, <16 x double> %vb, <16 x double> %vc, <16 x i1> %m, i32 %evl)
@@ -1092,12 +877,6 @@ define <16 x double> @vfma_vf_v16f64_unmasked(<16 x double> %va, double %b, <16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmadd.vf v8, fa0, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vf_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vf v8, fa0, v16
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
   %head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -1186,84 +965,6 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v32f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 5
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT:    addi a1, a2, 128
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v24, (a1)
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    li a3, 24
-; ZVFHMIN-NEXT:    mul a1, a1, a3
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    addi a1, a0, 128
-; ZVFHMIN-NEXT:    vle64.v v24, (a1)
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 4
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vi v1, v0, 2
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v24, (a2)
-; ZVFHMIN-NEXT:    addi a1, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vle64.v v24, (a0)
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    mv a0, a4
-; ZVFHMIN-NEXT:    bltu a4, a1, .LBB50_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a0, 16
-; ZVFHMIN-NEXT:  .LBB50_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vfmadd.vv v24, v8, v16, v0.t
-; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    addi a0, a4, -16
-; ZVFHMIN-NEXT:    sltu a1, a4, a0
-; ZVFHMIN-NEXT:    addi a1, a1, -1
-; ZVFHMIN-NEXT:    and a0, a1, a0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    li a1, 24
-; ZVFHMIN-NEXT:    mul a0, a0, a1
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vfmadd.vv v16, v24, v8, v0.t
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 5
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    ret
   %v = call <32 x double> @llvm.vp.fma.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
 }
@@ -1331,69 +1032,6 @@ define <32 x double> @vfma_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfma_vv_v32f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    li a3, 24
-; ZVFHMIN-NEXT:    mul a1, a1, a3
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; ZVFHMIN-NEXT:    addi a1, a2, 128
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v24, (a1)
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 4
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    addi a1, a0, 128
-; ZVFHMIN-NEXT:    vle64.v v24, (a1)
-; ZVFHMIN-NEXT:    addi a1, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vle64.v v24, (a2)
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    add a1, sp, a1
-; ZVFHMIN-NEXT:    addi a1, a1, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vle64.v v0, (a0)
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:    mv a0, a4
-; ZVFHMIN-NEXT:    bltu a4, a1, .LBB51_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a0, 16
-; ZVFHMIN-NEXT:  .LBB51_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vfmadd.vv v0, v8, v24
-; ZVFHMIN-NEXT:    addi a0, a4, -16
-; ZVFHMIN-NEXT:    sltu a1, a4, a0
-; ZVFHMIN-NEXT:    addi a1, a1, -1
-; ZVFHMIN-NEXT:    and a0, a1, a0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 4
-; ZVFHMIN-NEXT:    add a0, sp, a0
-; ZVFHMIN-NEXT:    addi a0, a0, 16
-; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vfmadd.vv v24, v16, v8
-; ZVFHMIN-NEXT:    vmv8r.v v8, v0
-; ZVFHMIN-NEXT:    vmv.v.v v16, v24
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    li a1, 24
-; ZVFHMIN-NEXT:    mul a0, a0, a1
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <32 x i1> poison, i1 true, i32 0
   %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
   %v = call <32 x double> @llvm.vp.fma.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 %evl)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
index 03e597237a9408..52366f7986ebbc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
@@ -1,21 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.maxnum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmax_vv_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmax.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_vv_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmax.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_vv_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -32,11 +32,11 @@ define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i
 }
 
 define <2 x half> @vfmax_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 zeroext %evl) {
-; CHECK-LABEL: vfmax_vv_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_vv_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmax.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_vv_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -57,11 +57,11 @@ define <2 x half> @vfmax_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z
 declare <4 x half> @llvm.vp.maxnum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmax_vv_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmax.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_vv_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmax.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_vv_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -78,11 +78,11 @@ define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i
 }
 
 define <4 x half> @vfmax_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 zeroext %evl) {
-; CHECK-LABEL: vfmax_vv_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_vv_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmax.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_vv_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -103,11 +103,11 @@ define <4 x half> @vfmax_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z
 declare <8 x half> @llvm.vp.maxnum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmax_vv_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfmax.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_vv_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmax.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_vv_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -124,11 +124,11 @@ define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i
 }
 
 define <8 x half> @vfmax_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 zeroext %evl) {
-; CHECK-LABEL: vfmax_vv_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_vv_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmax.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_vv_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -149,11 +149,11 @@ define <8 x half> @vfmax_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z
 declare <16 x half> @llvm.vp.maxnum.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmax_vv_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfmax.vv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_vv_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmax.vv v8, v8, v10, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_vv_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -170,11 +170,11 @@ define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1>
 }
 
 define <16 x half> @vfmax_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i32 zeroext %evl) {
-; CHECK-LABEL: vfmax_vv_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_vv_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmax.vv v8, v8, v10
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_vv_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -200,12 +200,6 @@ define <2 x float> @vfmax_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.maxnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -216,12 +210,6 @@ define <2 x float> @vfmax_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.maxnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
@@ -236,12 +224,6 @@ define <4 x float> @vfmax_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.maxnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -252,12 +234,6 @@ define <4 x float> @vfmax_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.maxnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
@@ -272,12 +248,6 @@ define <8 x float> @vfmax_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v10, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.maxnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -288,12 +258,6 @@ define <8 x float> @vfmax_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.maxnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
@@ -308,12 +272,6 @@ define <16 x float> @vfmax_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v12, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.maxnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -324,12 +282,6 @@ define <16 x float> @vfmax_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.maxnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
@@ -344,12 +296,6 @@ define <2 x double> @vfmax_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.maxnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -360,12 +306,6 @@ define <2 x double> @vfmax_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.maxnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
@@ -380,12 +320,6 @@ define <4 x double> @vfmax_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v10, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.maxnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -396,12 +330,6 @@ define <4 x double> @vfmax_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.maxnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
@@ -416,12 +344,6 @@ define <8 x double> @vfmax_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v12, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.maxnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -432,12 +354,6 @@ define <8 x double> @vfmax_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.maxnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
@@ -452,12 +368,6 @@ define <15 x double> @vfmax_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v15f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <15 x double> @llvm.vp.maxnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
   ret <15 x double> %v
 }
@@ -468,12 +378,6 @@ define <15 x double> @vfmax_vv_v15f64_unmasked(<15 x double> %va, <15 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v15f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v16
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <15 x i1> poison, i1 true, i32 0
   %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
   %v = call <15 x double> @llvm.vp.maxnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
@@ -488,12 +392,6 @@ define <16 x double> @vfmax_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.maxnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -504,12 +402,6 @@ define <16 x double> @vfmax_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v16
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.maxnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
@@ -558,46 +450,6 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v32f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT:    addi a1, a0, 128
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v24, (a1)
-; ZVFHMIN-NEXT:    addi a1, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vi v1, v0, 2
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v24, (a0)
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:    mv a0, a2
-; ZVFHMIN-NEXT:    bltu a2, a1, .LBB26_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a0, 16
-; ZVFHMIN-NEXT:  .LBB26_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v24, v0.t
-; ZVFHMIN-NEXT:    addi a0, a2, -16
-; ZVFHMIN-NEXT:    sltu a1, a2, a0
-; ZVFHMIN-NEXT:    addi a1, a1, -1
-; ZVFHMIN-NEXT:    and a0, a1, a0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vfmax.vv v16, v16, v24, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    ret
   %v = call <32 x double> @llvm.vp.maxnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
 }
@@ -624,28 +476,6 @@ define <32 x double> @vfmax_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vv v16, v16, v24
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_vv_v32f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi a1, a0, 128
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v24, (a1)
-; ZVFHMIN-NEXT:    vle64.v v0, (a0)
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:    mv a0, a2
-; ZVFHMIN-NEXT:    bltu a2, a1, .LBB27_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a0, 16
-; ZVFHMIN-NEXT:  .LBB27_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v0
-; ZVFHMIN-NEXT:    addi a0, a2, -16
-; ZVFHMIN-NEXT:    sltu a1, a2, a0
-; ZVFHMIN-NEXT:    addi a1, a1, -1
-; ZVFHMIN-NEXT:    and a0, a1, a0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v16, v16, v24
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <32 x i1> poison, i1 true, i32 0
   %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
   %v = call <32 x double> @llvm.vp.maxnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll
index ecb3681bfb459f..bd7a97e2ed7195 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax.ll
@@ -1,21 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.maxnum.v2f16(<2 x half>, <2 x half>)
 
 define <2 x half> @vfmax_v2f16_vv(<2 x half> %a, <2 x half> %b) {
-; CHECK-LABEL: vfmax_v2f16_vv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_v2f16_vv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmax.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_v2f16_vv:
 ; ZVFHMIN:       # %bb.0:
@@ -32,11 +32,11 @@ define <2 x half> @vfmax_v2f16_vv(<2 x half> %a, <2 x half> %b) {
 }
 
 define <2 x half> @vfmax_v2f16_vf(<2 x half> %a, half %b) {
-; CHECK-LABEL: vfmax_v2f16_vf:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_v2f16_vf:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmax.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_v2f16_vf:
 ; ZVFHMIN:       # %bb.0:
@@ -60,11 +60,11 @@ define <2 x half> @vfmax_v2f16_vf(<2 x half> %a, half %b) {
 }
 
 define <2 x half> @vfmax_v2f16_fv(<2 x half> %a, half %b) {
-; CHECK-LABEL: vfmax_v2f16_fv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_v2f16_fv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmax.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_v2f16_fv:
 ; ZVFHMIN:       # %bb.0:
@@ -90,11 +90,11 @@ define <2 x half> @vfmax_v2f16_fv(<2 x half> %a, half %b) {
 declare <4 x half> @llvm.maxnum.v4f16(<4 x half>, <4 x half>)
 
 define <4 x half> @vfmax_v4f16_vv(<4 x half> %a, <4 x half> %b) {
-; CHECK-LABEL: vfmax_v4f16_vv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_v4f16_vv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmax.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_v4f16_vv:
 ; ZVFHMIN:       # %bb.0:
@@ -111,11 +111,11 @@ define <4 x half> @vfmax_v4f16_vv(<4 x half> %a, <4 x half> %b) {
 }
 
 define <4 x half> @vfmax_v4f16_vf(<4 x half> %a, half %b) {
-; CHECK-LABEL: vfmax_v4f16_vf:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_v4f16_vf:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmax.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_v4f16_vf:
 ; ZVFHMIN:       # %bb.0:
@@ -139,11 +139,11 @@ define <4 x half> @vfmax_v4f16_vf(<4 x half> %a, half %b) {
 }
 
 define <4 x half> @vfmax_v4f16_fv(<4 x half> %a, half %b) {
-; CHECK-LABEL: vfmax_v4f16_fv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_v4f16_fv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmax.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_v4f16_fv:
 ; ZVFHMIN:       # %bb.0:
@@ -169,11 +169,11 @@ define <4 x half> @vfmax_v4f16_fv(<4 x half> %a, half %b) {
 declare <8 x half> @llvm.maxnum.v8f16(<8 x half>, <8 x half>)
 
 define <8 x half> @vfmax_v8f16_vv(<8 x half> %a, <8 x half> %b) {
-; CHECK-LABEL: vfmax_v8f16_vv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vfmax.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_v8f16_vv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmax.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_v8f16_vv:
 ; ZVFHMIN:       # %bb.0:
@@ -190,11 +190,11 @@ define <8 x half> @vfmax_v8f16_vv(<8 x half> %a, <8 x half> %b) {
 }
 
 define <8 x half> @vfmax_v8f16_vf(<8 x half> %a, half %b) {
-; CHECK-LABEL: vfmax_v8f16_vf:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_v8f16_vf:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmax.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_v8f16_vf:
 ; ZVFHMIN:       # %bb.0:
@@ -218,11 +218,11 @@ define <8 x half> @vfmax_v8f16_vf(<8 x half> %a, half %b) {
 }
 
 define <8 x half> @vfmax_v8f16_fv(<8 x half> %a, half %b) {
-; CHECK-LABEL: vfmax_v8f16_fv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_v8f16_fv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmax.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_v8f16_fv:
 ; ZVFHMIN:       # %bb.0:
@@ -248,11 +248,11 @@ define <8 x half> @vfmax_v8f16_fv(<8 x half> %a, half %b) {
 declare <16 x half> @llvm.maxnum.v16f16(<16 x half>, <16 x half>)
 
 define <16 x half> @vfmax_v16f16_vv(<16 x half> %a, <16 x half> %b) {
-; CHECK-LABEL: vfmax_v16f16_vv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT:    vfmax.vv v8, v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_v16f16_vv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmax.vv v8, v8, v10
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_v16f16_vv:
 ; ZVFHMIN:       # %bb.0:
@@ -269,11 +269,11 @@ define <16 x half> @vfmax_v16f16_vv(<16 x half> %a, <16 x half> %b) {
 }
 
 define <16 x half> @vfmax_v16f16_vf(<16 x half> %a, half %b) {
-; CHECK-LABEL: vfmax_v16f16_vf:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_v16f16_vf:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmax.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_v16f16_vf:
 ; ZVFHMIN:       # %bb.0:
@@ -297,11 +297,11 @@ define <16 x half> @vfmax_v16f16_vf(<16 x half> %a, half %b) {
 }
 
 define <16 x half> @vfmax_v16f16_fv(<16 x half> %a, half %b) {
-; CHECK-LABEL: vfmax_v16f16_fv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT:    vfmax.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmax_v16f16_fv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmax.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmax_v16f16_fv:
 ; ZVFHMIN:       # %bb.0:
@@ -332,12 +332,6 @@ define <2 x float> @vfmax_v2f32_vv(<2 x float> %a, <2 x float> %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v2f32_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %a, <2 x float> %b)
   ret <2 x float> %v
 }
@@ -348,12 +342,6 @@ define <2 x float> @vfmax_v2f32_vf(<2 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v2f32_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x float> poison, float %b, i32 0
   %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %a, <2 x float> %splat)
@@ -366,12 +354,6 @@ define <2 x float> @vfmax_v2f32_fv(<2 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v2f32_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x float> poison, float %b, i32 0
   %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %splat, <2 x float> %a)
@@ -386,12 +368,6 @@ define <4 x float> @vfmax_v4f32_vv(<4 x float> %a, <4 x float> %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v4f32_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %a, <4 x float> %b)
   ret <4 x float> %v
 }
@@ -402,12 +378,6 @@ define <4 x float> @vfmax_v4f32_vf(<4 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v4f32_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x float> poison, float %b, i32 0
   %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %a, <4 x float> %splat)
@@ -420,12 +390,6 @@ define <4 x float> @vfmax_v4f32_fv(<4 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v4f32_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x float> poison, float %b, i32 0
   %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %splat, <4 x float> %a)
@@ -440,12 +404,6 @@ define <8 x float> @vfmax_v8f32_vv(<8 x float> %a, <8 x float> %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v8f32_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.maxnum.v8f32(<8 x float> %a, <8 x float> %b)
   ret <8 x float> %v
 }
@@ -456,12 +414,6 @@ define <8 x float> @vfmax_v8f32_vf(<8 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v8f32_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x float> poison, float %b, i32 0
   %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.maxnum.v8f32(<8 x float> %a, <8 x float> %splat)
@@ -474,12 +426,6 @@ define <8 x float> @vfmax_v8f32_fv(<8 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v8f32_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x float> poison, float %b, i32 0
   %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.maxnum.v8f32(<8 x float> %splat, <8 x float> %a)
@@ -494,12 +440,6 @@ define <16 x float> @vfmax_v16f32_vv(<16 x float> %a, <16 x float> %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v16f32_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.maxnum.v16f32(<16 x float> %a, <16 x float> %b)
   ret <16 x float> %v
 }
@@ -510,12 +450,6 @@ define <16 x float> @vfmax_v16f32_vf(<16 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v16f32_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x float> poison, float %b, i32 0
   %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.maxnum.v16f32(<16 x float> %a, <16 x float> %splat)
@@ -528,12 +462,6 @@ define <16 x float> @vfmax_v16f32_fv(<16 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v16f32_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x float> poison, float %b, i32 0
   %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.maxnum.v16f32(<16 x float> %splat, <16 x float> %a)
@@ -548,12 +476,6 @@ define <2 x double> @vfmax_v2f64_vv(<2 x double> %a, <2 x double> %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v2f64_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %a, <2 x double> %b)
   ret <2 x double> %v
 }
@@ -564,12 +486,6 @@ define <2 x double> @vfmax_v2f64_vf(<2 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v2f64_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x double> poison, double %b, i32 0
   %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %a, <2 x double> %splat)
@@ -582,12 +498,6 @@ define <2 x double> @vfmax_v2f64_fv(<2 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v2f64_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x double> poison, double %b, i32 0
   %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %splat, <2 x double> %a)
@@ -602,12 +512,6 @@ define <4 x double> @vfmax_v4f64_vv(<4 x double> %a, <4 x double> %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v4f64_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %a, <4 x double> %b)
   ret <4 x double> %v
 }
@@ -618,12 +522,6 @@ define <4 x double> @vfmax_v4f64_vf(<4 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v4f64_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x double> poison, double %b, i32 0
   %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %a, <4 x double> %splat)
@@ -636,12 +534,6 @@ define <4 x double> @vfmax_v4f64_fv(<4 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v4f64_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x double> poison, double %b, i32 0
   %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.maxnum.v4f64(<4 x double> %splat, <4 x double> %a)
@@ -656,12 +548,6 @@ define <8 x double> @vfmax_v8f64_vv(<8 x double> %a, <8 x double> %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v8f64_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %a, <8 x double> %b)
   ret <8 x double> %v
 }
@@ -672,12 +558,6 @@ define <8 x double> @vfmax_v8f64_vf(<8 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v8f64_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x double> poison, double %b, i32 0
   %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %a, <8 x double> %splat)
@@ -690,12 +570,6 @@ define <8 x double> @vfmax_v8f64_fv(<8 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v8f64_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x double> poison, double %b, i32 0
   %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.maxnum.v8f64(<8 x double> %splat, <8 x double> %a)
@@ -710,12 +584,6 @@ define <16 x double> @vfmax_v16f64_vv(<16 x double> %a, <16 x double> %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v16f64_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vv v8, v8, v16
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.maxnum.v16f64(<16 x double> %a, <16 x double> %b)
   ret <16 x double> %v
 }
@@ -726,12 +594,6 @@ define <16 x double> @vfmax_v16f64_vf(<16 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v16f64_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x double> poison, double %b, i32 0
   %splat = shufflevector <16 x double> %head, <16 x double> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.maxnum.v16f64(<16 x double> %a, <16 x double> %splat)
@@ -744,12 +606,6 @@ define <16 x double> @vfmax_v16f64_fv(<16 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmax.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmax_v16f64_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmax.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x double> poison, double %b, i32 0
   %splat = shufflevector <16 x double> %head, <16 x double> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.maxnum.v16f64(<16 x double> %splat, <16 x double> %a)

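For readers skimming these hunks rather than regenerating them, the FileCheck mechanics are worth spelling out once: passing --check-prefixes=CHECK,ZVFH (or CHECK,ZVFHMIN) to a run makes FileCheck accept assertions under either prefix, so bodies that come out identical for both subtargets are asserted once under CHECK, while only the f16 tests keep separate ZVFH/ZVFHMIN blocks. A minimal sketch of the resulting shape, as a hypothetical test file (function names invented, RUN lines abbreviated from the ones in this patch, and the ZVFHMIN body cut down to its leading widen):

; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v ... | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v ... | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

declare <2 x float> @llvm.maxnum.v2f32(<2 x float>, <2 x float>)
declare <2 x half> @llvm.maxnum.v2f16(<2 x half>, <2 x half>)

; Identical for both subtargets: checked once, and by both runs.
define <2 x float> @shared_f32(<2 x float> %a, <2 x float> %b) {
; CHECK-LABEL: shared_f32:
; CHECK:         vfmax.vv v8, v8, v9
  %v = call <2 x float> @llvm.maxnum.v2f32(<2 x float> %a, <2 x float> %b)
  ret <2 x float> %v
}

; Diverges: native f16 vfmax under zvfh, promotion to f32 under zvfhmin.
define <2 x half> @divergent_f16(<2 x half> %a, <2 x half> %b) {
; ZVFH-LABEL: divergent_f16:
; ZVFH:          vfmax.vv v8, v8, v9
;
; ZVFHMIN-LABEL: divergent_f16:
; ZVFHMIN:       vfwcvt.f.f.v {{v[0-9]+}}, v9
  %v = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %a, <2 x half> %b)
  ret <2 x half> %v
}
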
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
index 383f82cc8a961b..364fe703f0843d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
@@ -1,21 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.minnum.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmin_vv_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmin.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_vv_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmin.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_vv_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -32,11 +32,11 @@ define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i
 }
 
 define <2 x half> @vfmin_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 zeroext %evl) {
-; CHECK-LABEL: vfmin_vv_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_vv_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmin.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_vv_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -57,11 +57,11 @@ define <2 x half> @vfmin_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %vb, i32 z
 declare <4 x half> @llvm.vp.minnum.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmin_vv_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmin.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_vv_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmin.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_vv_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -78,11 +78,11 @@ define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i
 }
 
 define <4 x half> @vfmin_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 zeroext %evl) {
-; CHECK-LABEL: vfmin_vv_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_vv_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmin.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_vv_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -103,11 +103,11 @@ define <4 x half> @vfmin_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %vb, i32 z
 declare <8 x half> @llvm.vp.minnum.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmin_vv_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfmin.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_vv_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmin.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_vv_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -124,11 +124,11 @@ define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i
 }
 
 define <8 x half> @vfmin_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 zeroext %evl) {
-; CHECK-LABEL: vfmin_vv_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_vv_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmin.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_vv_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -149,11 +149,11 @@ define <8 x half> @vfmin_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %vb, i32 z
 declare <16 x half> @llvm.vp.minnum.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmin_vv_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfmin.vv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_vv_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmin.vv v8, v8, v10, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_vv_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -170,11 +170,11 @@ define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1>
 }
 
 define <16 x half> @vfmin_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %vb, i32 zeroext %evl) {
-; CHECK-LABEL: vfmin_vv_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_vv_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmin.vv v8, v8, v10
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_vv_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -200,12 +200,6 @@ define <2 x float> @vfmin_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.minnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -216,12 +210,6 @@ define <2 x float> @vfmin_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %vb, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.minnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
@@ -236,12 +224,6 @@ define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.minnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -252,12 +234,6 @@ define <4 x float> @vfmin_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %vb, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.minnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
@@ -272,12 +248,6 @@ define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v10, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.minnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -288,12 +258,6 @@ define <8 x float> @vfmin_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %vb, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.minnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
@@ -308,12 +272,6 @@ define <16 x float> @vfmin_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v12, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.minnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -324,12 +282,6 @@ define <16 x float> @vfmin_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %vb
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.minnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
@@ -344,12 +296,6 @@ define <2 x double> @vfmin_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.minnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -360,12 +306,6 @@ define <2 x double> @vfmin_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %vb,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.minnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
@@ -380,12 +320,6 @@ define <4 x double> @vfmin_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v10, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.minnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -396,12 +330,6 @@ define <4 x double> @vfmin_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %vb,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.minnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
@@ -416,12 +344,6 @@ define <8 x double> @vfmin_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v12, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.minnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -432,12 +354,6 @@ define <8 x double> @vfmin_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %vb,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.minnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
@@ -452,12 +368,6 @@ define <15 x double> @vfmin_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v15f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <15 x double> @llvm.vp.minnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
   ret <15 x double> %v
 }
@@ -468,12 +378,6 @@ define <15 x double> @vfmin_vv_v15f64_unmasked(<15 x double> %va, <15 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v15f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v16
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <15 x i1> poison, i1 true, i32 0
   %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
   %v = call <15 x double> @llvm.vp.minnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
@@ -488,12 +392,6 @@ define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.minnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -504,12 +402,6 @@ define <16 x double> @vfmin_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v16
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.minnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
@@ -558,46 +450,6 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v32f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi sp, sp, -16
-; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
-; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    slli a1, a1, 3
-; ZVFHMIN-NEXT:    sub sp, sp, a1
-; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT:    addi a1, a0, 128
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v24, (a1)
-; ZVFHMIN-NEXT:    addi a1, sp, 16
-; ZVFHMIN-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vi v1, v0, 2
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v24, (a0)
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:    mv a0, a2
-; ZVFHMIN-NEXT:    bltu a2, a1, .LBB26_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a0, 16
-; ZVFHMIN-NEXT:  .LBB26_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v24, v0.t
-; ZVFHMIN-NEXT:    addi a0, a2, -16
-; ZVFHMIN-NEXT:    sltu a1, a2, a0
-; ZVFHMIN-NEXT:    addi a1, a1, -1
-; ZVFHMIN-NEXT:    and a0, a1, a0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v1
-; ZVFHMIN-NEXT:    addi a0, sp, 16
-; ZVFHMIN-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT:    vfmin.vv v16, v16, v24, v0.t
-; ZVFHMIN-NEXT:    csrr a0, vlenb
-; ZVFHMIN-NEXT:    slli a0, a0, 3
-; ZVFHMIN-NEXT:    add sp, sp, a0
-; ZVFHMIN-NEXT:    addi sp, sp, 16
-; ZVFHMIN-NEXT:    ret
   %v = call <32 x double> @llvm.vp.minnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
 }
@@ -624,28 +476,6 @@ define <32 x double> @vfmin_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v16, v16, v24
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_vv_v32f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    addi a1, a0, 128
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v24, (a1)
-; ZVFHMIN-NEXT:    vle64.v v0, (a0)
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:    mv a0, a2
-; ZVFHMIN-NEXT:    bltu a2, a1, .LBB27_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a0, 16
-; ZVFHMIN-NEXT:  .LBB27_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v0
-; ZVFHMIN-NEXT:    addi a0, a2, -16
-; ZVFHMIN-NEXT:    sltu a1, a2, a0
-; ZVFHMIN-NEXT:    addi a1, a1, -1
-; ZVFHMIN-NEXT:    and a0, a1, a0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v16, v16, v24
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <32 x i1> poison, i1 true, i32 0
   %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
   %v = call <32 x double> @llvm.vp.minnum.v32f64(<32 x double> %va, <32 x double> %vb, <32 x i1> %m, i32 %evl)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll
index e3cd75c3aabf1d..6f864b440bacf7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin.ll
@@ -1,21 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.minnum.v2f16(<2 x half>, <2 x half>)
 
 define <2 x half> @vfmin_v2f16_vv(<2 x half> %a, <2 x half> %b) {
-; CHECK-LABEL: vfmin_v2f16_vv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_v2f16_vv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmin.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_v2f16_vv:
 ; ZVFHMIN:       # %bb.0:
@@ -32,11 +32,11 @@ define <2 x half> @vfmin_v2f16_vv(<2 x half> %a, <2 x half> %b) {
 }
 
 define <2 x half> @vfmin_v2f16_vf(<2 x half> %a, half %b) {
-; CHECK-LABEL: vfmin_v2f16_vf:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_v2f16_vf:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmin.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_v2f16_vf:
 ; ZVFHMIN:       # %bb.0:
@@ -60,11 +60,11 @@ define <2 x half> @vfmin_v2f16_vf(<2 x half> %a, half %b) {
 }
 
 define <2 x half> @vfmin_v2f16_fv(<2 x half> %a, half %b) {
-; CHECK-LABEL: vfmin_v2f16_fv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_v2f16_fv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmin.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_v2f16_fv:
 ; ZVFHMIN:       # %bb.0:
@@ -90,11 +90,11 @@ define <2 x half> @vfmin_v2f16_fv(<2 x half> %a, half %b) {
 declare <4 x half> @llvm.minnum.v4f16(<4 x half>, <4 x half>)
 
 define <4 x half> @vfmin_v4f16_vv(<4 x half> %a, <4 x half> %b) {
-; CHECK-LABEL: vfmin_v4f16_vv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_v4f16_vv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmin.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_v4f16_vv:
 ; ZVFHMIN:       # %bb.0:
@@ -111,11 +111,11 @@ define <4 x half> @vfmin_v4f16_vv(<4 x half> %a, <4 x half> %b) {
 }
 
 define <4 x half> @vfmin_v4f16_vf(<4 x half> %a, half %b) {
-; CHECK-LABEL: vfmin_v4f16_vf:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_v4f16_vf:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmin.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_v4f16_vf:
 ; ZVFHMIN:       # %bb.0:
@@ -139,11 +139,11 @@ define <4 x half> @vfmin_v4f16_vf(<4 x half> %a, half %b) {
 }
 
 define <4 x half> @vfmin_v4f16_fv(<4 x half> %a, half %b) {
-; CHECK-LABEL: vfmin_v4f16_fv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_v4f16_fv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmin.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_v4f16_fv:
 ; ZVFHMIN:       # %bb.0:
@@ -169,11 +169,11 @@ define <4 x half> @vfmin_v4f16_fv(<4 x half> %a, half %b) {
 declare <8 x half> @llvm.minnum.v8f16(<8 x half>, <8 x half>)
 
 define <8 x half> @vfmin_v8f16_vv(<8 x half> %a, <8 x half> %b) {
-; CHECK-LABEL: vfmin_v8f16_vv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vfmin.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_v8f16_vv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmin.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_v8f16_vv:
 ; ZVFHMIN:       # %bb.0:
@@ -190,11 +190,11 @@ define <8 x half> @vfmin_v8f16_vv(<8 x half> %a, <8 x half> %b) {
 }
 
 define <8 x half> @vfmin_v8f16_vf(<8 x half> %a, half %b) {
-; CHECK-LABEL: vfmin_v8f16_vf:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_v8f16_vf:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmin.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_v8f16_vf:
 ; ZVFHMIN:       # %bb.0:
@@ -218,11 +218,11 @@ define <8 x half> @vfmin_v8f16_vf(<8 x half> %a, half %b) {
 }
 
 define <8 x half> @vfmin_v8f16_fv(<8 x half> %a, half %b) {
-; CHECK-LABEL: vfmin_v8f16_fv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_v8f16_fv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmin.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_v8f16_fv:
 ; ZVFHMIN:       # %bb.0:
@@ -248,11 +248,11 @@ define <8 x half> @vfmin_v8f16_fv(<8 x half> %a, half %b) {
 declare <16 x half> @llvm.minnum.v16f16(<16 x half>, <16 x half>)
 
 define <16 x half> @vfmin_v16f16_vv(<16 x half> %a, <16 x half> %b) {
-; CHECK-LABEL: vfmin_v16f16_vv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT:    vfmin.vv v8, v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_v16f16_vv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmin.vv v8, v8, v10
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_v16f16_vv:
 ; ZVFHMIN:       # %bb.0:
@@ -269,11 +269,11 @@ define <16 x half> @vfmin_v16f16_vv(<16 x half> %a, <16 x half> %b) {
 }
 
 define <16 x half> @vfmin_v16f16_vf(<16 x half> %a, half %b) {
-; CHECK-LABEL: vfmin_v16f16_vf:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_v16f16_vf:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmin.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_v16f16_vf:
 ; ZVFHMIN:       # %bb.0:
@@ -297,11 +297,11 @@ define <16 x half> @vfmin_v16f16_vf(<16 x half> %a, half %b) {
 }
 
 define <16 x half> @vfmin_v16f16_fv(<16 x half> %a, half %b) {
-; CHECK-LABEL: vfmin_v16f16_fv:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT:    vfmin.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmin_v16f16_fv:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmin.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmin_v16f16_fv:
 ; ZVFHMIN:       # %bb.0:
@@ -332,12 +332,6 @@ define <2 x float> @vfmin_v2f32_vv(<2 x float> %a, <2 x float> %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v2f32_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.minnum.v2f32(<2 x float> %a, <2 x float> %b)
   ret <2 x float> %v
 }
@@ -348,12 +342,6 @@ define <2 x float> @vfmin_v2f32_vf(<2 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v2f32_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x float> poison, float %b, i32 0
   %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.minnum.v2f32(<2 x float> %a, <2 x float> %splat)
@@ -366,12 +354,6 @@ define <2 x float> @vfmin_v2f32_fv(<2 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v2f32_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x float> poison, float %b, i32 0
   %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.minnum.v2f32(<2 x float> %splat, <2 x float> %a)
@@ -386,12 +368,6 @@ define <4 x float> @vfmin_v4f32_vv(<4 x float> %a, <4 x float> %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v4f32_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.minnum.v4f32(<4 x float> %a, <4 x float> %b)
   ret <4 x float> %v
 }
@@ -402,12 +378,6 @@ define <4 x float> @vfmin_v4f32_vf(<4 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v4f32_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x float> poison, float %b, i32 0
   %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.minnum.v4f32(<4 x float> %a, <4 x float> %splat)
@@ -420,12 +390,6 @@ define <4 x float> @vfmin_v4f32_fv(<4 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v4f32_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x float> poison, float %b, i32 0
   %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.minnum.v4f32(<4 x float> %splat, <4 x float> %a)
@@ -440,12 +404,6 @@ define <8 x float> @vfmin_v8f32_vv(<8 x float> %a, <8 x float> %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v8f32_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.minnum.v8f32(<8 x float> %a, <8 x float> %b)
   ret <8 x float> %v
 }
@@ -456,12 +414,6 @@ define <8 x float> @vfmin_v8f32_vf(<8 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v8f32_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x float> poison, float %b, i32 0
   %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.minnum.v8f32(<8 x float> %a, <8 x float> %splat)
@@ -474,12 +426,6 @@ define <8 x float> @vfmin_v8f32_fv(<8 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v8f32_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x float> poison, float %b, i32 0
   %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.minnum.v8f32(<8 x float> %splat, <8 x float> %a)
@@ -494,12 +440,6 @@ define <16 x float> @vfmin_v16f32_vv(<16 x float> %a, <16 x float> %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v16f32_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.minnum.v16f32(<16 x float> %a, <16 x float> %b)
   ret <16 x float> %v
 }
@@ -510,12 +450,6 @@ define <16 x float> @vfmin_v16f32_vf(<16 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v16f32_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x float> poison, float %b, i32 0
   %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.minnum.v16f32(<16 x float> %a, <16 x float> %splat)
@@ -528,12 +462,6 @@ define <16 x float> @vfmin_v16f32_fv(<16 x float> %a, float %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v16f32_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x float> poison, float %b, i32 0
   %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.minnum.v16f32(<16 x float> %splat, <16 x float> %a)
@@ -548,12 +476,6 @@ define <2 x double> @vfmin_v2f64_vv(<2 x double> %a, <2 x double> %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v2f64_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.minnum.v2f64(<2 x double> %a, <2 x double> %b)
   ret <2 x double> %v
 }
@@ -564,12 +486,6 @@ define <2 x double> @vfmin_v2f64_vf(<2 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v2f64_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x double> poison, double %b, i32 0
   %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.minnum.v2f64(<2 x double> %a, <2 x double> %splat)
@@ -582,12 +498,6 @@ define <2 x double> @vfmin_v2f64_fv(<2 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v2f64_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x double> poison, double %b, i32 0
   %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.minnum.v2f64(<2 x double> %splat, <2 x double> %a)
@@ -602,12 +512,6 @@ define <4 x double> @vfmin_v4f64_vv(<4 x double> %a, <4 x double> %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v4f64_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.minnum.v4f64(<4 x double> %a, <4 x double> %b)
   ret <4 x double> %v
 }
@@ -618,12 +522,6 @@ define <4 x double> @vfmin_v4f64_vf(<4 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v4f64_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x double> poison, double %b, i32 0
   %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.minnum.v4f64(<4 x double> %a, <4 x double> %splat)
@@ -636,12 +534,6 @@ define <4 x double> @vfmin_v4f64_fv(<4 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v4f64_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x double> poison, double %b, i32 0
   %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.minnum.v4f64(<4 x double> %splat, <4 x double> %a)
@@ -656,12 +548,6 @@ define <8 x double> @vfmin_v8f64_vv(<8 x double> %a, <8 x double> %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v8f64_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.minnum.v8f64(<8 x double> %a, <8 x double> %b)
   ret <8 x double> %v
 }
@@ -672,12 +558,6 @@ define <8 x double> @vfmin_v8f64_vf(<8 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v8f64_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x double> poison, double %b, i32 0
   %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.minnum.v8f64(<8 x double> %a, <8 x double> %splat)
@@ -690,12 +570,6 @@ define <8 x double> @vfmin_v8f64_fv(<8 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v8f64_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x double> poison, double %b, i32 0
   %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.minnum.v8f64(<8 x double> %splat, <8 x double> %a)
@@ -710,12 +584,6 @@ define <16 x double> @vfmin_v16f64_vv(<16 x double> %a, <16 x double> %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v16f64_vv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vv v8, v8, v16
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.minnum.v16f64(<16 x double> %a, <16 x double> %b)
   ret <16 x double> %v
 }
@@ -726,12 +594,6 @@ define <16 x double> @vfmin_v16f64_vf(<16 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v16f64_vf:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x double> poison, double %b, i32 0
   %splat = shufflevector <16 x double> %head, <16 x double> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.minnum.v16f64(<16 x double> %a, <16 x double> %splat)
@@ -744,12 +606,6 @@ define <16 x double> @vfmin_v16f64_fv(<16 x double> %a, double %b) {
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmin.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmin_v16f64_fv:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmin.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x double> poison, double %b, i32 0
   %splat = shufflevector <16 x double> %head, <16 x double> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.minnum.v16f64(<16 x double> %splat, <16 x double> %a)

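Aside on why only the f16 tests need split prefixes at all: zvfhmin supplies f16 loads, stores, and f16<->f32 conversions but no f16 arithmetic, so a half-precision vfmin is legalized by widening both operands to f32, doing the min there, and narrowing the result back. A hedged sketch of the shape of the ZVFHMIN blocks elided by the hunk context above, assuming <2 x half> operands (register numbers illustrative, not taken from this patch):

	vsetivli zero, 2, e16, mf4, ta, ma
	vfwcvt.f.f.v v10, v9                # widen %vb to f32
	vfwcvt.f.f.v v11, v8                # widen %va to f32
	vsetvli zero, zero, e32, mf2, ta, ma
	vfmin.vv v9, v11, v10               # the min itself, done in f32
	vsetvli zero, zero, e16, mf4, ta, ma
	vfncvt.f.f.w v8, v9                 # narrow the result back to f16
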
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll
index 672e54da959c14..f7e37401dfc1cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll
@@ -1,21 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.fmul.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vfmul_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vv_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmul.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vv_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmul.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vv_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -32,11 +32,11 @@ define <2 x half> @vfmul_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i3
 }
 
 define <2 x half> @vfmul_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vv_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vv_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmul.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vv_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -55,11 +55,11 @@ define <2 x half> @vfmul_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 ze
 }
 
 define <2 x half> @vfmul_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vf_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vf_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmul.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vf_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -83,11 +83,11 @@ define <2 x half> @vfmul_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zero
 }
 
 define <2 x half> @vfmul_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vf_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vf_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfmul.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vf_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -115,11 +115,11 @@ define <2 x half> @vfmul_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext
 declare <3 x half> @llvm.vp.fmul.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32)
 
 define <3 x half> @vfmul_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vv_v3f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmul.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vv_v3f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmul.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vv_v3f16:
 ; ZVFHMIN:       # %bb.0:
@@ -138,11 +138,11 @@ define <3 x half> @vfmul_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i3
 declare <4 x half> @llvm.vp.fmul.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vv_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmul.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vv_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmul.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vv_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -159,11 +159,11 @@ define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i3
 }
 
 define <4 x half> @vfmul_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vv_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vv_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmul.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vv_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -182,11 +182,11 @@ define <4 x half> @vfmul_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 ze
 }
 
 define <4 x half> @vfmul_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vf_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vf_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmul.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vf_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -210,11 +210,11 @@ define <4 x half> @vfmul_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zero
 }
 
 define <4 x half> @vfmul_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vf_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vf_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfmul.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vf_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -242,11 +242,11 @@ define <4 x half> @vfmul_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext
 declare <8 x half> @llvm.vp.fmul.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vfmul_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vv_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfmul.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vv_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmul.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vv_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -263,11 +263,11 @@ define <8 x half> @vfmul_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i3
 }
 
 define <8 x half> @vfmul_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vv_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfmul.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vv_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmul.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vv_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -286,11 +286,11 @@ define <8 x half> @vfmul_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 ze
 }
 
 define <8 x half> @vfmul_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vf_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vf_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmul.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vf_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -314,11 +314,11 @@ define <8 x half> @vfmul_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zero
 }
 
 define <8 x half> @vfmul_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vf_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vf_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfmul.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vf_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -346,11 +346,11 @@ define <8 x half> @vfmul_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext
 declare <16 x half> @llvm.vp.fmul.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vfmul_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vv_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfmul.vv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vv_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmul.vv v8, v8, v10, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vv_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -367,11 +367,11 @@ define <16 x half> @vfmul_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %
 }
 
 define <16 x half> @vfmul_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vv_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfmul.vv v8, v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vv_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmul.vv v8, v8, v10
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vv_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -390,11 +390,11 @@ define <16 x half> @vfmul_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i3
 }
 
 define <16 x half> @vfmul_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vf_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vf_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmul.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vf_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -418,11 +418,11 @@ define <16 x half> @vfmul_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32
 }
 
 define <16 x half> @vfmul_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfmul_vf_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfmul.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfmul_vf_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfmul.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfmul_vf_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -455,12 +455,6 @@ define <2 x float> @vfmul_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -471,12 +465,6 @@ define <2 x float> @vfmul_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
@@ -489,12 +477,6 @@ define <2 x float> @vfmul_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
@@ -507,12 +489,6 @@ define <2 x float> @vfmul_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
   %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -529,12 +505,6 @@ define <4 x float> @vfmul_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -545,12 +515,6 @@ define <4 x float> @vfmul_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
@@ -563,12 +527,6 @@ define <4 x float> @vfmul_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
@@ -581,12 +539,6 @@ define <4 x float> @vfmul_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
   %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -603,12 +555,6 @@ define <8 x float> @vfmul_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v10, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -619,12 +565,6 @@ define <8 x float> @vfmul_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
@@ -637,12 +577,6 @@ define <8 x float> @vfmul_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
@@ -655,12 +589,6 @@ define <8 x float> @vfmul_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
   %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -677,12 +605,6 @@ define <16 x float> @vfmul_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v12, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -693,12 +615,6 @@ define <16 x float> @vfmul_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
@@ -711,12 +627,6 @@ define <16 x float> @vfmul_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
@@ -729,12 +639,6 @@ define <16 x float> @vfmul_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
   %head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -751,12 +655,6 @@ define <2 x double> @vfmul_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -767,12 +665,6 @@ define <2 x double> @vfmul_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
@@ -785,12 +677,6 @@ define <2 x double> @vfmul_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
@@ -803,12 +689,6 @@ define <2 x double> @vfmul_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
   %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -825,12 +705,6 @@ define <4 x double> @vfmul_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v10, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -841,12 +715,6 @@ define <4 x double> @vfmul_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
@@ -859,12 +727,6 @@ define <4 x double> @vfmul_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
@@ -877,12 +739,6 @@ define <4 x double> @vfmul_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
   %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -899,12 +755,6 @@ define <8 x double> @vfmul_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v12, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -915,12 +765,6 @@ define <8 x double> @vfmul_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
@@ -933,12 +777,6 @@ define <8 x double> @vfmul_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
@@ -951,12 +789,6 @@ define <8 x double> @vfmul_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
   %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -973,12 +805,6 @@ define <16 x double> @vfmul_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -989,12 +815,6 @@ define <16 x double> @vfmul_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmul.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vv_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v16
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
@@ -1007,12 +827,6 @@ define <16 x double> @vfmul_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
@@ -1025,12 +839,6 @@ define <16 x double> @vfmul_vf_v16f64_unmasked(<16 x double> %va, double %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfmul.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfmul_vf_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
   %head = insertelement <16 x i1> poison, i1 true, i32 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
index 0b09ceeee8d895..7e484509e8feb7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
@@ -1,21 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.fneg.v2f16(<2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vfneg_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfneg_vv_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfneg.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfneg_vv_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfneg_vv_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -31,11 +31,11 @@ define <2 x half> @vfneg_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 }
 
 define <2 x half> @vfneg_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfneg_vv_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfneg.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfneg_vv_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfneg.v v8, v8
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfneg_vv_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -55,11 +55,11 @@ define <2 x half> @vfneg_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 declare <4 x half> @llvm.vp.fneg.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vfneg_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfneg_vv_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfneg.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfneg_vv_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfneg_vv_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -75,11 +75,11 @@ define <4 x half> @vfneg_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 }
 
 define <4 x half> @vfneg_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfneg_vv_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfneg.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfneg_vv_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfneg.v v8, v8
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfneg_vv_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -99,11 +99,11 @@ define <4 x half> @vfneg_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 declare <8 x half> @llvm.vp.fneg.v8f16(<8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vfneg_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfneg_vv_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfneg.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfneg_vv_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfneg_vv_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -119,11 +119,11 @@ define <8 x half> @vfneg_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 }
 
 define <8 x half> @vfneg_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfneg_vv_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfneg.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfneg_vv_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfneg.v v8, v8
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfneg_vv_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -143,11 +143,11 @@ define <8 x half> @vfneg_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 declare <16 x half> @llvm.vp.fneg.v16f16(<16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vfneg_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfneg_vv_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfneg.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfneg_vv_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfneg.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfneg_vv_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -163,11 +163,11 @@ define <16 x half> @vfneg_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
 }
 
 define <16 x half> @vfneg_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfneg_vv_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfneg.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfneg_vv_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfneg.v v8, v8
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfneg_vv_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -192,12 +192,6 @@ define <2 x float> @vfneg_vv_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -208,12 +202,6 @@ define <2 x float> @vfneg_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -228,12 +216,6 @@ define <4 x float> @vfneg_vv_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -244,12 +226,6 @@ define <4 x float> @vfneg_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
@@ -264,12 +240,6 @@ define <8 x float> @vfneg_vv_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -280,12 +250,6 @@ define <8 x float> @vfneg_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
@@ -300,12 +264,6 @@ define <16 x float> @vfneg_vv_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -316,12 +274,6 @@ define <16 x float> @vfneg_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
@@ -336,12 +288,6 @@ define <2 x double> @vfneg_vv_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -352,12 +298,6 @@ define <2 x double> @vfneg_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
@@ -372,12 +312,6 @@ define <4 x double> @vfneg_vv_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -388,12 +322,6 @@ define <4 x double> @vfneg_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
@@ -408,12 +336,6 @@ define <8 x double> @vfneg_vv_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -424,12 +346,6 @@ define <8 x double> @vfneg_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
@@ -444,12 +360,6 @@ define <15 x double> @vfneg_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v15f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <15 x double> @llvm.vp.fneg.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
   ret <15 x double> %v
 }
@@ -460,12 +370,6 @@ define <15 x double> @vfneg_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v15f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <15 x i1> poison, i1 true, i32 0
   %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
   %v = call <15 x double> @llvm.vp.fneg.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
@@ -480,12 +384,6 @@ define <16 x double> @vfneg_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.fneg.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -496,12 +394,6 @@ define <16 x double> @vfneg_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.fneg.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
@@ -531,27 +423,6 @@ define <32 x double> @vfneg_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vfneg.v v16, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v32f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    vslidedown.vi v24, v0, 2
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB26_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; ZVFHMIN-NEXT:    vfneg.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <32 x double> @llvm.vp.fneg.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
 }
@@ -574,24 +445,6 @@ define <32 x double> @vfneg_vv_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfneg.v v16, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfneg_vv_v32f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB27_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v8, v8
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfneg.v v16, v16
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <32 x i1> poison, i1 true, i32 0
   %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
   %v = call <32 x double> @llvm.vp.fneg.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll
index e12b4817bdbcf6..5491c3004ce051 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll
@@ -1,21 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.sqrt.v2f16(<2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vfsqrt_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfsqrt_vv_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfsqrt.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsqrt_vv_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfsqrt.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsqrt_vv_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -31,11 +31,11 @@ define <2 x half> @vfsqrt_vv_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl
 }
 
 define <2 x half> @vfsqrt_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfsqrt_vv_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsqrt_vv_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfsqrt.v v8, v8
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsqrt_vv_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -55,11 +55,11 @@ define <2 x half> @vfsqrt_vv_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 declare <4 x half> @llvm.vp.sqrt.v4f16(<4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vfsqrt_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfsqrt_vv_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfsqrt.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsqrt_vv_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfsqrt.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsqrt_vv_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -75,11 +75,11 @@ define <4 x half> @vfsqrt_vv_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl
 }
 
 define <4 x half> @vfsqrt_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfsqrt_vv_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsqrt_vv_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfsqrt.v v8, v8
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsqrt_vv_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -99,11 +99,11 @@ define <4 x half> @vfsqrt_vv_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 declare <8 x half> @llvm.vp.sqrt.v8f16(<8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vfsqrt_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfsqrt_vv_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfsqrt.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsqrt_vv_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfsqrt.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsqrt_vv_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -119,11 +119,11 @@ define <8 x half> @vfsqrt_vv_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl
 }
 
 define <8 x half> @vfsqrt_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfsqrt_vv_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsqrt_vv_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfsqrt.v v8, v8
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsqrt_vv_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -143,11 +143,11 @@ define <8 x half> @vfsqrt_vv_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 declare <16 x half> @llvm.vp.sqrt.v16f16(<16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vfsqrt_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfsqrt_vv_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfsqrt.v v8, v8, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsqrt_vv_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfsqrt.v v8, v8, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsqrt_vv_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -163,11 +163,11 @@ define <16 x half> @vfsqrt_vv_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext
 }
 
 define <16 x half> @vfsqrt_vv_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vfsqrt_vv_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfsqrt.v v8, v8
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsqrt_vv_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfsqrt.v v8, v8
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsqrt_vv_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -192,12 +192,6 @@ define <2 x float> @vfsqrt_vv_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.sqrt.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -208,12 +202,6 @@ define <2 x float> @vfsqrt_vv_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.sqrt.v2f32(<2 x float> %va, <2 x i1> %m, i32 %evl)
@@ -228,12 +216,6 @@ define <4 x float> @vfsqrt_vv_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.sqrt.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -244,12 +226,6 @@ define <4 x float> @vfsqrt_vv_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.sqrt.v4f32(<4 x float> %va, <4 x i1> %m, i32 %evl)
@@ -264,12 +240,6 @@ define <8 x float> @vfsqrt_vv_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %e
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.sqrt.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -280,12 +250,6 @@ define <8 x float> @vfsqrt_vv_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.sqrt.v8f32(<8 x float> %va, <8 x i1> %m, i32 %evl)
@@ -300,12 +264,6 @@ define <16 x float> @vfsqrt_vv_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.sqrt.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -316,12 +274,6 @@ define <16 x float> @vfsqrt_vv_v16f32_unmasked(<16 x float> %va, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.sqrt.v16f32(<16 x float> %va, <16 x i1> %m, i32 %evl)
@@ -336,12 +288,6 @@ define <2 x double> @vfsqrt_vv_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.sqrt.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -352,12 +298,6 @@ define <2 x double> @vfsqrt_vv_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.sqrt.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
@@ -372,12 +312,6 @@ define <4 x double> @vfsqrt_vv_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.sqrt.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -388,12 +322,6 @@ define <4 x double> @vfsqrt_vv_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.sqrt.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
@@ -408,12 +336,6 @@ define <8 x double> @vfsqrt_vv_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.sqrt.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -424,12 +346,6 @@ define <8 x double> @vfsqrt_vv_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.sqrt.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
@@ -444,12 +360,6 @@ define <15 x double> @vfsqrt_vv_v15f64(<15 x double> %va, <15 x i1> %m, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v15f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <15 x double> @llvm.vp.sqrt.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
   ret <15 x double> %v
 }
@@ -460,12 +370,6 @@ define <15 x double> @vfsqrt_vv_v15f64_unmasked(<15 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v15f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <15 x i1> poison, i1 true, i32 0
   %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
   %v = call <15 x double> @llvm.vp.sqrt.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
@@ -480,12 +384,6 @@ define <16 x double> @vfsqrt_vv_v16f64(<16 x double> %va, <16 x i1> %m, i32 zero
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.sqrt.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -496,12 +394,6 @@ define <16 x double> @vfsqrt_vv_v16f64_unmasked(<16 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v8, v8
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.sqrt.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
@@ -531,27 +423,6 @@ define <32 x double> @vfsqrt_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zero
 ; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vfsqrt.v v16, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v32f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    vslidedown.vi v24, v0, 2
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB26_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; ZVFHMIN-NEXT:    vfsqrt.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <32 x double> @llvm.vp.sqrt.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
   ret <32 x double> %v
 }
@@ -574,24 +445,6 @@ define <32 x double> @vfsqrt_vv_v32f64_unmasked(<32 x double> %va, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsqrt.v v16, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsqrt_vv_v32f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    li a2, 16
-; ZVFHMIN-NEXT:    mv a1, a0
-; ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    li a1, 16
-; ZVFHMIN-NEXT:  .LBB27_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8
-; ZVFHMIN-NEXT:    addi a1, a0, -16
-; ZVFHMIN-NEXT:    sltu a0, a0, a1
-; ZVFHMIN-NEXT:    addi a0, a0, -1
-; ZVFHMIN-NEXT:    and a0, a0, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v16, v16
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <32 x i1> poison, i1 true, i32 0
   %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
   %v = call <32 x double> @llvm.vp.sqrt.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll
index 9564a787e0e17b..2bfddab8e42f37 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll
@@ -1,21 +1,21 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
-; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFHMIN
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
 
 declare <2 x half> @llvm.vp.fsub.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)
 
 define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vv_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfsub.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vv_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfsub.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vv_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -32,11 +32,11 @@ define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i3
 }
 
 define <2 x half> @vfsub_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vv_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vv_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfsub.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vv_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -55,11 +55,11 @@ define <2 x half> @vfsub_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 ze
 }
 
 define <2 x half> @vfsub_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vf_v2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vf_v2f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfsub.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vf_v2f16:
 ; ZVFHMIN:       # %bb.0:
@@ -83,11 +83,11 @@ define <2 x half> @vfsub_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zero
 }
 
 define <2 x half> @vfsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vf_v2f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vf_v2f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfsub.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vf_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -115,11 +115,11 @@ define <2 x half> @vfsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext
 declare <3 x half> @llvm.vp.fsub.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32)
 
 define <3 x half> @vfsub_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vv_v3f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfsub.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vv_v3f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfsub.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vv_v3f16:
 ; ZVFHMIN:       # %bb.0:
@@ -138,11 +138,11 @@ define <3 x half> @vfsub_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i3
 declare <4 x half> @llvm.vp.fsub.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32)
 
 define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vv_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfsub.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vv_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfsub.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vv_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -159,11 +159,11 @@ define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i3
 }
 
 define <4 x half> @vfsub_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vv_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vv_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfsub.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vv_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -182,11 +182,11 @@ define <4 x half> @vfsub_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 ze
 }
 
 define <4 x half> @vfsub_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vf_v4f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vf_v4f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfsub.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vf_v4f16:
 ; ZVFHMIN:       # %bb.0:
@@ -210,11 +210,11 @@ define <4 x half> @vfsub_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zero
 }
 
 define <4 x half> @vfsub_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vf_v4f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vf_v4f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfsub.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vf_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -242,11 +242,11 @@ define <4 x half> @vfsub_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext
 declare <8 x half> @llvm.vp.fsub.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32)
 
 define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vv_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfsub.vv v8, v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vv_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfsub.vv v8, v8, v9, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vv_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -263,11 +263,11 @@ define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i3
 }
 
 define <8 x half> @vfsub_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vv_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfsub.vv v8, v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vv_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfsub.vv v8, v8, v9
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vv_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -286,11 +286,11 @@ define <8 x half> @vfsub_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 ze
 }
 
 define <8 x half> @vfsub_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vf_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vf_v8f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfsub.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vf_v8f16:
 ; ZVFHMIN:       # %bb.0:
@@ -314,11 +314,11 @@ define <8 x half> @vfsub_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zero
 }
 
 define <8 x half> @vfsub_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vf_v8f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vf_v8f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfsub.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vf_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -346,11 +346,11 @@ define <8 x half> @vfsub_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext
 declare <16 x half> @llvm.vp.fsub.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32)
 
 define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vv_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfsub.vv v8, v8, v10, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vv_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfsub.vv v8, v8, v10, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vv_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -367,11 +367,11 @@ define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %
 }
 
 define <16 x half> @vfsub_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vv_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfsub.vv v8, v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vv_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfsub.vv v8, v8, v10
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vv_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -390,11 +390,11 @@ define <16 x half> @vfsub_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i3
 }
 
 define <16 x half> @vfsub_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vf_v16f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vf_v16f16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfsub.vf v8, v8, fa0, v0.t
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vf_v16f16:
 ; ZVFHMIN:       # %bb.0:
@@ -418,11 +418,11 @@ define <16 x half> @vfsub_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32
 }
 
 define <16 x half> @vfsub_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfsub_vf_v16f16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vfsub.vf v8, v8, fa0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfsub_vf_v16f16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vfsub.vf v8, v8, fa0
+; ZVFH-NEXT:    ret
 ;
 ; ZVFHMIN-LABEL: vfsub_vf_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
@@ -455,12 +455,6 @@ define <2 x float> @vfsub_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
   ret <2 x float> %v
 }
@@ -471,12 +465,6 @@ define <2 x float> @vfsub_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
@@ -489,12 +477,6 @@ define <2 x float> @vfsub_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v2f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
   %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
@@ -507,12 +489,6 @@ define <2 x float> @vfsub_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v2f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x float> poison, float %b, i32 0
   %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer
   %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -529,12 +505,6 @@ define <4 x float> @vfsub_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
   ret <4 x float> %v
 }
@@ -545,12 +515,6 @@ define <4 x float> @vfsub_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
@@ -563,12 +527,6 @@ define <4 x float> @vfsub_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
   %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
@@ -581,12 +539,6 @@ define <4 x float> @vfsub_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v4f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x float> poison, float %b, i32 0
   %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer
   %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -603,12 +555,6 @@ define <8 x float> @vfsub_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v10, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
   ret <8 x float> %v
 }
@@ -619,12 +565,6 @@ define <8 x float> @vfsub_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
@@ -637,12 +577,6 @@ define <8 x float> @vfsub_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
   %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
@@ -655,12 +589,6 @@ define <8 x float> @vfsub_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroe
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v8f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x float> poison, float %b, i32 0
   %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer
   %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -677,12 +605,6 @@ define <16 x float> @vfsub_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v12, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
   ret <16 x float> %v
 }
@@ -693,12 +615,6 @@ define <16 x float> @vfsub_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
@@ -711,12 +627,6 @@ define <16 x float> @vfsub_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v16f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
   %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
@@ -729,12 +639,6 @@ define <16 x float> @vfsub_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v16f32_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x float> poison, float %b, i32 0
   %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer
   %head = insertelement <16 x i1> poison, i1 true, i32 0
@@ -751,12 +655,6 @@ define <2 x double> @vfsub_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v9, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
   ret <2 x double> %v
 }
@@ -767,12 +665,6 @@ define <2 x double> @vfsub_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v9
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v9
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <2 x i1> poison, i1 true, i32 0
   %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
@@ -785,12 +677,6 @@ define <2 x double> @vfsub_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v2f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
   %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
@@ -803,12 +689,6 @@ define <2 x double> @vfsub_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v2f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <2 x double> poison, double %b, i32 0
   %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer
   %head = insertelement <2 x i1> poison, i1 true, i32 0
@@ -825,12 +705,6 @@ define <4 x double> @vfsub_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v10, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
   ret <4 x double> %v
 }
@@ -841,12 +715,6 @@ define <4 x double> @vfsub_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v10
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <4 x i1> poison, i1 true, i32 0
   %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
@@ -859,12 +727,6 @@ define <4 x double> @vfsub_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
   %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
@@ -877,12 +739,6 @@ define <4 x double> @vfsub_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v4f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <4 x double> poison, double %b, i32 0
   %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer
   %head = insertelement <4 x i1> poison, i1 true, i32 0
@@ -899,12 +755,6 @@ define <8 x double> @vfsub_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v12, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
   ret <8 x double> %v
 }
@@ -915,12 +765,6 @@ define <8 x double> @vfsub_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v12
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <8 x i1> poison, i1 true, i32 0
   %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
@@ -933,12 +777,6 @@ define <8 x double> @vfsub_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i3
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v8f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
   %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
@@ -951,12 +789,6 @@ define <8 x double> @vfsub_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v8f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
   %head = insertelement <8 x i1> poison, i1 true, i32 0
@@ -973,12 +805,6 @@ define <16 x double> @vfsub_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v16, v0.t
-; ZVFHMIN-NEXT:    ret
   %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
   ret <16 x double> %v
 }
@@ -989,12 +815,6 @@ define <16 x double> @vfsub_vv_v16f64_unmasked(<16 x double> %va, <16 x double>
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsub.vv v8, v8, v16
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vv_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v16
-; ZVFHMIN-NEXT:    ret
   %head = insertelement <16 x i1> poison, i1 true, i32 0
   %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
@@ -1007,12 +827,6 @@ define <16 x double> @vfsub_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v16f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0, v0.t
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
   %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
@@ -1025,12 +839,6 @@ define <16 x double> @vfsub_vf_v16f64_unmasked(<16 x double> %va, double %b, i32
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vfsub.vf v8, v8, fa0
 ; CHECK-NEXT:    ret
-;
-; ZVFHMIN-LABEL: vfsub_vf_v16f64_unmasked:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    ret
   %elt.head = insertelement <16 x double> poison, double %b, i32 0
   %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer
   %head = insertelement <16 x i1> poison, i1 true, i32 0

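For readers skimming the diff, here is a minimal standalone sketch (not part of the commit; the function names and exact instruction sequences below are illustrative assumptions) of how the combined prefixes behave under FileCheck: each RUN line enables the shared CHECK prefix plus a configuration-specific one, so output that is identical under Zvfh and Zvfhmin is verified once via CHECK, while half-precision cases that lower differently keep separate ZVFH/ZVFHMIN blocks.

; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d -riscv-v-vector-bits-min=128 \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

; f64 lowering does not depend on the half-precision extension, so a single
; CHECK block covers both runs.
define <2 x double> @shared_f64(<2 x double> %va) {
; CHECK-LABEL: shared_f64:
; CHECK:         vfsqrt.v v8, v8
  %v = call <2 x double> @llvm.sqrt.v2f64(<2 x double> %va)
  ret <2 x double> %v
}
declare <2 x double> @llvm.sqrt.v2f64(<2 x double>)

; f16 arithmetic lowers directly under Zvfh but is widened to f32 and
; narrowed back under Zvfhmin, so the two configurations keep separate
; check blocks.
define <2 x half> @split_f16(<2 x half> %va) {
; ZVFH-LABEL: split_f16:
; ZVFH:          vfadd.vv v8, v8, v8
;
; ZVFHMIN-LABEL: split_f16:
; ZVFHMIN:       vfwcvt.f.f.v
; ZVFHMIN:       vfadd.vv
; ZVFHMIN:       vfncvt.f.f.w
  %v = fadd <2 x half> %va, %va
  ret <2 x half> %v
}

Under this scheme, update_llc_test_checks.py emits one CHECK block wherever the two configurations agree and paired ZVFH/ZVFHMIN blocks where they diverge, which is what the removal of the duplicated ZVFHMIN blocks above reflects.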