[llvm] abdf4ca - [RISCV] Simplify fixed-vector-fp.ll run lines. NFC

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Sun Oct 6 09:20:25 PDT 2024


Author: Luke Lau
Date: 2024-10-07T00:20:13+08:00
New Revision: abdf4ca4f12c834e0541f996596584a65fda44ef

URL: https://github.com/llvm/llvm-project/commit/abdf4ca4f12c834e0541f996596584a65fda44ef
DIFF: https://github.com/llvm/llvm-project/commit/abdf4ca4f12c834e0541f996596584a65fda44ef.diff

LOG: [RISCV] Simplify fixed-vector-fp.ll run lines. NFC

This removes the different scalar fp16 configurations, i.e. zfh and
zfhmin, since all of these ops should be lowerable without
scalarizing.

Doing so reveals a couple of cases where we unexpectedly end up
scalarizing, e.g. sqrt/fabs/round with v6f16 and zvfhmin.

It also removes the zvl256b configurations, since I couldn't find
anything that specifically needed them.
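For illustration, the v6f16 fabs case boils down to a small standalone
reproducer (distilled from the test below, run with one of the new
zvfhmin run lines):

  ; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin -verify-machineinstrs < %s

  declare <6 x half> @llvm.fabs.v6f16(<6 x half>)

  define void @fabs_v6f16(ptr %x) {
    %a = load <6 x half>, ptr %x
    %b = call <6 x half> @llvm.fabs.v6f16(<6 x half> %a)
    store <6 x half> %b, ptr %x
    ret void
  }

With +zvfh this still lowers to a plain vector fabs, and even the v8f16
zvfhmin case stays vectorized via a vand.vx of the sign bit, but the
updated RV32/RV64-ZVFHMIN check lines for v6f16 below go element by
element through __extendhfsf2/fabs.s/__truncsfhf2 libcalls. Much of the
remaining check-line churn is just the LMUL changing (e.g. mf2 -> m1),
since dropping +zvl256b takes the minimum VLEN back to the 128 implied
by +v.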

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index 69faf269ae3db6..ea7829f2d6c658 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -1,13 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfhmin,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfhmin,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfhmin,+zvl256b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN-RV32,ZVFHMIN-ZFH-RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfhmin,+zvl256b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN-RV64,ZVFHMIN-ZFH-RV64
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfhmin,+zvfhmin,+zvl256b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN-RV32,ZVFHMIN-ZFHIN-RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfhmin,+zvfhmin,+zvl256b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,ZVFHMIN-RV64,ZVFHMIN-ZFHIN-RV64
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfhmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32-ZVFHMIN
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64-ZVFHMIN
 
 define void @fadd_v8f16(ptr %x, ptr %y) {
 ; ZVFH-LABEL: fadd_v8f16:
@@ -21,16 +16,16 @@ define void @fadd_v8f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: fadd_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = load <8 x half>, ptr %y
@@ -51,17 +46,17 @@ define void @fadd_v6f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: fadd_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v10
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = load <6 x half>, ptr %y
@@ -71,23 +66,14 @@ define void @fadd_v6f16(ptr %x, ptr %y) {
 }
 
 define void @fadd_v4f32(ptr %x, ptr %y) {
-; ZVFH-LABEL: fadd_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v9, (a1)
-; ZVFH-NEXT:    vfadd.vv v8, v8, v9
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fadd_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fadd_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v9, (a1)
+; CHECK-NEXT:    vfadd.vv v8, v8, v9
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = load <4 x float>, ptr %y
   %c = fadd <4 x float> %a, %b
@@ -123,16 +109,16 @@ define void @fsub_v8f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: fsub_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfsub.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = load <8 x half>, ptr %y
@@ -153,17 +139,17 @@ define void @fsub_v6f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: fsub_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v10
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfsub.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = load <6 x half>, ptr %y
@@ -173,23 +159,14 @@ define void @fsub_v6f16(ptr %x, ptr %y) {
 }
 
 define void @fsub_v4f32(ptr %x, ptr %y) {
-; ZVFH-LABEL: fsub_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v9, (a1)
-; ZVFH-NEXT:    vfsub.vv v8, v8, v9
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fsub_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fsub_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v9, (a1)
+; CHECK-NEXT:    vfsub.vv v8, v8, v9
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = load <4 x float>, ptr %y
   %c = fsub <4 x float> %a, %b
@@ -225,16 +202,16 @@ define void @fmul_v8f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: fmul_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmul.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = load <8 x half>, ptr %y
@@ -255,17 +232,17 @@ define void @fmul_v6f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: fmul_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v10
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmul.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = load <6 x half>, ptr %y
@@ -275,23 +252,14 @@ define void @fmul_v6f16(ptr %x, ptr %y) {
 }
 
 define void @fmul_v4f32(ptr %x, ptr %y) {
-; ZVFH-LABEL: fmul_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v9, (a1)
-; ZVFH-NEXT:    vfmul.vv v8, v8, v9
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fmul_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fmul_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v9, (a1)
+; CHECK-NEXT:    vfmul.vv v8, v8, v9
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = load <4 x float>, ptr %y
   %c = fmul <4 x float> %a, %b
@@ -327,16 +295,16 @@ define void @fdiv_v8f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: fdiv_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfdiv.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = load <8 x half>, ptr %y
@@ -357,17 +325,17 @@ define void @fdiv_v6f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: fdiv_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v10
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfdiv.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = load <6 x half>, ptr %y
@@ -377,23 +345,14 @@ define void @fdiv_v6f16(ptr %x, ptr %y) {
 }
 
 define void @fdiv_v4f32(ptr %x, ptr %y) {
-; ZVFH-LABEL: fdiv_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v9, (a1)
-; ZVFH-NEXT:    vfdiv.vv v8, v8, v9
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fdiv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fdiv_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v9, (a1)
+; CHECK-NEXT:    vfdiv.vv v8, v8, v9
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = load <4 x float>, ptr %y
   %c = fdiv <4 x float> %a, %b
@@ -428,7 +387,7 @@ define void @fneg_v8f16(ptr %x) {
 ;
 ; ZVFHMIN-LABEL: fneg_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
 ; ZVFHMIN-NEXT:    lui a1, 8
 ; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
@@ -451,7 +410,7 @@ define void @fneg_v6f16(ptr %x) {
 ;
 ; ZVFHMIN-LABEL: fneg_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
 ; ZVFHMIN-NEXT:    lui a1, 8
 ; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
@@ -464,21 +423,13 @@ define void @fneg_v6f16(ptr %x) {
 }
 
 define void @fneg_v4f32(ptr %x) {
-; ZVFH-LABEL: fneg_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfneg.v v8, v8
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fneg_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfneg.v v8, v8
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fneg_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfneg.v v8, v8
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = fneg <4 x float> %a
   store <4 x float> %b, ptr %x
@@ -510,7 +461,7 @@ define void @fabs_v8f16(ptr %x) {
 ;
 ; ZVFHMIN-LABEL: fabs_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
 ; ZVFHMIN-NEXT:    lui a1, 8
 ; ZVFHMIN-NEXT:    addi a1, a1, -1
@@ -533,15 +484,259 @@ define void @fabs_v6f16(ptr %x) {
 ; ZVFH-NEXT:    vse16.v v8, (a0)
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: fabs_v6f16:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    lui a1, 8
-; ZVFHMIN-NEXT:    addi a1, a1, -1
-; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
-; ZVFHMIN-NEXT:    vse16.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; RV32-ZVFHMIN-LABEL: fabs_v6f16:
+; RV32-ZVFHMIN:       # %bb.0:
+; RV32-ZVFHMIN-NEXT:    addi sp, sp, -48
+; RV32-ZVFHMIN-NEXT:    .cfi_def_cfa_offset 48
+; RV32-ZVFHMIN-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    .cfi_offset ra, -4
+; RV32-ZVFHMIN-NEXT:    .cfi_offset s0, -8
+; RV32-ZVFHMIN-NEXT:    .cfi_offset s1, -12
+; RV32-ZVFHMIN-NEXT:    .cfi_offset fs0, -24
+; RV32-ZVFHMIN-NEXT:    csrr a1, vlenb
+; RV32-ZVFHMIN-NEXT:    slli a1, a1, 1
+; RV32-ZVFHMIN-NEXT:    sub sp, sp, a1
+; RV32-ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV32-ZVFHMIN-NEXT:    mv s0, a0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vle16.v v8, (a0)
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fmv.s fs0, fa0
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa0, fa0
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fs0
+; RV32-ZVFHMIN-NEXT:    fmv.x.w s1, fa0
+; RV32-ZVFHMIN-NEXT:    fmv.s fa0, fa5
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vmv.v.x v8, a0
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, s1
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa0, fa0
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa0, fa0
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 4
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa0, fa0
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 5
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa0, fa0
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-ZVFHMIN-NEXT:    vse16.v v8, (s0)
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    slli a0, a0, 1
+; RV32-ZVFHMIN-NEXT:    add sp, sp, a0
+; RV32-ZVFHMIN-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    addi sp, sp, 48
+; RV32-ZVFHMIN-NEXT:    ret
+;
+; RV64-ZVFHMIN-LABEL: fabs_v6f16:
+; RV64-ZVFHMIN:       # %bb.0:
+; RV64-ZVFHMIN-NEXT:    addi sp, sp, -48
+; RV64-ZVFHMIN-NEXT:    .cfi_def_cfa_offset 48
+; RV64-ZVFHMIN-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    .cfi_offset ra, -8
+; RV64-ZVFHMIN-NEXT:    .cfi_offset s0, -16
+; RV64-ZVFHMIN-NEXT:    .cfi_offset s1, -24
+; RV64-ZVFHMIN-NEXT:    .cfi_offset fs0, -32
+; RV64-ZVFHMIN-NEXT:    csrr a1, vlenb
+; RV64-ZVFHMIN-NEXT:    slli a1, a1, 1
+; RV64-ZVFHMIN-NEXT:    sub sp, sp, a1
+; RV64-ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV64-ZVFHMIN-NEXT:    mv s0, a0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vle16.v v8, (a0)
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fmv.s fs0, fa0
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa0, fa0
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fs0
+; RV64-ZVFHMIN-NEXT:    fmv.x.w s1, fa0
+; RV64-ZVFHMIN-NEXT:    fmv.s fa0, fa5
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vmv.v.x v8, a0
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, s1
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa0, fa0
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa0, fa0
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 4
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa0, fa0
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 5
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa0, fa0
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV64-ZVFHMIN-NEXT:    vse16.v v8, (s0)
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    slli a0, a0, 1
+; RV64-ZVFHMIN-NEXT:    add sp, sp, a0
+; RV64-ZVFHMIN-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    addi sp, sp, 48
+; RV64-ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = call <6 x half> @llvm.fabs.v6f16(<6 x half> %a)
   store <6 x half> %b, ptr %x
@@ -550,21 +745,13 @@ define void @fabs_v6f16(ptr %x) {
 declare <6 x half> @llvm.fabs.v6f16(<6 x half>)
 
 define void @fabs_v4f32(ptr %x) {
-; ZVFH-LABEL: fabs_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfabs.v v8, v8
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fabs_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfabs.v v8, v8
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fabs_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfabs.v v8, v8
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = call <4 x float> @llvm.fabs.v4f32(<4 x float> %a)
   store <4 x float> %b, ptr %x
@@ -599,7 +786,7 @@ define void @copysign_v8f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: copysign_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    lui a1, 8
@@ -629,15 +816,15 @@ define void @copysign_v6f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: copysign_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    lui a1, 8
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
 ; ZVFHMIN-NEXT:    addi a1, a1, -1
 ; ZVFHMIN-NEXT:    vand.vx v9, v9, a1
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vor.vv v8, v9, v8
 ; ZVFHMIN-NEXT:    vse16.v v8, (a0)
 ; ZVFHMIN-NEXT:    ret
@@ -650,23 +837,14 @@ define void @copysign_v6f16(ptr %x, ptr %y) {
 declare <6 x half> @llvm.copysign.v6f16(<6 x half>, <6 x half>)
 
 define void @copysign_v4f32(ptr %x, ptr %y) {
-; ZVFH-LABEL: copysign_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v9, (a1)
-; ZVFH-NEXT:    vfsgnj.vv v8, v8, v9
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: copysign_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: copysign_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v9, (a1)
+; CHECK-NEXT:    vfsgnj.vv v8, v8, v9
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = load <4 x float>, ptr %y
   %c = call <4 x float> @llvm.copysign.v4f32(<4 x float> %a, <4 x float> %b)
@@ -703,9 +881,9 @@ define void @copysign_vf_v8f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: copysign_vf_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
 ; ZVFHMIN-NEXT:    vmv.v.x v9, a1
 ; ZVFHMIN-NEXT:    lui a1, 8
 ; ZVFHMIN-NEXT:    addi a2, a1, -1
@@ -733,20 +911,16 @@ define void @copysign_vf_v6f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: copysign_vf_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa5
-; ZVFHMIN-NEXT:    li a2, 192
-; ZVFHMIN-NEXT:    vmv.s.x v0, a2
-; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v9, a2
-; ZVFHMIN-NEXT:    vmerge.vxm v9, v9, a1, v0
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
 ; ZVFHMIN-NEXT:    lui a1, 8
+; ZVFHMIN-NEXT:    addi a2, a1, -1
+; ZVFHMIN-NEXT:    vand.vx v8, v8, a2
 ; ZVFHMIN-NEXT:    vand.vx v9, v9, a1
-; ZVFHMIN-NEXT:    addi a1, a1, -1
-; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vor.vv v8, v8, v9
 ; ZVFHMIN-NEXT:    vse16.v v8, (a0)
 ; ZVFHMIN-NEXT:    ret
@@ -759,21 +933,13 @@ define void @copysign_vf_v6f16(ptr %x, half %y) {
 }
 
 define void @copysign_vf_v4f32(ptr %x, float %y) {
-; ZVFH-LABEL: copysign_vf_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfsgnj.vf v8, v8, fa0
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: copysign_vf_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfsgnj.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: copysign_vf_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfsgnj.vf v8, v8, fa0
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = insertelement <4 x float> poison, float %y, i32 0
   %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
@@ -810,7 +976,7 @@ define void @copysign_neg_v8f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: copysign_neg_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    lui a1, 8
@@ -841,16 +1007,16 @@ define void @copysign_neg_v6f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: copysign_neg_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    lui a1, 8
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
 ; ZVFHMIN-NEXT:    addi a2, a1, -1
 ; ZVFHMIN-NEXT:    vand.vx v9, v9, a2
 ; ZVFHMIN-NEXT:    vand.vx v8, v8, a1
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vor.vv v8, v9, v8
 ; ZVFHMIN-NEXT:    vse16.v v8, (a0)
 ; ZVFHMIN-NEXT:    ret
@@ -863,23 +1029,14 @@ define void @copysign_neg_v6f16(ptr %x, ptr %y) {
 }
 
 define void @copysign_neg_v4f32(ptr %x, ptr %y) {
-; ZVFH-LABEL: copysign_neg_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v9, (a1)
-; ZVFH-NEXT:    vfsgnjn.vv v8, v8, v9
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: copysign_neg_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vfsgnjn.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: copysign_neg_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v9, (a1)
+; CHECK-NEXT:    vfsgnjn.vv v8, v8, v9
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = load <4 x float>, ptr %y
   %c = fneg <4 x float> %b
@@ -918,7 +1075,7 @@ define void @copysign_neg_trunc_v4f16_v4f32(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: copysign_neg_trunc_v4f16_v4f32:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
 ; ZVFHMIN-NEXT:    vle32.v v9, (a1)
 ; ZVFHMIN-NEXT:    lui a1, 8
@@ -955,17 +1112,17 @@ define void @copysign_neg_trunc_v3f16_v3f32(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: copysign_neg_trunc_v3f16_v3f32:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 3, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 3, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
 ; ZVFHMIN-NEXT:    vle32.v v9, (a1)
 ; ZVFHMIN-NEXT:    lui a1, 8
 ; ZVFHMIN-NEXT:    addi a2, a1, -1
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vand.vx v8, v8, a2
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v9
 ; ZVFHMIN-NEXT:    vxor.vx v9, v10, a1
 ; ZVFHMIN-NEXT:    vand.vx v9, v9, a1
-; ZVFHMIN-NEXT:    vsetivli zero, 3, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 3, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vor.vv v8, v8, v9
 ; ZVFHMIN-NEXT:    vse16.v v8, (a0)
 ; ZVFHMIN-NEXT:    ret
@@ -1010,14 +1167,14 @@ define void @sqrt_v8f16(ptr %x) {
 ;
 ; ZVFHMIN-LABEL: sqrt_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfsqrt.v v8, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = call <8 x half> @llvm.sqrt.v8f16(<8 x half> %a)
@@ -1035,18 +1192,259 @@ define void @sqrt_v6f16(ptr %x) {
 ; ZVFH-NEXT:    vse16.v v8, (a0)
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: sqrt_v6f16:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v9
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
-; ZVFHMIN-NEXT:    ret
+; RV32-ZVFHMIN-LABEL: sqrt_v6f16:
+; RV32-ZVFHMIN:       # %bb.0:
+; RV32-ZVFHMIN-NEXT:    addi sp, sp, -48
+; RV32-ZVFHMIN-NEXT:    .cfi_def_cfa_offset 48
+; RV32-ZVFHMIN-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    .cfi_offset ra, -4
+; RV32-ZVFHMIN-NEXT:    .cfi_offset s0, -8
+; RV32-ZVFHMIN-NEXT:    .cfi_offset s1, -12
+; RV32-ZVFHMIN-NEXT:    .cfi_offset fs0, -24
+; RV32-ZVFHMIN-NEXT:    csrr a1, vlenb
+; RV32-ZVFHMIN-NEXT:    slli a1, a1, 1
+; RV32-ZVFHMIN-NEXT:    sub sp, sp, a1
+; RV32-ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV32-ZVFHMIN-NEXT:    mv s0, a0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vle16.v v8, (a0)
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fmv.s fs0, fa0
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fsqrt.s fa0, fa0
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fsqrt.s fa5, fs0
+; RV32-ZVFHMIN-NEXT:    fmv.x.w s1, fa0
+; RV32-ZVFHMIN-NEXT:    fmv.s fa0, fa5
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vmv.v.x v8, a0
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, s1
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fsqrt.s fa0, fa0
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fsqrt.s fa0, fa0
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 4
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fsqrt.s fa0, fa0
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 5
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fsqrt.s fa0, fa0
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-ZVFHMIN-NEXT:    vse16.v v8, (s0)
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    slli a0, a0, 1
+; RV32-ZVFHMIN-NEXT:    add sp, sp, a0
+; RV32-ZVFHMIN-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    addi sp, sp, 48
+; RV32-ZVFHMIN-NEXT:    ret
+;
+; RV64-ZVFHMIN-LABEL: sqrt_v6f16:
+; RV64-ZVFHMIN:       # %bb.0:
+; RV64-ZVFHMIN-NEXT:    addi sp, sp, -48
+; RV64-ZVFHMIN-NEXT:    .cfi_def_cfa_offset 48
+; RV64-ZVFHMIN-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    .cfi_offset ra, -8
+; RV64-ZVFHMIN-NEXT:    .cfi_offset s0, -16
+; RV64-ZVFHMIN-NEXT:    .cfi_offset s1, -24
+; RV64-ZVFHMIN-NEXT:    .cfi_offset fs0, -32
+; RV64-ZVFHMIN-NEXT:    csrr a1, vlenb
+; RV64-ZVFHMIN-NEXT:    slli a1, a1, 1
+; RV64-ZVFHMIN-NEXT:    sub sp, sp, a1
+; RV64-ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV64-ZVFHMIN-NEXT:    mv s0, a0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vle16.v v8, (a0)
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fmv.s fs0, fa0
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fsqrt.s fa0, fa0
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fsqrt.s fa5, fs0
+; RV64-ZVFHMIN-NEXT:    fmv.x.w s1, fa0
+; RV64-ZVFHMIN-NEXT:    fmv.s fa0, fa5
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vmv.v.x v8, a0
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, s1
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fsqrt.s fa0, fa0
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fsqrt.s fa0, fa0
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 4
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fsqrt.s fa0, fa0
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 5
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fsqrt.s fa0, fa0
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV64-ZVFHMIN-NEXT:    vse16.v v8, (s0)
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    slli a0, a0, 1
+; RV64-ZVFHMIN-NEXT:    add sp, sp, a0
+; RV64-ZVFHMIN-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    addi sp, sp, 48
+; RV64-ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = call <6 x half> @llvm.sqrt.v6f16(<6 x half> %a)
   store <6 x half> %b, ptr %x
@@ -1055,21 +1453,13 @@ define void @sqrt_v6f16(ptr %x) {
 declare <6 x half> @llvm.sqrt.v6f16(<6 x half>)
 
 define void @sqrt_v4f32(ptr %x) {
-; ZVFH-LABEL: sqrt_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfsqrt.v v8, v8
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: sqrt_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfsqrt.v v8, v8
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: sqrt_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = call <4 x float> @llvm.sqrt.v4f32(<4 x float> %a)
   store <4 x float> %b, ptr %x
@@ -1105,18 +1495,18 @@ define void @fma_v8f16(ptr %x, ptr %y, ptr %z) {
 ;
 ; ZVFHMIN-LABEL: fma_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a2)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    vle16.v v10, (a1)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v9, v8, v11
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
-; ZVFHMIN-NEXT:    vse16.v v8, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v14, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = load <8 x half>, ptr %y
@@ -1140,19 +1530,19 @@ define void @fma_v6f16(ptr %x, ptr %y, ptr %z) {
 ;
 ; ZVFHMIN-LABEL: fma_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a2)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    vle16.v v10, (a1)
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v9, v8, v11
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
-; ZVFHMIN-NEXT:    vse16.v v8, (a0)
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v14, v12
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = load <6 x half>, ptr %y
@@ -1164,25 +1554,15 @@ define void @fma_v6f16(ptr %x, ptr %y, ptr %z) {
 declare <6 x half> @llvm.fma.v6f16(<6 x half>, <6 x half>, <6 x half>)
 
 define void @fma_v4f32(ptr %x, ptr %y, ptr %z) {
-; ZVFH-LABEL: fma_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v9, (a1)
-; ZVFH-NEXT:    vle32.v v10, (a2)
-; ZVFH-NEXT:    vfmacc.vv v10, v8, v9
-; ZVFH-NEXT:    vse32.v v10, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fma_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vle32.v v10, (a2)
-; ZVFHMIN-NEXT:    vfmacc.vv v10, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v10, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fma_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v9, (a1)
+; CHECK-NEXT:    vle32.v v10, (a2)
+; CHECK-NEXT:    vfmacc.vv v10, v8, v9
+; CHECK-NEXT:    vse32.v v10, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = load <4 x float>, ptr %y
   %c = load <4 x float>, ptr %z
@@ -1224,20 +1604,20 @@ define void @fmsub_v8f16(ptr %x, ptr %y, ptr %z) {
 ;
 ; ZVFHMIN-LABEL: fmsub_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a2)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    vle16.v v10, (a1)
 ; ZVFHMIN-NEXT:    lui a1, 8
 ; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v9, v8, v11
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
-; ZVFHMIN-NEXT:    vse16.v v8, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v14, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = load <8 x half>, ptr %y
@@ -1261,21 +1641,21 @@ define void @fmsub_v6f16(ptr %x, ptr %y, ptr %z) {
 ;
 ; ZVFHMIN-LABEL: fmsub_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a2)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    vle16.v v10, (a1)
 ; ZVFHMIN-NEXT:    lui a1, 8
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v9, v8, v11
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
-; ZVFHMIN-NEXT:    vse16.v v8, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v14, v12
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = load <6 x half>, ptr %y
@@ -1287,25 +1667,15 @@ define void @fmsub_v6f16(ptr %x, ptr %y, ptr %z) {
 }
 
 define void @fnmsub_v4f32(ptr %x, ptr %y, ptr %z) {
-; ZVFH-LABEL: fnmsub_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v9, (a1)
-; ZVFH-NEXT:    vle32.v v10, (a2)
-; ZVFH-NEXT:    vfnmsac.vv v10, v8, v9
-; ZVFH-NEXT:    vse32.v v10, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fnmsub_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vle32.v v10, (a2)
-; ZVFHMIN-NEXT:    vfnmsac.vv v10, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v10, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fnmsub_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v9, (a1)
+; CHECK-NEXT:    vle32.v v10, (a2)
+; CHECK-NEXT:    vfnmsac.vv v10, v8, v9
+; CHECK-NEXT:    vse32.v v10, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = load <4 x float>, ptr %y
   %c = load <4 x float>, ptr %z
@@ -1347,16 +1717,16 @@ define void @fadd_v16f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: fadd_v16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
-; ZVFHMIN-NEXT:    vle16.v v9, (a0)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v12, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
-; ZVFHMIN-NEXT:    vse16.v v10, (a0)
+; ZVFHMIN-NEXT:    vle16.v v10, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v8, v16, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v8
+; ZVFHMIN-NEXT:    vse16.v v12, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <16 x half>, ptr %x
   %b = load <16 x half>, ptr %y
@@ -1366,23 +1736,14 @@ define void @fadd_v16f16(ptr %x, ptr %y) {
 }
 
 define void @fadd_v8f32(ptr %x, ptr %y) {
-; ZVFH-LABEL: fadd_v8f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v10, (a1)
-; ZVFH-NEXT:    vfadd.vv v8, v8, v10
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fadd_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fadd_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v10, (a1)
+; CHECK-NEXT:    vfadd.vv v8, v8, v10
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <8 x float>, ptr %x
   %b = load <8 x float>, ptr %y
   %c = fadd <8 x float> %a, %b
@@ -1391,23 +1752,14 @@ define void @fadd_v8f32(ptr %x, ptr %y) {
 }
 
 define void @fadd_v4f64(ptr %x, ptr %y) {
-; ZVFH-LABEL: fadd_v4f64:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; ZVFH-NEXT:    vle64.v v8, (a0)
-; ZVFH-NEXT:    vle64.v v10, (a1)
-; ZVFH-NEXT:    vfadd.vv v8, v8, v10
-; ZVFH-NEXT:    vse64.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fadd_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v8, (a0)
-; ZVFHMIN-NEXT:    vle64.v v9, (a1)
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vse64.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fadd_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vle64.v v10, (a1)
+; CHECK-NEXT:    vfadd.vv v8, v8, v10
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x double>, ptr %x
   %b = load <4 x double>, ptr %y
   %c = fadd <4 x double> %a, %b
@@ -1427,16 +1779,16 @@ define void @fsub_v16f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: fsub_v16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
-; ZVFHMIN-NEXT:    vle16.v v9, (a0)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v12, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
-; ZVFHMIN-NEXT:    vse16.v v10, (a0)
+; ZVFHMIN-NEXT:    vle16.v v10, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT:    vfsub.vv v8, v16, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v8
+; ZVFHMIN-NEXT:    vse16.v v12, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <16 x half>, ptr %x
   %b = load <16 x half>, ptr %y
@@ -1446,23 +1798,14 @@ define void @fsub_v16f16(ptr %x, ptr %y) {
 }
 
 define void @fsub_v8f32(ptr %x, ptr %y) {
-; ZVFH-LABEL: fsub_v8f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v10, (a1)
-; ZVFH-NEXT:    vfsub.vv v8, v8, v10
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fsub_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fsub_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v10, (a1)
+; CHECK-NEXT:    vfsub.vv v8, v8, v10
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <8 x float>, ptr %x
   %b = load <8 x float>, ptr %y
   %c = fsub <8 x float> %a, %b
@@ -1471,23 +1814,14 @@ define void @fsub_v8f32(ptr %x, ptr %y) {
 }
 
 define void @fsub_v4f64(ptr %x, ptr %y) {
-; ZVFH-LABEL: fsub_v4f64:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; ZVFH-NEXT:    vle64.v v8, (a0)
-; ZVFH-NEXT:    vle64.v v10, (a1)
-; ZVFH-NEXT:    vfsub.vv v8, v8, v10
-; ZVFH-NEXT:    vse64.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fsub_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v8, (a0)
-; ZVFHMIN-NEXT:    vle64.v v9, (a1)
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vse64.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fsub_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vle64.v v10, (a1)
+; CHECK-NEXT:    vfsub.vv v8, v8, v10
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x double>, ptr %x
   %b = load <4 x double>, ptr %y
   %c = fsub <4 x double> %a, %b
@@ -1507,16 +1841,16 @@ define void @fmul_v16f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: fmul_v16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
-; ZVFHMIN-NEXT:    vle16.v v9, (a0)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v12, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
-; ZVFHMIN-NEXT:    vse16.v v10, (a0)
+; ZVFHMIN-NEXT:    vle16.v v10, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT:    vfmul.vv v8, v16, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v8
+; ZVFHMIN-NEXT:    vse16.v v12, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <16 x half>, ptr %x
   %b = load <16 x half>, ptr %y
@@ -1526,23 +1860,14 @@ define void @fmul_v16f16(ptr %x, ptr %y) {
 }
 
 define void @fmul_v8f32(ptr %x, ptr %y) {
-; ZVFH-LABEL: fmul_v8f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v10, (a1)
-; ZVFH-NEXT:    vfmul.vv v8, v8, v10
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fmul_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fmul_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v10, (a1)
+; CHECK-NEXT:    vfmul.vv v8, v8, v10
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <8 x float>, ptr %x
   %b = load <8 x float>, ptr %y
   %c = fmul <8 x float> %a, %b
@@ -1551,23 +1876,14 @@ define void @fmul_v8f32(ptr %x, ptr %y) {
 }
 
 define void @fmul_v4f64(ptr %x, ptr %y) {
-; ZVFH-LABEL: fmul_v4f64:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; ZVFH-NEXT:    vle64.v v8, (a0)
-; ZVFH-NEXT:    vle64.v v10, (a1)
-; ZVFH-NEXT:    vfmul.vv v8, v8, v10
-; ZVFH-NEXT:    vse64.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fmul_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v8, (a0)
-; ZVFHMIN-NEXT:    vle64.v v9, (a1)
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vse64.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fmul_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vle64.v v10, (a1)
+; CHECK-NEXT:    vfmul.vv v8, v8, v10
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x double>, ptr %x
   %b = load <4 x double>, ptr %y
   %c = fmul <4 x double> %a, %b
@@ -1587,16 +1903,16 @@ define void @fdiv_v16f16(ptr %x, ptr %y) {
 ;
 ; ZVFHMIN-LABEL: fdiv_v16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
-; ZVFHMIN-NEXT:    vle16.v v9, (a0)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v12, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
-; ZVFHMIN-NEXT:    vse16.v v10, (a0)
+; ZVFHMIN-NEXT:    vle16.v v10, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT:    vfdiv.vv v8, v16, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v8
+; ZVFHMIN-NEXT:    vse16.v v12, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <16 x half>, ptr %x
   %b = load <16 x half>, ptr %y
@@ -1606,23 +1922,14 @@ define void @fdiv_v16f16(ptr %x, ptr %y) {
 }
 
 define void @fdiv_v8f32(ptr %x, ptr %y) {
-; ZVFH-LABEL: fdiv_v8f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v10, (a1)
-; ZVFH-NEXT:    vfdiv.vv v8, v8, v10
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fdiv_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fdiv_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v10, (a1)
+; CHECK-NEXT:    vfdiv.vv v8, v8, v10
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <8 x float>, ptr %x
   %b = load <8 x float>, ptr %y
   %c = fdiv <8 x float> %a, %b
@@ -1631,23 +1938,14 @@ define void @fdiv_v8f32(ptr %x, ptr %y) {
 }
 
 define void @fdiv_v4f64(ptr %x, ptr %y) {
-; ZVFH-LABEL: fdiv_v4f64:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; ZVFH-NEXT:    vle64.v v8, (a0)
-; ZVFH-NEXT:    vle64.v v10, (a1)
-; ZVFH-NEXT:    vfdiv.vv v8, v8, v10
-; ZVFH-NEXT:    vse64.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fdiv_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v8, (a0)
-; ZVFHMIN-NEXT:    vle64.v v9, (a1)
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vse64.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fdiv_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vle64.v v10, (a1)
+; CHECK-NEXT:    vfdiv.vv v8, v8, v10
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x double>, ptr %x
   %b = load <4 x double>, ptr %y
   %c = fdiv <4 x double> %a, %b
@@ -1666,7 +1964,7 @@ define void @fneg_v16f16(ptr %x) {
 ;
 ; ZVFHMIN-LABEL: fneg_v16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
 ; ZVFHMIN-NEXT:    lui a1, 8
 ; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
@@ -1679,21 +1977,13 @@ define void @fneg_v16f16(ptr %x) {
 }
 
 define void @fneg_v8f32(ptr %x) {
-; ZVFH-LABEL: fneg_v8f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfneg.v v8, v8
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fneg_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfneg.v v8, v8
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fneg_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfneg.v v8, v8
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <8 x float>, ptr %x
   %b = fneg <8 x float> %a
   store <8 x float> %b, ptr %x
@@ -1701,21 +1991,13 @@ define void @fneg_v8f32(ptr %x) {
 }
 
 define void @fneg_v4f64(ptr %x) {
-; ZVFH-LABEL: fneg_v4f64:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; ZVFH-NEXT:    vle64.v v8, (a0)
-; ZVFH-NEXT:    vfneg.v v8, v8
-; ZVFH-NEXT:    vse64.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fneg_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v8, (a0)
-; ZVFHMIN-NEXT:    vfneg.v v8, v8
-; ZVFHMIN-NEXT:    vse64.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fneg_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vfneg.v v8, v8
+; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x double>, ptr %x
   %b = fneg <4 x double> %a
   store <4 x double> %b, ptr %x
@@ -1735,18 +2017,18 @@ define void @fma_v16f16(ptr %x, ptr %y, ptr %z) {
 ;
 ; ZVFHMIN-LABEL: fma_v16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a2)
-; ZVFHMIN-NEXT:    vle16.v v9, (a0)
-; ZVFHMIN-NEXT:    vle16.v v10, (a1)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v14, v12
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
-; ZVFHMIN-NEXT:    vse16.v v10, (a0)
+; ZVFHMIN-NEXT:    vle16.v v10, (a0)
+; ZVFHMIN-NEXT:    vle16.v v12, (a1)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v20, v10
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v20, v16
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v8
+; ZVFHMIN-NEXT:    vse16.v v12, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <16 x half>, ptr %x
   %b = load <16 x half>, ptr %y
@@ -1758,25 +2040,15 @@ define void @fma_v16f16(ptr %x, ptr %y, ptr %z) {
 declare <16 x half> @llvm.fma.v16f16(<16 x half>, <16 x half>, <16 x half>)
 
 define void @fma_v8f32(ptr %x, ptr %y, ptr %z) {
-; ZVFH-LABEL: fma_v8f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v10, (a1)
-; ZVFH-NEXT:    vle32.v v12, (a2)
-; ZVFH-NEXT:    vfmacc.vv v12, v8, v10
-; ZVFH-NEXT:    vse32.v v12, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fma_v8f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vle32.v v10, (a2)
-; ZVFHMIN-NEXT:    vfmacc.vv v10, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v10, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fma_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v10, (a1)
+; CHECK-NEXT:    vle32.v v12, (a2)
+; CHECK-NEXT:    vfmacc.vv v12, v8, v10
+; CHECK-NEXT:    vse32.v v12, (a0)
+; CHECK-NEXT:    ret
   %a = load <8 x float>, ptr %x
   %b = load <8 x float>, ptr %y
   %c = load <8 x float>, ptr %z
@@ -1787,25 +2059,15 @@ define void @fma_v8f32(ptr %x, ptr %y, ptr %z) {
 declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
 
 define void @fma_v4f64(ptr %x, ptr %y, ptr %z) {
-; ZVFH-LABEL: fma_v4f64:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; ZVFH-NEXT:    vle64.v v8, (a0)
-; ZVFH-NEXT:    vle64.v v10, (a1)
-; ZVFH-NEXT:    vle64.v v12, (a2)
-; ZVFH-NEXT:    vfmacc.vv v12, v8, v10
-; ZVFH-NEXT:    vse64.v v12, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fma_v4f64:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
-; ZVFHMIN-NEXT:    vle64.v v8, (a0)
-; ZVFHMIN-NEXT:    vle64.v v9, (a1)
-; ZVFHMIN-NEXT:    vle64.v v10, (a2)
-; ZVFHMIN-NEXT:    vfmacc.vv v10, v8, v9
-; ZVFHMIN-NEXT:    vse64.v v10, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fma_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vle64.v v10, (a1)
+; CHECK-NEXT:    vle64.v v12, (a2)
+; CHECK-NEXT:    vfmacc.vv v12, v8, v10
+; CHECK-NEXT:    vse64.v v12, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x double>, ptr %x
   %b = load <4 x double>, ptr %y
   %c = load <4 x double>, ptr %z
@@ -1826,17 +2088,17 @@ define void @fadd_vf_v8f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fadd_vf_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
 ; ZVFHMIN-NEXT:    vmv.v.x v9, a1
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v10, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v8, v10, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = insertelement <8 x half> poison, half %y, i32 0
@@ -1857,22 +2119,18 @@ define void @fadd_vf_v6f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fadd_vf_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa5
-; ZVFHMIN-NEXT:    li a2, 192
-; ZVFHMIN-NEXT:    vmv.s.x v0, a2
-; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v9, a2
-; ZVFHMIN-NEXT:    vmerge.vxm v9, v9, a1, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v9, v10
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v8, v10, v12
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = insertelement <6 x half> poison, half %y, i32 0
@@ -1883,21 +2141,13 @@ define void @fadd_vf_v6f16(ptr %x, half %y) {
 }
 
 define void @fadd_vf_v4f32(ptr %x, float %y) {
-; ZVFH-LABEL: fadd_vf_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfadd.vf v8, v8, fa0
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fadd_vf_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fadd_vf_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = insertelement <4 x float> poison, float %y, i32 0
   %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
@@ -1933,17 +2183,17 @@ define void @fadd_fv_v8f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fadd_fv_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
 ; ZVFHMIN-NEXT:    vmv.v.x v9, a1
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = insertelement <8 x half> poison, half %y, i32 0
@@ -1964,22 +2214,18 @@ define void @fadd_fv_v6f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fadd_fv_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa5
-; ZVFHMIN-NEXT:    li a2, 192
-; ZVFHMIN-NEXT:    vmv.s.x v0, a2
-; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v9, a2
-; ZVFHMIN-NEXT:    vmerge.vxm v9, v9, a1, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v10, v9
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = insertelement <6 x half> poison, half %y, i32 0
@@ -1990,21 +2236,13 @@ define void @fadd_fv_v6f16(ptr %x, half %y) {
 }
 
 define void @fadd_fv_v4f32(ptr %x, float %y) {
-; ZVFH-LABEL: fadd_fv_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfadd.vf v8, v8, fa0
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fadd_fv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfadd.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fadd_fv_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfadd.vf v8, v8, fa0
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = insertelement <4 x float> poison, float %y, i32 0
   %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
@@ -2040,17 +2278,17 @@ define void @fsub_vf_v8f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fsub_vf_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
 ; ZVFHMIN-NEXT:    vmv.v.x v9, a1
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v10, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfsub.vv v8, v10, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = insertelement <8 x half> poison, half %y, i32 0
@@ -2071,22 +2309,18 @@ define void @fsub_vf_v6f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fsub_vf_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa5
-; ZVFHMIN-NEXT:    li a2, 192
-; ZVFHMIN-NEXT:    vmv.s.x v0, a2
-; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v9, a2
-; ZVFHMIN-NEXT:    vmerge.vxm v9, v9, a1, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v9, v10
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfsub.vv v8, v10, v12
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = insertelement <6 x half> poison, half %y, i32 0
@@ -2097,21 +2331,13 @@ define void @fsub_vf_v6f16(ptr %x, half %y) {
 }
 
 define void @fsub_vf_v4f32(ptr %x, float %y) {
-; ZVFH-LABEL: fsub_vf_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfsub.vf v8, v8, fa0
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fsub_vf_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfsub.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fsub_vf_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfsub.vf v8, v8, fa0
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = insertelement <4 x float> poison, float %y, i32 0
   %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
@@ -2147,17 +2373,17 @@ define void @fsub_fv_v8f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fsub_fv_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
 ; ZVFHMIN-NEXT:    vmv.v.x v9, a1
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfsub.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = insertelement <8 x half> poison, half %y, i32 0
@@ -2178,22 +2404,18 @@ define void @fsub_fv_v6f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fsub_fv_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa5
-; ZVFHMIN-NEXT:    li a2, 192
-; ZVFHMIN-NEXT:    vmv.s.x v0, a2
-; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v9, a2
-; ZVFHMIN-NEXT:    vmerge.vxm v9, v9, a1, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v10, v9
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfsub.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = insertelement <6 x half> poison, half %y, i32 0
@@ -2204,21 +2426,13 @@ define void @fsub_fv_v6f16(ptr %x, half %y) {
 }
 
 define void @fsub_fv_v4f32(ptr %x, float %y) {
-; ZVFH-LABEL: fsub_fv_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfrsub.vf v8, v8, fa0
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fsub_fv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfrsub.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fsub_fv_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = insertelement <4 x float> poison, float %y, i32 0
   %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
@@ -2254,17 +2468,17 @@ define void @fmul_vf_v8f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fmul_vf_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
 ; ZVFHMIN-NEXT:    vmv.v.x v9, a1
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v10, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmul.vv v8, v10, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = insertelement <8 x half> poison, half %y, i32 0
@@ -2285,22 +2499,18 @@ define void @fmul_vf_v6f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fmul_vf_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa5
-; ZVFHMIN-NEXT:    li a2, 192
-; ZVFHMIN-NEXT:    vmv.s.x v0, a2
-; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v9, a2
-; ZVFHMIN-NEXT:    vmerge.vxm v9, v9, a1, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v9, v10
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmul.vv v8, v10, v12
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = insertelement <6 x half> poison, half %y, i32 0
@@ -2311,21 +2521,13 @@ define void @fmul_vf_v6f16(ptr %x, half %y) {
 }
 
 define void @fmul_vf_v4f32(ptr %x, float %y) {
-; ZVFH-LABEL: fmul_vf_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfmul.vf v8, v8, fa0
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fmul_vf_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fmul_vf_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = insertelement <4 x float> poison, float %y, i32 0
   %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
@@ -2361,17 +2563,17 @@ define void @fmul_fv_v8f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fmul_fv_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
 ; ZVFHMIN-NEXT:    vmv.v.x v9, a1
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmul.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = insertelement <8 x half> poison, half %y, i32 0
@@ -2392,22 +2594,18 @@ define void @fmul_fv_v6f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fmul_fv_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa5
-; ZVFHMIN-NEXT:    li a2, 192
-; ZVFHMIN-NEXT:    vmv.s.x v0, a2
-; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v9, a2
-; ZVFHMIN-NEXT:    vmerge.vxm v9, v9, a1, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v10, v9
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmul.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = insertelement <6 x half> poison, half %y, i32 0
@@ -2418,21 +2616,13 @@ define void @fmul_fv_v6f16(ptr %x, half %y) {
 }
 
 define void @fmul_fv_v4f32(ptr %x, float %y) {
-; ZVFH-LABEL: fmul_fv_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfmul.vf v8, v8, fa0
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fmul_fv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfmul.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fmul_fv_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfmul.vf v8, v8, fa0
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = insertelement <4 x float> poison, float %y, i32 0
   %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
@@ -2468,17 +2658,17 @@ define void @fdiv_vf_v8f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fdiv_vf_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
 ; ZVFHMIN-NEXT:    vmv.v.x v9, a1
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v10, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfdiv.vv v8, v10, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = insertelement <8 x half> poison, half %y, i32 0
@@ -2499,22 +2689,18 @@ define void @fdiv_vf_v6f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fdiv_vf_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa5
-; ZVFHMIN-NEXT:    li a2, 192
-; ZVFHMIN-NEXT:    vmv.s.x v0, a2
-; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v9, a2
-; ZVFHMIN-NEXT:    vmerge.vxm v9, v9, a1, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v9, v10
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfdiv.vv v8, v10, v12
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = insertelement <6 x half> poison, half %y, i32 0
@@ -2525,21 +2711,13 @@ define void @fdiv_vf_v6f16(ptr %x, half %y) {
 }
 
 define void @fdiv_vf_v4f32(ptr %x, float %y) {
-; ZVFH-LABEL: fdiv_vf_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfdiv.vf v8, v8, fa0
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fdiv_vf_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfdiv.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fdiv_vf_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfdiv.vf v8, v8, fa0
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = insertelement <4 x float> poison, float %y, i32 0
   %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
@@ -2575,17 +2753,17 @@ define void @fdiv_fv_v8f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fdiv_fv_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
 ; ZVFHMIN-NEXT:    vmv.v.x v9, a1
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v8, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfdiv.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = insertelement <8 x half> poison, half %y, i32 0
@@ -2606,22 +2784,18 @@ define void @fdiv_fv_v6f16(ptr %x, half %y) {
 ;
 ; ZVFHMIN-LABEL: fdiv_fv_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa5
-; ZVFHMIN-NEXT:    li a2, 192
-; ZVFHMIN-NEXT:    vmv.s.x v0, a2
-; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v9, a2
-; ZVFHMIN-NEXT:    vmerge.vxm v9, v9, a1, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfdiv.vv v8, v10, v9
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfdiv.vv v8, v12, v10
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = insertelement <6 x half> poison, half %y, i32 0
@@ -2632,21 +2806,13 @@ define void @fdiv_fv_v6f16(ptr %x, half %y) {
 }
 
 define void @fdiv_fv_v4f32(ptr %x, float %y) {
-; ZVFH-LABEL: fdiv_fv_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfrdiv.vf v8, v8, fa0
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fdiv_fv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfrdiv.vf v8, v8, fa0
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fdiv_fv_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfrdiv.vf v8, v8, fa0
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = insertelement <4 x float> poison, float %y, i32 0
   %c = shufflevector <4 x float> %b, <4 x float> poison, <4 x i32> zeroinitializer
@@ -2683,19 +2849,19 @@ define void @fma_vf_v8f16(ptr %x, ptr %y, half %z) {
 ;
 ; ZVFHMIN-LABEL: fma_vf_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
 ; ZVFHMIN-NEXT:    vmv.v.x v10, a1
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v9, v8, v11
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
-; ZVFHMIN-NEXT:    vse16.v v8, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v14, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = load <8 x half>, ptr %y
@@ -2718,24 +2884,20 @@ define void @fma_vf_v6f16(ptr %x, ptr %y, half %z) {
 ;
 ; ZVFHMIN-LABEL: fma_vf_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vle16.v v9, (a1)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa5
-; ZVFHMIN-NEXT:    li a2, 192
-; ZVFHMIN-NEXT:    vmv.s.x v0, a2
-; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v10, a2
-; ZVFHMIN-NEXT:    vmerge.vxm v10, v10, a1, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v10
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v9, v11, v10
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
-; ZVFHMIN-NEXT:    vse16.v v8, (a0)
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vle16.v v8, (a1)
+; ZVFHMIN-NEXT:    vle16.v v9, (a0)
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v10, a1
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v14, v12
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = load <6 x half>, ptr %y
@@ -2747,23 +2909,14 @@ define void @fma_vf_v6f16(ptr %x, ptr %y, half %z) {
 }
 
 define void @fma_vf_v4f32(ptr %x, ptr %y, float %z) {
-; ZVFH-LABEL: fma_vf_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v9, (a1)
-; ZVFH-NEXT:    vfmacc.vf v9, fa0, v8
-; ZVFH-NEXT:    vse32.v v9, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fma_vf_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vfmacc.vf v9, fa0, v8
-; ZVFHMIN-NEXT:    vse32.v v9, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fma_vf_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v9, (a1)
+; CHECK-NEXT:    vfmacc.vf v9, fa0, v8
+; CHECK-NEXT:    vse32.v v9, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = load <4 x float>, ptr %y
   %c = insertelement <4 x float> poison, float %z, i32 0
@@ -2803,19 +2956,19 @@ define void @fma_fv_v8f16(ptr %x, ptr %y, half %z) {
 ;
 ; ZVFHMIN-LABEL: fma_fv_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
 ; ZVFHMIN-NEXT:    vmv.v.x v10, a1
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v9, v8, v11
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
-; ZVFHMIN-NEXT:    vse16.v v8, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v14, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = load <8 x half>, ptr %y
@@ -2838,24 +2991,20 @@ define void @fma_fv_v6f16(ptr %x, ptr %y, half %z) {
 ;
 ; ZVFHMIN-LABEL: fma_fv_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vle16.v v9, (a1)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa5
-; ZVFHMIN-NEXT:    li a2, 192
-; ZVFHMIN-NEXT:    vmv.s.x v0, a2
-; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v10, a2
-; ZVFHMIN-NEXT:    vmerge.vxm v10, v10, a1, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v10
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v9, v11, v10
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
-; ZVFHMIN-NEXT:    vse16.v v8, (a0)
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vle16.v v8, (a1)
+; ZVFHMIN-NEXT:    vle16.v v9, (a0)
+; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v10, a1
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v14, v12
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = load <6 x half>, ptr %y
@@ -2867,23 +3016,14 @@ define void @fma_fv_v6f16(ptr %x, ptr %y, half %z) {
 }
 
 define void @fma_fv_v4f32(ptr %x, ptr %y, float %z) {
-; ZVFH-LABEL: fma_fv_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v9, (a1)
-; ZVFH-NEXT:    vfmacc.vf v9, fa0, v8
-; ZVFH-NEXT:    vse32.v v9, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fma_fv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vfmacc.vf v9, fa0, v8
-; ZVFHMIN-NEXT:    vse32.v v9, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fma_fv_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v9, (a1)
+; CHECK-NEXT:    vfmacc.vf v9, fa0, v8
+; CHECK-NEXT:    vse32.v v9, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = load <4 x float>, ptr %y
   %c = insertelement <4 x float> poison, float %z, i32 0
@@ -2923,21 +3063,21 @@ define void @fmsub_vf_v8f16(ptr %x, ptr %y, half %z) {
 ;
 ; ZVFHMIN-LABEL: fmsub_vf_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vle16.v v9, (a1)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    vmv.v.x v10, a1
+; ZVFHMIN-NEXT:    fmv.x.w a2, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vle16.v v8, (a1)
+; ZVFHMIN-NEXT:    vle16.v v9, (a0)
+; ZVFHMIN-NEXT:    vmv.v.x v10, a2
 ; ZVFHMIN-NEXT:    lui a1, 8
-; ZVFHMIN-NEXT:    vxor.vx v9, v9, a1
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v8, v9, v11
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v14, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = load <8 x half>, ptr %y
@@ -2961,26 +3101,22 @@ define void @fmsub_vf_v6f16(ptr %x, ptr %y, half %z) {
 ;
 ; ZVFHMIN-LABEL: fmsub_vf_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vle16.v v9, (a1)
-; ZVFHMIN-NEXT:    fmv.x.h a1, fa5
-; ZVFHMIN-NEXT:    li a2, 192
-; ZVFHMIN-NEXT:    vmv.s.x v0, a2
-; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    fmv.x.w a2, fa0
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vle16.v v8, (a1)
+; ZVFHMIN-NEXT:    vle16.v v9, (a0)
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vmv.v.x v10, a2
-; ZVFHMIN-NEXT:    vmerge.vxm v10, v10, a1, v0
 ; ZVFHMIN-NEXT:    lui a1, 8
-; ZVFHMIN-NEXT:    vxor.vx v9, v9, a1
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v10
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmadd.vv v9, v11, v10
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
-; ZVFHMIN-NEXT:    vse16.v v8, (a0)
+; ZVFHMIN-NEXT:    vxor.vx v8, v8, a1
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmadd.vv v8, v14, v12
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = load <6 x half>, ptr %y
@@ -2993,23 +3129,14 @@ define void @fmsub_vf_v6f16(ptr %x, ptr %y, half %z) {
 }
 
 define void @fnmsub_vf_v4f32(ptr %x, ptr %y, float %z) {
-; ZVFH-LABEL: fnmsub_vf_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v9, (a1)
-; ZVFH-NEXT:    vfnmsac.vf v9, fa0, v8
-; ZVFH-NEXT:    vse32.v v9, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fnmsub_vf_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vfnmsac.vf v9, fa0, v8
-; ZVFHMIN-NEXT:    vse32.v v9, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fnmsub_vf_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v9, (a1)
+; CHECK-NEXT:    vfnmsac.vf v9, fa0, v8
+; CHECK-NEXT:    vse32.v v9, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = load <4 x float>, ptr %y
   %c = insertelement <4 x float> poison, float %z, i32 0
@@ -3041,23 +3168,14 @@ define void @fnmadd_vf_v2f64(ptr %x, ptr %y, double %z) {
 }
 
 define void @fnmsub_fv_v4f32(ptr %x, ptr %y, float %z) {
-; ZVFH-LABEL: fnmsub_fv_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v9, (a1)
-; ZVFH-NEXT:    vfnmsac.vf v9, fa0, v8
-; ZVFH-NEXT:    vse32.v v9, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fnmsub_fv_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vfnmsac.vf v9, fa0, v8
-; ZVFHMIN-NEXT:    vse32.v v9, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fnmsub_fv_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v9, (a1)
+; CHECK-NEXT:    vfnmsac.vf v9, fa0, v8
+; CHECK-NEXT:    vse32.v v9, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = load <4 x float>, ptr %y
   %c = insertelement <4 x float> poison, float %z, i32 0
@@ -3106,20 +3224,20 @@ define void @trunc_v8f16(ptr %x) {
 ;
 ; ZVFHMIN-LABEL: trunc_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    lui a1, 307200
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    vse16.v v8, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
@@ -3146,25 +3264,337 @@ define void @trunc_v6f16(ptr %x) {
 ; ZVFH-NEXT:    vse16.v v8, (a0)
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: trunc_v6f16:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v9
-; ZVFHMIN-NEXT:    lui a1, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
-; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
-; ZVFHMIN-NEXT:    vse16.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; RV32-ZVFHMIN-LABEL: trunc_v6f16:
+; RV32-ZVFHMIN:       # %bb.0:
+; RV32-ZVFHMIN-NEXT:    addi sp, sp, -48
+; RV32-ZVFHMIN-NEXT:    .cfi_def_cfa_offset 48
+; RV32-ZVFHMIN-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    .cfi_offset ra, -4
+; RV32-ZVFHMIN-NEXT:    .cfi_offset s0, -8
+; RV32-ZVFHMIN-NEXT:    .cfi_offset s1, -12
+; RV32-ZVFHMIN-NEXT:    .cfi_offset fs0, -24
+; RV32-ZVFHMIN-NEXT:    csrr a1, vlenb
+; RV32-ZVFHMIN-NEXT:    slli a1, a1, 1
+; RV32-ZVFHMIN-NEXT:    sub sp, sp, a1
+; RV32-ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV32-ZVFHMIN-NEXT:    mv s0, a0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vle16.v v8, (a0)
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    lui a0, 307200
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fs0, a0
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB116_2
+; RV32-ZVFHMIN-NEXT:  # %bb.1:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB116_2:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w s1, fa0
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB116_4
+; RV32-ZVFHMIN-NEXT:  # %bb.3:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB116_4:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vmv.v.x v8, a0
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, s1
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB116_6
+; RV32-ZVFHMIN-NEXT:  # %bb.5:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB116_6:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB116_8
+; RV32-ZVFHMIN-NEXT:  # %bb.7:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB116_8:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 4
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB116_10
+; RV32-ZVFHMIN-NEXT:  # %bb.9:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB116_10:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 5
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB116_12
+; RV32-ZVFHMIN-NEXT:  # %bb.11:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB116_12:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-ZVFHMIN-NEXT:    vse16.v v8, (s0)
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    slli a0, a0, 1
+; RV32-ZVFHMIN-NEXT:    add sp, sp, a0
+; RV32-ZVFHMIN-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    addi sp, sp, 48
+; RV32-ZVFHMIN-NEXT:    ret
+;
+; RV64-ZVFHMIN-LABEL: trunc_v6f16:
+; RV64-ZVFHMIN:       # %bb.0:
+; RV64-ZVFHMIN-NEXT:    addi sp, sp, -48
+; RV64-ZVFHMIN-NEXT:    .cfi_def_cfa_offset 48
+; RV64-ZVFHMIN-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    .cfi_offset ra, -8
+; RV64-ZVFHMIN-NEXT:    .cfi_offset s0, -16
+; RV64-ZVFHMIN-NEXT:    .cfi_offset s1, -24
+; RV64-ZVFHMIN-NEXT:    .cfi_offset fs0, -32
+; RV64-ZVFHMIN-NEXT:    csrr a1, vlenb
+; RV64-ZVFHMIN-NEXT:    slli a1, a1, 1
+; RV64-ZVFHMIN-NEXT:    sub sp, sp, a1
+; RV64-ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV64-ZVFHMIN-NEXT:    mv s0, a0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vle16.v v8, (a0)
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    lui a0, 307200
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fs0, a0
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB116_2
+; RV64-ZVFHMIN-NEXT:  # %bb.1:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB116_2:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w s1, fa0
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB116_4
+; RV64-ZVFHMIN-NEXT:  # %bb.3:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB116_4:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vmv.v.x v8, a0
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, s1
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB116_6
+; RV64-ZVFHMIN-NEXT:  # %bb.5:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB116_6:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB116_8
+; RV64-ZVFHMIN-NEXT:  # %bb.7:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB116_8:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 4
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB116_10
+; RV64-ZVFHMIN-NEXT:  # %bb.9:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB116_10:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 5
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB116_12
+; RV64-ZVFHMIN-NEXT:  # %bb.11:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rtz
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB116_12:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV64-ZVFHMIN-NEXT:    vse16.v v8, (s0)
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    slli a0, a0, 1
+; RV64-ZVFHMIN-NEXT:    add sp, sp, a0
+; RV64-ZVFHMIN-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    addi sp, sp, 48
+; RV64-ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = call <6 x half> @llvm.trunc.v6f16(<6 x half> %a)
   store <6 x half> %b, ptr %x
@@ -3173,35 +3603,20 @@ define void @trunc_v6f16(ptr %x) {
 declare <6 x half> @llvm.trunc.v6f16(<6 x half>)
 
 define void @trunc_v4f32(ptr %x) {
-; ZVFH-LABEL: trunc_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfabs.v v9, v8
-; ZVFH-NEXT:    lui a1, 307200
-; ZVFH-NEXT:    fmv.w.x fa5, a1
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: trunc_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a1, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: trunc_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    lui a1, 307200
+; CHECK-NEXT:    fmv.w.x fa5, a1
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = call <4 x float> @llvm.trunc.v4f32(<4 x float> %a)
   store <4 x float> %b, ptr %x
@@ -3251,22 +3666,22 @@ define void @ceil_v8f16(ptr %x) {
 ;
 ; ZVFHMIN-LABEL: ceil_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    lui a1, 307200
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a1, 3
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    fsrm a1
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    vse16.v v8, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
@@ -3295,27 +3710,337 @@ define void @ceil_v6f16(ptr %x) {
 ; ZVFH-NEXT:    vse16.v v8, (a0)
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: ceil_v6f16:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v9
-; ZVFHMIN-NEXT:    lui a1, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
-; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    fsrmi a1, 3
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
-; ZVFHMIN-NEXT:    vse16.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; RV32-ZVFHMIN-LABEL: ceil_v6f16:
+; RV32-ZVFHMIN:       # %bb.0:
+; RV32-ZVFHMIN-NEXT:    addi sp, sp, -48
+; RV32-ZVFHMIN-NEXT:    .cfi_def_cfa_offset 48
+; RV32-ZVFHMIN-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    .cfi_offset ra, -4
+; RV32-ZVFHMIN-NEXT:    .cfi_offset s0, -8
+; RV32-ZVFHMIN-NEXT:    .cfi_offset s1, -12
+; RV32-ZVFHMIN-NEXT:    .cfi_offset fs0, -24
+; RV32-ZVFHMIN-NEXT:    csrr a1, vlenb
+; RV32-ZVFHMIN-NEXT:    slli a1, a1, 1
+; RV32-ZVFHMIN-NEXT:    sub sp, sp, a1
+; RV32-ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV32-ZVFHMIN-NEXT:    mv s0, a0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vle16.v v8, (a0)
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    lui a0, 307200
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fs0, a0
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB120_2
+; RV32-ZVFHMIN-NEXT:  # %bb.1:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rup
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rup
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB120_2:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w s1, fa0
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB120_4
+; RV32-ZVFHMIN-NEXT:  # %bb.3:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rup
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rup
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB120_4:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vmv.v.x v8, a0
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, s1
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB120_6
+; RV32-ZVFHMIN-NEXT:  # %bb.5:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rup
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rup
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB120_6:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB120_8
+; RV32-ZVFHMIN-NEXT:  # %bb.7:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rup
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rup
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB120_8:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 4
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB120_10
+; RV32-ZVFHMIN-NEXT:  # %bb.9:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rup
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rup
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB120_10:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 5
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB120_12
+; RV32-ZVFHMIN-NEXT:  # %bb.11:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rup
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rup
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB120_12:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-ZVFHMIN-NEXT:    vse16.v v8, (s0)
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    slli a0, a0, 1
+; RV32-ZVFHMIN-NEXT:    add sp, sp, a0
+; RV32-ZVFHMIN-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    addi sp, sp, 48
+; RV32-ZVFHMIN-NEXT:    ret
+;
+; RV64-ZVFHMIN-LABEL: ceil_v6f16:
+; RV64-ZVFHMIN:       # %bb.0:
+; RV64-ZVFHMIN-NEXT:    addi sp, sp, -48
+; RV64-ZVFHMIN-NEXT:    .cfi_def_cfa_offset 48
+; RV64-ZVFHMIN-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    .cfi_offset ra, -8
+; RV64-ZVFHMIN-NEXT:    .cfi_offset s0, -16
+; RV64-ZVFHMIN-NEXT:    .cfi_offset s1, -24
+; RV64-ZVFHMIN-NEXT:    .cfi_offset fs0, -32
+; RV64-ZVFHMIN-NEXT:    csrr a1, vlenb
+; RV64-ZVFHMIN-NEXT:    slli a1, a1, 1
+; RV64-ZVFHMIN-NEXT:    sub sp, sp, a1
+; RV64-ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV64-ZVFHMIN-NEXT:    mv s0, a0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vle16.v v8, (a0)
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    lui a0, 307200
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fs0, a0
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB120_2
+; RV64-ZVFHMIN-NEXT:  # %bb.1:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rup
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rup
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB120_2:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w s1, fa0
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB120_4
+; RV64-ZVFHMIN-NEXT:  # %bb.3:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rup
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rup
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB120_4:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vmv.v.x v8, a0
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, s1
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB120_6
+; RV64-ZVFHMIN-NEXT:  # %bb.5:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rup
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rup
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB120_6:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB120_8
+; RV64-ZVFHMIN-NEXT:  # %bb.7:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rup
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rup
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB120_8:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 4
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB120_10
+; RV64-ZVFHMIN-NEXT:  # %bb.9:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rup
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rup
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB120_10:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 5
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB120_12
+; RV64-ZVFHMIN-NEXT:  # %bb.11:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rup
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rup
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB120_12:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV64-ZVFHMIN-NEXT:    vse16.v v8, (s0)
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    slli a0, a0, 1
+; RV64-ZVFHMIN-NEXT:    add sp, sp, a0
+; RV64-ZVFHMIN-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    addi sp, sp, 48
+; RV64-ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = call <6 x half> @llvm.ceil.v6f16(<6 x half> %a)
   store <6 x half> %b, ptr %x
@@ -3324,39 +4049,22 @@ define void @ceil_v6f16(ptr %x) {
 declare <6 x half> @llvm.ceil.v6f16(<6 x half>)
 
 define void @ceil_v4f32(ptr %x) {
-; ZVFH-LABEL: ceil_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfabs.v v9, v8
-; ZVFH-NEXT:    lui a1, 307200
-; ZVFH-NEXT:    fmv.w.x fa5, a1
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFH-NEXT:    fsrmi a1, 3
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a1
-; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: ceil_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a1, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a1, 3
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: ceil_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    lui a1, 307200
+; CHECK-NEXT:    fmv.w.x fa5, a1
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
+; CHECK-NEXT:    fsrmi a1, 3
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = call <4 x float> @llvm.ceil.v4f32(<4 x float> %a)
   store <4 x float> %b, ptr %x
@@ -3408,22 +4116,22 @@ define void @floor_v8f16(ptr %x) {
 ;
 ; ZVFHMIN-LABEL: floor_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    lui a1, 307200
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a1, 2
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    fsrm a1
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    vse16.v v8, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
@@ -3452,27 +4160,337 @@ define void @floor_v6f16(ptr %x) {
 ; ZVFH-NEXT:    vse16.v v8, (a0)
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: floor_v6f16:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v9
-; ZVFHMIN-NEXT:    lui a1, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
-; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    fsrmi a1, 2
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
-; ZVFHMIN-NEXT:    vse16.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; RV32-ZVFHMIN-LABEL: floor_v6f16:
+; RV32-ZVFHMIN:       # %bb.0:
+; RV32-ZVFHMIN-NEXT:    addi sp, sp, -48
+; RV32-ZVFHMIN-NEXT:    .cfi_def_cfa_offset 48
+; RV32-ZVFHMIN-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    .cfi_offset ra, -4
+; RV32-ZVFHMIN-NEXT:    .cfi_offset s0, -8
+; RV32-ZVFHMIN-NEXT:    .cfi_offset s1, -12
+; RV32-ZVFHMIN-NEXT:    .cfi_offset fs0, -24
+; RV32-ZVFHMIN-NEXT:    csrr a1, vlenb
+; RV32-ZVFHMIN-NEXT:    slli a1, a1, 1
+; RV32-ZVFHMIN-NEXT:    sub sp, sp, a1
+; RV32-ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV32-ZVFHMIN-NEXT:    mv s0, a0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vle16.v v8, (a0)
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    lui a0, 307200
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fs0, a0
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB124_2
+; RV32-ZVFHMIN-NEXT:  # %bb.1:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB124_2:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w s1, fa0
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB124_4
+; RV32-ZVFHMIN-NEXT:  # %bb.3:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB124_4:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vmv.v.x v8, a0
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, s1
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB124_6
+; RV32-ZVFHMIN-NEXT:  # %bb.5:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB124_6:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB124_8
+; RV32-ZVFHMIN-NEXT:  # %bb.7:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB124_8:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 4
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB124_10
+; RV32-ZVFHMIN-NEXT:  # %bb.9:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB124_10:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 5
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB124_12
+; RV32-ZVFHMIN-NEXT:  # %bb.11:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB124_12:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-ZVFHMIN-NEXT:    vse16.v v8, (s0)
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    slli a0, a0, 1
+; RV32-ZVFHMIN-NEXT:    add sp, sp, a0
+; RV32-ZVFHMIN-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    addi sp, sp, 48
+; RV32-ZVFHMIN-NEXT:    ret
+;
+; RV64-ZVFHMIN-LABEL: floor_v6f16:
+; RV64-ZVFHMIN:       # %bb.0:
+; RV64-ZVFHMIN-NEXT:    addi sp, sp, -48
+; RV64-ZVFHMIN-NEXT:    .cfi_def_cfa_offset 48
+; RV64-ZVFHMIN-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    .cfi_offset ra, -8
+; RV64-ZVFHMIN-NEXT:    .cfi_offset s0, -16
+; RV64-ZVFHMIN-NEXT:    .cfi_offset s1, -24
+; RV64-ZVFHMIN-NEXT:    .cfi_offset fs0, -32
+; RV64-ZVFHMIN-NEXT:    csrr a1, vlenb
+; RV64-ZVFHMIN-NEXT:    slli a1, a1, 1
+; RV64-ZVFHMIN-NEXT:    sub sp, sp, a1
+; RV64-ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV64-ZVFHMIN-NEXT:    mv s0, a0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vle16.v v8, (a0)
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    lui a0, 307200
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fs0, a0
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB124_2
+; RV64-ZVFHMIN-NEXT:  # %bb.1:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB124_2:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w s1, fa0
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB124_4
+; RV64-ZVFHMIN-NEXT:  # %bb.3:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB124_4:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vmv.v.x v8, a0
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, s1
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB124_6
+; RV64-ZVFHMIN-NEXT:  # %bb.5:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB124_6:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB124_8
+; RV64-ZVFHMIN-NEXT:  # %bb.7:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB124_8:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 4
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB124_10
+; RV64-ZVFHMIN-NEXT:  # %bb.9:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB124_10:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 5
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB124_12
+; RV64-ZVFHMIN-NEXT:  # %bb.11:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rdn
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rdn
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB124_12:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV64-ZVFHMIN-NEXT:    vse16.v v8, (s0)
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    slli a0, a0, 1
+; RV64-ZVFHMIN-NEXT:    add sp, sp, a0
+; RV64-ZVFHMIN-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    addi sp, sp, 48
+; RV64-ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = call <6 x half> @llvm.floor.v6f16(<6 x half> %a)
   store <6 x half> %b, ptr %x
@@ -3481,39 +4499,22 @@ define void @floor_v6f16(ptr %x) {
 declare <6 x half> @llvm.floor.v6f16(<6 x half>)
 
 define void @floor_v4f32(ptr %x) {
-; ZVFH-LABEL: floor_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfabs.v v9, v8
-; ZVFH-NEXT:    lui a1, 307200
-; ZVFH-NEXT:    fmv.w.x fa5, a1
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFH-NEXT:    fsrmi a1, 2
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a1
-; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: floor_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a1, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a1, 2
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: floor_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    lui a1, 307200
+; CHECK-NEXT:    fmv.w.x fa5, a1
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
+; CHECK-NEXT:    fsrmi a1, 2
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = call <4 x float> @llvm.floor.v4f32(<4 x float> %a)
   store <4 x float> %b, ptr %x
@@ -3565,22 +4566,22 @@ define void @round_v8f16(ptr %x) {
 ;
 ; ZVFHMIN-LABEL: round_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    lui a1, 307200
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a1, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    fsrm a1
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    vse16.v v8, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
@@ -3609,27 +4610,337 @@ define void @round_v6f16(ptr %x) {
 ; ZVFH-NEXT:    vse16.v v8, (a0)
 ; ZVFH-NEXT:    ret
 ;
-; ZVFHMIN-LABEL: round_v6f16:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v9
-; ZVFHMIN-NEXT:    lui a1, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
-; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    fsrmi a1, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
-; ZVFHMIN-NEXT:    vse16.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; RV32-ZVFHMIN-LABEL: round_v6f16:
+; RV32-ZVFHMIN:       # %bb.0:
+; RV32-ZVFHMIN-NEXT:    addi sp, sp, -48
+; RV32-ZVFHMIN-NEXT:    .cfi_def_cfa_offset 48
+; RV32-ZVFHMIN-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-ZVFHMIN-NEXT:    .cfi_offset ra, -4
+; RV32-ZVFHMIN-NEXT:    .cfi_offset s0, -8
+; RV32-ZVFHMIN-NEXT:    .cfi_offset s1, -12
+; RV32-ZVFHMIN-NEXT:    .cfi_offset fs0, -24
+; RV32-ZVFHMIN-NEXT:    csrr a1, vlenb
+; RV32-ZVFHMIN-NEXT:    slli a1, a1, 1
+; RV32-ZVFHMIN-NEXT:    sub sp, sp, a1
+; RV32-ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV32-ZVFHMIN-NEXT:    mv s0, a0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vle16.v v8, (a0)
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    lui a0, 307200
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fs0, a0
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB128_2
+; RV32-ZVFHMIN-NEXT:  # %bb.1:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB128_2:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w s1, fa0
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB128_4
+; RV32-ZVFHMIN-NEXT:  # %bb.3:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB128_4:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vmv.v.x v8, a0
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, s1
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB128_6
+; RV32-ZVFHMIN-NEXT:  # %bb.5:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB128_6:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 3
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB128_8
+; RV32-ZVFHMIN-NEXT:  # %bb.7:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB128_8:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 4
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB128_10
+; RV32-ZVFHMIN-NEXT:  # %bb.9:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB128_10:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV32-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV32-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 5
+; RV32-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV32-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV32-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV32-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV32-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV32-ZVFHMIN-NEXT:    beqz a0, .LBB128_12
+; RV32-ZVFHMIN-NEXT:  # %bb.11:
+; RV32-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV32-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV32-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV32-ZVFHMIN-NEXT:  .LBB128_12:
+; RV32-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV32-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV32-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV32-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV32-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV32-ZVFHMIN-NEXT:    vse16.v v8, (s0)
+; RV32-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV32-ZVFHMIN-NEXT:    slli a0, a0, 1
+; RV32-ZVFHMIN-NEXT:    add sp, sp, a0
+; RV32-ZVFHMIN-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-ZVFHMIN-NEXT:    addi sp, sp, 48
+; RV32-ZVFHMIN-NEXT:    ret
+;
+; RV64-ZVFHMIN-LABEL: round_v6f16:
+; RV64-ZVFHMIN:       # %bb.0:
+; RV64-ZVFHMIN-NEXT:    addi sp, sp, -48
+; RV64-ZVFHMIN-NEXT:    .cfi_def_cfa_offset 48
+; RV64-ZVFHMIN-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-ZVFHMIN-NEXT:    .cfi_offset ra, -8
+; RV64-ZVFHMIN-NEXT:    .cfi_offset s0, -16
+; RV64-ZVFHMIN-NEXT:    .cfi_offset s1, -24
+; RV64-ZVFHMIN-NEXT:    .cfi_offset fs0, -32
+; RV64-ZVFHMIN-NEXT:    csrr a1, vlenb
+; RV64-ZVFHMIN-NEXT:    slli a1, a1, 1
+; RV64-ZVFHMIN-NEXT:    sub sp, sp, a1
+; RV64-ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV64-ZVFHMIN-NEXT:    mv s0, a0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vle16.v v8, (a0)
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    lui a0, 307200
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fs0, a0
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB128_2
+; RV64-ZVFHMIN-NEXT:  # %bb.1:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB128_2:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w s1, fa0
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB128_4
+; RV64-ZVFHMIN-NEXT:  # %bb.3:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB128_4:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vmv.v.x v8, a0
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, s1
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB128_6
+; RV64-ZVFHMIN-NEXT:  # %bb.5:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB128_6:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 3
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB128_8
+; RV64-ZVFHMIN-NEXT:  # %bb.7:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB128_8:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 4
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB128_10
+; RV64-ZVFHMIN-NEXT:  # %bb.9:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB128_10:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, sp, 16
+; RV64-ZVFHMIN-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    add a0, sp, a0
+; RV64-ZVFHMIN-NEXT:    addi a0, a0, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 5
+; RV64-ZVFHMIN-NEXT:    vmv.x.s a0, v8
+; RV64-ZVFHMIN-NEXT:    fmv.w.x fa0, a0
+; RV64-ZVFHMIN-NEXT:    call __extendhfsf2
+; RV64-ZVFHMIN-NEXT:    fabs.s fa5, fa0
+; RV64-ZVFHMIN-NEXT:    flt.s a0, fa5, fs0
+; RV64-ZVFHMIN-NEXT:    beqz a0, .LBB128_12
+; RV64-ZVFHMIN-NEXT:  # %bb.11:
+; RV64-ZVFHMIN-NEXT:    fcvt.w.s a0, fa0, rmm
+; RV64-ZVFHMIN-NEXT:    fcvt.s.w fa5, a0, rmm
+; RV64-ZVFHMIN-NEXT:    fsgnj.s fa0, fa5, fa0
+; RV64-ZVFHMIN-NEXT:  .LBB128_12:
+; RV64-ZVFHMIN-NEXT:    call __truncsfhf2
+; RV64-ZVFHMIN-NEXT:    fmv.x.w a0, fa0
+; RV64-ZVFHMIN-NEXT:    addi a1, sp, 16
+; RV64-ZVFHMIN-NEXT:    vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; RV64-ZVFHMIN-NEXT:    vslidedown.vi v8, v8, 2
+; RV64-ZVFHMIN-NEXT:    vse16.v v8, (s0)
+; RV64-ZVFHMIN-NEXT:    csrr a0, vlenb
+; RV64-ZVFHMIN-NEXT:    slli a0, a0, 1
+; RV64-ZVFHMIN-NEXT:    add sp, sp, a0
+; RV64-ZVFHMIN-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-ZVFHMIN-NEXT:    addi sp, sp, 48
+; RV64-ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = call <6 x half> @llvm.round.v6f16(<6 x half> %a)
   store <6 x half> %b, ptr %x
@@ -3638,39 +4949,22 @@ define void @round_v6f16(ptr %x) {
 declare <6 x half> @llvm.round.v6f16(<6 x half>)
 
 define void @round_v4f32(ptr %x) {
-; ZVFH-LABEL: round_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfabs.v v9, v8
-; ZVFH-NEXT:    lui a1, 307200
-; ZVFH-NEXT:    fmv.w.x fa5, a1
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFH-NEXT:    fsrmi a1, 4
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a1
-; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: round_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a1, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    fsrmi a1, 4
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    fsrm a1
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: round_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    lui a1, 307200
+; CHECK-NEXT:    fmv.w.x fa5, a1
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
+; CHECK-NEXT:    fsrmi a1, 4
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = call <4 x float> @llvm.round.v4f32(<4 x float> %a)
   store <4 x float> %b, ptr %x
@@ -3720,20 +5014,20 @@ define void @rint_v8f16(ptr %x) {
 ;
 ; ZVFHMIN-LABEL: rint_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    lui a1, 307200
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    vse16.v v8, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
@@ -3744,35 +5038,20 @@ define void @rint_v8f16(ptr %x) {
 declare <8 x half> @llvm.rint.v8f16(<8 x half>)
 
 define void @rint_v4f32(ptr %x) {
-; ZVFH-LABEL: rint_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfabs.v v9, v8
-; ZVFH-NEXT:    lui a1, 307200
-; ZVFH-NEXT:    fmv.w.x fa5, a1
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: rint_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a1, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: rint_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    lui a1, 307200
+; CHECK-NEXT:    fmv.w.x fa5, a1
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = call <4 x float> @llvm.rint.v4f32(<4 x float> %a)
   store <4 x float> %b, ptr %x
@@ -3822,22 +5101,22 @@ define void @nearbyint_v8f16(ptr %x) {
 ;
 ; ZVFHMIN-LABEL: nearbyint_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a0)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v9
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    lui a1, 307200
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    frflags a1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    fsflags a1
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
+; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    vse16.v v8, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
@@ -3848,39 +5127,22 @@ define void @nearbyint_v8f16(ptr %x) {
 declare <8 x half> @llvm.nearbyint.v8f16(<8 x half>)
 
 define void @nearbyint_v4f32(ptr %x) {
-; ZVFH-LABEL: nearbyint_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vfabs.v v9, v8
-; ZVFH-NEXT:    lui a1, 307200
-; ZVFH-NEXT:    fmv.w.x fa5, a1
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFH-NEXT:    frflags a1
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFH-NEXT:    fsflags a1
-; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFH-NEXT:    vse32.v v8, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: nearbyint_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vfabs.v v9, v8
-; ZVFHMIN-NEXT:    lui a1, 307200
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a1
-; ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFHMIN-NEXT:    frflags a1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
-; ZVFHMIN-NEXT:    fsflags a1
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
-; ZVFHMIN-NEXT:    vse32.v v8, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: nearbyint_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vfabs.v v9, v8
+; CHECK-NEXT:    lui a1, 307200
+; CHECK-NEXT:    fmv.w.x fa5, a1
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
+; CHECK-NEXT:    frflags a1
+; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
+; CHECK-NEXT:    fsflags a1
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
+; CHECK-NEXT:    vse32.v v8, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a)
   store <4 x float> %b, ptr %x
@@ -3925,23 +5187,23 @@ define void @fmuladd_v8f16(ptr %x, ptr %y, ptr %z) {
 ;
 ; ZVFHMIN-LABEL: fmuladd_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    vle16.v v10, (a2)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v11
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmul.vv v8, v14, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v11, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v11
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = load <8 x half>, ptr %y
@@ -3965,24 +5227,24 @@ define void @fmuladd_v6f16(ptr %x, ptr %y, ptr %z) {
 ;
 ; ZVFHMIN-LABEL: fmuladd_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    vle16.v v10, (a2)
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v11
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmul.vv v8, v14, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v11, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v11
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v8, v8, v12
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = load <6 x half>, ptr %y
@@ -3994,25 +5256,15 @@ define void @fmuladd_v6f16(ptr %x, ptr %y, ptr %z) {
 declare <6 x half> @llvm.fmuladd.v6f16(<6 x half>, <6 x half>, <6 x half>)
 
 define void @fmuladd_v4f32(ptr %x, ptr %y, ptr %z) {
-; ZVFH-LABEL: fmuladd_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v9, (a1)
-; ZVFH-NEXT:    vle32.v v10, (a2)
-; ZVFH-NEXT:    vfmacc.vv v10, v8, v9
-; ZVFH-NEXT:    vse32.v v10, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fmuladd_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vle32.v v10, (a2)
-; ZVFHMIN-NEXT:    vfmacc.vv v10, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v10, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fmuladd_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v9, (a1)
+; CHECK-NEXT:    vle32.v v10, (a2)
+; CHECK-NEXT:    vfmacc.vv v10, v8, v9
+; CHECK-NEXT:    vse32.v v10, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = load <4 x float>, ptr %y
   %c = load <4 x float>, ptr %z
@@ -4054,23 +5306,23 @@ define void @fmsub_fmuladd_v8f16(ptr %x, ptr %y, ptr %z) {
 ;
 ; ZVFHMIN-LABEL: fmsub_fmuladd_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    vle16.v v10, (a2)
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v11
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmul.vv v8, v14, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v11, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v11
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <8 x half>, ptr %x
   %b = load <8 x half>, ptr %y
@@ -4094,24 +5346,24 @@ define void @fmsub_fmuladd_v6f16(ptr %x, ptr %y, ptr %z) {
 ;
 ; ZVFHMIN-LABEL: fmsub_fmuladd_v6f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vle16.v v8, (a1)
 ; ZVFHMIN-NEXT:    vle16.v v9, (a0)
 ; ZVFHMIN-NEXT:    vle16.v v10, (a2)
-; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfmul.vv v8, v8, v11
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v10
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v9
-; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v9, v8
-; ZVFHMIN-NEXT:    vse16.v v9, (a0)
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfmul.vv v8, v14, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v11, v8
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v11
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfsub.vv v8, v8, v12
+; ZVFHMIN-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v10, v8
+; ZVFHMIN-NEXT:    vse16.v v10, (a0)
 ; ZVFHMIN-NEXT:    ret
   %a = load <6 x half>, ptr %x
   %b = load <6 x half>, ptr %y
@@ -4123,25 +5375,15 @@ define void @fmsub_fmuladd_v6f16(ptr %x, ptr %y, ptr %z) {
 }
 
 define void @fnmsub_fmuladd_v4f32(ptr %x, ptr %y, ptr %z) {
-; ZVFH-LABEL: fnmsub_fmuladd_v4f32:
-; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; ZVFH-NEXT:    vle32.v v8, (a0)
-; ZVFH-NEXT:    vle32.v v9, (a1)
-; ZVFH-NEXT:    vle32.v v10, (a2)
-; ZVFH-NEXT:    vfnmsac.vv v10, v8, v9
-; ZVFH-NEXT:    vse32.v v10, (a0)
-; ZVFH-NEXT:    ret
-;
-; ZVFHMIN-LABEL: fnmsub_fmuladd_v4f32:
-; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vle32.v v8, (a0)
-; ZVFHMIN-NEXT:    vle32.v v9, (a1)
-; ZVFHMIN-NEXT:    vle32.v v10, (a2)
-; ZVFHMIN-NEXT:    vfnmsac.vv v10, v8, v9
-; ZVFHMIN-NEXT:    vse32.v v10, (a0)
-; ZVFHMIN-NEXT:    ret
+; CHECK-LABEL: fnmsub_fmuladd_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vle32.v v9, (a1)
+; CHECK-NEXT:    vle32.v v10, (a2)
+; CHECK-NEXT:    vfnmsac.vv v10, v8, v9
+; CHECK-NEXT:    vse32.v v10, (a0)
+; CHECK-NEXT:    ret
   %a = load <4 x float>, ptr %x
   %b = load <4 x float>, ptr %y
   %c = load <4 x float>, ptr %z
@@ -4170,10 +5412,3 @@ define void @fnmadd_fmuladd_v2f64(ptr %x, ptr %y, ptr %z) {
   store <2 x double> %d, ptr %x
   ret void
 }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; ZVFHMIN-RV32: {{.*}}
-; ZVFHMIN-RV64: {{.*}}
-; ZVFHMIN-ZFH-RV32: {{.*}}
-; ZVFHMIN-ZFH-RV64: {{.*}}
-; ZVFHMIN-ZFHIN-RV32: {{.*}}
-; ZVFHMIN-ZFHIN-RV64: {{.*}}