[llvm] [ISel/RISCV] Custom-lower vector [l]lround (PR #147713)

Ramkumar Ramachandra via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 9 05:32:10 PDT 2025


https://github.com/artagnon created https://github.com/llvm/llvm-project/pull/147713

Lower it just like vector [l]lrint: use vfcvt with the rounding mode set to RMM (round to nearest, ties away from zero). Updating the cost model to account for this custom lowering is left to a companion patch.
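
As an illustration, here is the <4 x i64> llround of <4 x float> case taken from the updated fixed-vectors-llround.ll test below: the previous element-by-element extract plus fcvt.l.s sequence (an llroundf libcall per element on RV32) collapses into a single widening convert, with fsrmi 4/fsrm switching the dynamic rounding mode to RMM around it (RV64 output shown):

  define <4 x i64> @llround_v4i64_v4f32(<4 x float> %x) nounwind {
  ; RV64:       # %bb.0:
  ; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
  ; RV64-NEXT:    vmv1r.v v10, v8
  ; RV64-NEXT:    fsrmi a0, 4
  ; RV64-NEXT:    vfwcvt.x.f.v v8, v10
  ; RV64-NEXT:    fsrm a0
  ; RV64-NEXT:    ret
    %a = call <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float> %x)
    ret <4 x i64> %a
  }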

>From c37338346659316897443ce22920dbca75935feb Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Wed, 9 Jul 2025 11:30:01 +0100
Subject: [PATCH] [ISel/RISCV] Custom-lower vector [l]lround

Lower it just like vector [l]lrint: use vfcvt with the rounding mode set
to RMM (round to nearest, ties away from zero). Updating the cost model
to account for this custom lowering is left to a companion patch.
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |   22 +-
 .../RISCV/rvv/fixed-vectors-llround.ll        | 1613 ++---------------
 .../CodeGen/RISCV/rvv/fixed-vectors-lround.ll | 1552 +++-------------
 llvm/test/CodeGen/RISCV/rvv/llround-sdnode.ll |  282 +++
 llvm/test/CodeGen/RISCV/rvv/lround-sdnode.ll  |  759 ++++++++
 5 files changed, 1475 insertions(+), 2753 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/llround-sdnode.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/lround-sdnode.ll

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index dcb4f690ba35c..db709063b1977 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1070,6 +1070,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       // vXf32.
       setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
       setOperationAction({ISD::LRINT, ISD::LLRINT}, VT, Custom);
+      setOperationAction({ISD::LROUND, ISD::LLROUND}, VT, Custom);
       // Custom-lower insert/extract operations to simplify patterns.
       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
                          Custom);
@@ -1151,6 +1152,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                          Custom);
       setOperationAction({ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Custom);
       setOperationAction({ISD::LRINT, ISD::LLRINT}, VT, Custom);
+      setOperationAction({ISD::LROUND, ISD::LLROUND}, VT, Custom);
       setOperationAction({ISD::VP_MERGE, ISD::VP_SELECT, ISD::SELECT}, VT,
                          Custom);
       setOperationAction(ISD::SELECT_CC, VT, Expand);
@@ -1453,6 +1455,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
           setOperationAction({ISD::VP_SINT_TO_FP, ISD::VP_UINT_TO_FP}, VT,
                              Custom);
           setOperationAction({ISD::LRINT, ISD::LLRINT}, VT, Custom);
+          setOperationAction({ISD::LROUND, ISD::LLROUND}, VT, Custom);
           if (Subtarget.hasStdExtZfhmin()) {
             setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
           } else {
@@ -1478,6 +1481,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
           setOperationAction(ISD::BITCAST, VT, Custom);
           setOperationAction({ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Custom);
           setOperationAction({ISD::LRINT, ISD::LLRINT}, VT, Custom);
+          setOperationAction({ISD::LROUND, ISD::LLROUND}, VT, Custom);
           if (Subtarget.hasStdExtZfbfmin()) {
             setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
           } else {
@@ -1511,7 +1515,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
 
         setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND,
                             ISD::FROUNDEVEN, ISD::FRINT, ISD::LRINT,
-                            ISD::LLRINT, ISD::FNEARBYINT},
+                            ISD::LLRINT, ISD::LROUND, ISD::LLROUND,
+                            ISD::FNEARBYINT},
                            VT, Custom);
 
         setCondCodeAction(VFPCCToExpand, VT, Expand);
@@ -3211,7 +3216,11 @@ static RISCVFPRndMode::RoundingMode matchRoundingOp(unsigned Opc) {
   case ISD::VP_FCEIL:
     return RISCVFPRndMode::RUP;
   case ISD::FROUND:
+  case ISD::LROUND:
+  case ISD::LLROUND:
   case ISD::STRICT_FROUND:
+  case ISD::STRICT_LROUND:
+  case ISD::STRICT_LLROUND:
   case ISD::VP_FROUND:
     return RISCVFPRndMode::RMM;
   case ISD::FRINT:
@@ -3469,9 +3478,9 @@ lowerFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
                      DAG.getTargetConstant(FRM, DL, Subtarget.getXLenVT()));
 }
 
-// Expand vector LRINT and LLRINT by converting to the integer domain.
-static SDValue lowerVectorXRINT(SDValue Op, SelectionDAG &DAG,
-                                const RISCVSubtarget &Subtarget) {
+// Expand vector [L]LRINT and [L]LROUND by converting to the integer domain.
+static SDValue lowerVectorXRINT_XROUND(SDValue Op, SelectionDAG &DAG,
+                                       const RISCVSubtarget &Subtarget) {
   SDLoc DL(Op);
   MVT DstVT = Op.getSimpleValueType();
   SDValue Src = Op.getOperand(0);
@@ -7711,11 +7720,10 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     return lowerFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
   case ISD::LRINT:
   case ISD::LLRINT:
-    if (Op.getValueType().isVector())
-      return lowerVectorXRINT(Op, DAG, Subtarget);
-    [[fallthrough]];
   case ISD::LROUND:
   case ISD::LLROUND: {
+    if (Op.getValueType().isVector())
+      return lowerVectorXRINT_XROUND(Op, DAG, Subtarget);
     assert(Op.getOperand(0).getValueType() == MVT::f16 &&
            "Unexpected custom legalisation");
     SDLoc DL(Op);
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll
index b8ca7fd71cb93..5751759ddd9cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llround.ll
@@ -1,35 +1,28 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d,+zvfh -target-abi=ilp32d \
+; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin -target-abi=ilp32d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfh -target-abi=lp64d \
+; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
 
 define <1 x i64> @llround_v1f16(<1 x half> %x) nounwind {
 ; RV32-LABEL: llround_v1f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 0(sp)
-; RV32-NEXT:    sw a1, 4(sp)
-; RV32-NEXT:    mv a0, sp
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vlse64.v v8, (a0), zero
-; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v9, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vfwcvt.x.f.v v8, v9
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v1f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV64-NEXT:    vmv.s.x v8, a0
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vfwcvt.f.f.v v9, v8
+; RV64-NEXT:    fsrmi a0, 4
+; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT:    vfwcvt.x.f.v v8, v9
+; RV64-NEXT:    fsrm a0
 ; RV64-NEXT:    ret
   %a = call <1 x i64> @llvm.llround.v1i64.v1f16(<1 x half> %x)
   ret <1 x i64> %a
@@ -39,58 +32,22 @@ declare <1 x i64> @llvm.llround.v1i64.v1f16(<1 x half>)
 define <2 x i64> @llround_v2f16(<2 x half> %x) nounwind {
 ; RV32-LABEL: llround_v2f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    csrr a2, vlenb
-; RV32-NEXT:    add a2, sp, a2
-; RV32-NEXT:    addi a2, a2, 16
-; RV32-NEXT:    vl1r.v v8, (a2) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v9, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vfwcvt.x.f.v v8, v9
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v2f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; RV64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v9
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-NEXT:    vfwcvt.f.f.v v9, v8
+; RV64-NEXT:    fsrmi a0, 4
+; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT:    vfwcvt.x.f.v v8, v9
+; RV64-NEXT:    fsrm a0
 ; RV64-NEXT:    ret
   %a = call <2 x i64> @llvm.llround.v2i64.v2f16(<2 x half> %x)
   ret <2 x i64> %a
@@ -100,108 +57,22 @@ declare <2 x i64> @llvm.llround.v2i64.v2f16(<2 x half>)
 define <3 x i64> @llround_v3f16(<3 x half> %x) nounwind {
 ; RV32-LABEL: llround_v3f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a1, a0, 1
-; RV32-NEXT:    add a0, a1, a0
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 2
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a1, a0, 1
-; RV32-NEXT:    add a0, a1, a0
-; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v10, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT:    vfwcvt.x.f.v v8, v10
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v3f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-NEXT:    vslidedown.vi v11, v8, 3
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v9
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; RV64-NEXT:    vfmv.f.s fa5, v11
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT:    vfwcvt.f.f.v v10, v8
+; RV64-NEXT:    fsrmi a0, 4
+; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-NEXT:    vfwcvt.x.f.v v8, v10
+; RV64-NEXT:    fsrm a0
 ; RV64-NEXT:    ret
   %a = call <3 x i64> @llvm.llround.v3i64.v3f16(<3 x half> %x)
   ret <3 x i64> %a
@@ -211,108 +82,22 @@ declare <3 x i64> @llvm.llround.v3i64.v3f16(<3 x half>)
 define <4 x i64> @llround_v4f16(<4 x half> %x) nounwind {
 ; RV32-LABEL: llround_v4f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a1, a0, 1
-; RV32-NEXT:    add a0, a1, a0
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 2
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a1, a0, 1
-; RV32-NEXT:    add a0, a1, a0
-; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v10, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT:    vfwcvt.x.f.v v8, v10
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v4f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-NEXT:    vslidedown.vi v11, v8, 3
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v9
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; RV64-NEXT:    vfmv.f.s fa5, v11
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT:    vfwcvt.f.f.v v10, v8
+; RV64-NEXT:    fsrmi a0, 4
+; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-NEXT:    vfwcvt.x.f.v v8, v10
+; RV64-NEXT:    fsrm a0
 ; RV64-NEXT:    ret
   %a = call <4 x i64> @llvm.llround.v4i64.v4f16(<4 x half> %x)
   ret <4 x i64> %a
@@ -322,147 +107,22 @@ declare <4 x i64> @llvm.llround.v4i64.v4f16(<4 x half>)
 define <8 x i64> @llround_v8f16(<8 x half> %x) nounwind {
 ; RV32-LABEL: llround_v8f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -208
-; RV32-NEXT:    sw ra, 204(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s0, 200(sp) # 4-byte Folded Spill
-; RV32-NEXT:    addi s0, sp, 208
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    andi sp, sp, -64
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 64(sp)
-; RV32-NEXT:    sw a1, 68(sp)
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 7
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 120(sp)
-; RV32-NEXT:    sw a1, 124(sp)
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 6
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 112(sp)
-; RV32-NEXT:    sw a1, 116(sp)
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 5
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 104(sp)
-; RV32-NEXT:    sw a1, 108(sp)
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 4
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 96(sp)
-; RV32-NEXT:    sw a1, 100(sp)
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 88(sp)
-; RV32-NEXT:    sw a1, 92(sp)
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 2
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 80(sp)
-; RV32-NEXT:    sw a1, 84(sp)
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 72(sp)
-; RV32-NEXT:    sw a1, 76(sp)
-; RV32-NEXT:    addi a0, sp, 64
-; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    addi sp, s0, -208
-; RV32-NEXT:    lw ra, 204(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s0, 200(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 208
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v12, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT:    vfwcvt.x.f.v v8, v12
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v8f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -128
-; RV64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; RV64-NEXT:    addi s0, sp, 128
-; RV64-NEXT:    andi sp, sp, -64
-; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    vslidedown.vi v9, v8, 7
-; RV64-NEXT:    vslidedown.vi v10, v8, 6
-; RV64-NEXT:    vslidedown.vi v11, v8, 5
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v9
-; RV64-NEXT:    vslidedown.vi v9, v8, 4
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    vslidedown.vi v10, v8, 3
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a2, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v11
-; RV64-NEXT:    vslidedown.vi v11, v8, 2
-; RV64-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a3, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v9
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a4, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a5, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v11
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a6, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    sd a4, 32(sp)
-; RV64-NEXT:    sd a3, 40(sp)
-; RV64-NEXT:    sd a2, 48(sp)
-; RV64-NEXT:    sd a1, 56(sp)
-; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    sd a6, 16(sp)
-; RV64-NEXT:    sd a5, 24(sp)
-; RV64-NEXT:    mv a0, sp
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi sp, s0, -128
-; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 128
+; RV64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT:    vfwcvt.f.f.v v12, v8
+; RV64-NEXT:    fsrmi a0, 4
+; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT:    vfwcvt.x.f.v v8, v12
+; RV64-NEXT:    fsrm a0
 ; RV64-NEXT:    ret
   %a = call <8 x i64> @llvm.llround.v8i64.v8f16(<8 x half> %x)
   ret <8 x i64> %a
@@ -472,262 +132,22 @@ declare <8 x i64> @llvm.llround.v8i64.v8f16(<8 x half>)
 define <16 x i64> @llround_v16f16(<16 x half> %x) nounwind {
 ; RV32-LABEL: llround_v16f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -400
-; RV32-NEXT:    sw ra, 396(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s0, 392(sp) # 4-byte Folded Spill
-; RV32-NEXT:    addi s0, sp, 400
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    andi sp, sp, -128
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 128(sp)
-; RV32-NEXT:    sw a1, 132(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 15
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 248(sp)
-; RV32-NEXT:    sw a1, 252(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 14
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 240(sp)
-; RV32-NEXT:    sw a1, 244(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 13
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 232(sp)
-; RV32-NEXT:    sw a1, 236(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 12
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 224(sp)
-; RV32-NEXT:    sw a1, 228(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 11
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 216(sp)
-; RV32-NEXT:    sw a1, 220(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 10
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 208(sp)
-; RV32-NEXT:    sw a1, 212(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 9
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 200(sp)
-; RV32-NEXT:    sw a1, 204(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 8
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 192(sp)
-; RV32-NEXT:    sw a1, 196(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 7
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 184(sp)
-; RV32-NEXT:    sw a1, 188(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 6
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 176(sp)
-; RV32-NEXT:    sw a1, 180(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 5
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 168(sp)
-; RV32-NEXT:    sw a1, 172(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 4
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 160(sp)
-; RV32-NEXT:    sw a1, 164(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 152(sp)
-; RV32-NEXT:    sw a1, 156(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 2
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 144(sp)
-; RV32-NEXT:    sw a1, 148(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa0, fa5
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 136(sp)
-; RV32-NEXT:    sw a1, 140(sp)
-; RV32-NEXT:    li a0, 32
-; RV32-NEXT:    addi a1, sp, 128
-; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; RV32-NEXT:    vle32.v v8, (a1)
-; RV32-NEXT:    addi sp, s0, -400
-; RV32-NEXT:    lw ra, 396(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s0, 392(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 400
+; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v16, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT:    vfwcvt.x.f.v v8, v16
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v16f16:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -256
-; RV64-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
-; RV64-NEXT:    addi s0, sp, 256
-; RV64-NEXT:    andi sp, sp, -128
-; RV64-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    vslidedown.vi v10, v8, 15
-; RV64-NEXT:    vslidedown.vi v12, v8, 14
-; RV64-NEXT:    vslidedown.vi v14, v8, 13
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    vslidedown.vi v10, v8, 12
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v12
-; RV64-NEXT:    vslidedown.vi v12, v8, 11
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a2, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v14
-; RV64-NEXT:    vslidedown.vi v14, v8, 10
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a3, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    vslidedown.vi v10, v8, 9
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a5, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v12
-; RV64-NEXT:    vslidedown.vi v12, v8, 8
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a4, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v14
-; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT:    vslidedown.vi v9, v8, 7
-; RV64-NEXT:    vslidedown.vi v11, v8, 6
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a6, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    vslidedown.vi v10, v8, 5
-; RV64-NEXT:    vslidedown.vi v13, v8, 4
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s a7, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v12
-; RV64-NEXT:    vslidedown.vi v12, v8, 3
-; RV64-NEXT:    vslidedown.vi v14, v8, 2
-; RV64-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s t0, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v9
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s t1, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v11
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s t2, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s t3, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v13
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    fcvt.l.s t4, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v12
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    sd a5, 96(sp)
-; RV64-NEXT:    sd a3, 104(sp)
-; RV64-NEXT:    sd a2, 112(sp)
-; RV64-NEXT:    sd a1, 120(sp)
-; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v14
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    sd t0, 64(sp)
-; RV64-NEXT:    sd a7, 72(sp)
-; RV64-NEXT:    sd a6, 80(sp)
-; RV64-NEXT:    sd a4, 88(sp)
-; RV64-NEXT:    fcvt.l.s a2, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-NEXT:    sd t4, 32(sp)
-; RV64-NEXT:    sd t3, 40(sp)
-; RV64-NEXT:    sd t2, 48(sp)
-; RV64-NEXT:    sd t1, 56(sp)
-; RV64-NEXT:    fcvt.l.s a3, fa5, rmm
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    sd a3, 8(sp)
-; RV64-NEXT:    sd a2, 16(sp)
-; RV64-NEXT:    sd a1, 24(sp)
-; RV64-NEXT:    mv a0, sp
-; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi sp, s0, -256
-; RV64-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 256
+; RV64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT:    vfwcvt.f.f.v v16, v8
+; RV64-NEXT:    fsrmi a0, 4
+; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT:    vfwcvt.x.f.v v8, v16
+; RV64-NEXT:    fsrm a0
 ; RV64-NEXT:    ret
   %a = call <16 x i64> @llvm.llround.v16i64.v16f16(<16 x half> %x)
   ret <16 x i64> %a
@@ -737,27 +157,20 @@ declare <16 x i64> @llvm.llround.v16i64.v16f16(<16 x half>)
 define <1 x i64> @llround_v1i64_v1f32(<1 x float> %x) nounwind {
 ; RV32-LABEL: llround_v1i64_v1f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 0(sp)
-; RV32-NEXT:    sw a1, 4(sp)
-; RV32-NEXT:    mv a0, sp
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vlse64.v v8, (a0), zero
-; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vfwcvt.x.f.v v9, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    vmv1r.v v8, v9
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v1i64_v1f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64-NEXT:    vmv.s.x v8, a0
+; RV64-NEXT:    fsrmi a0, 4
+; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT:    vfwcvt.x.f.v v9, v8
+; RV64-NEXT:    fsrm a0
+; RV64-NEXT:    vmv1r.v v8, v9
 ; RV64-NEXT:    ret
   %a = call <1 x i64> @llvm.llround.v1i64.v1f32(<1 x float> %x)
   ret <1 x i64> %a
@@ -767,54 +180,20 @@ declare <1 x i64> @llvm.llround.v1i64.v1f32(<1 x float>)
 define <2 x i64> @llround_v2i64_v2f32(<2 x float> %x) nounwind {
 ; RV32-LABEL: llround_v2i64_v2f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    csrr a2, vlenb
-; RV32-NEXT:    add a2, sp, a2
-; RV32-NEXT:    addi a2, a2, 16
-; RV32-NEXT:    vl1r.v v8, (a2) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vfwcvt.x.f.v v9, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    vmv1r.v v8, v9
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v2i64_v2f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v9
-; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-NEXT:    fsrmi a0, 4
+; RV64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-NEXT:    vfwcvt.x.f.v v9, v8
+; RV64-NEXT:    fsrm a0
+; RV64-NEXT:    vmv1r.v v8, v9
 ; RV64-NEXT:    ret
   %a = call <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float> %x)
   ret <2 x i64> %a
@@ -824,100 +203,20 @@ declare <2 x i64> @llvm.llround.v2i64.v2f32(<2 x float>)
 define <3 x i64> @llround_v3i64_v3f32(<3 x float> %x) nounwind {
 ; RV32-LABEL: llround_v3i64_v3f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a1, a0, 1
-; RV32-NEXT:    add a0, a1, a0
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 2
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a1, a0, 1
-; RV32-NEXT:    add a0, a1, a0
-; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv1r.v v10, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vfwcvt.x.f.v v8, v10
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v3i64_v3f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-NEXT:    vslidedown.vi v11, v8, 3
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v9
-; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64-NEXT:    vfmv.f.s fa5, v11
-; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT:    vmv1r.v v10, v8
+; RV64-NEXT:    fsrmi a0, 4
+; RV64-NEXT:    vfwcvt.x.f.v v8, v10
+; RV64-NEXT:    fsrm a0
 ; RV64-NEXT:    ret
   %a = call <3 x i64> @llvm.llround.v3i64.v3f32(<3 x float> %x)
   ret <3 x i64> %a
@@ -927,100 +226,20 @@ declare <3 x i64> @llvm.llround.v3i64.v3f32(<3 x float>)
 define <4 x i64> @llround_v4i64_v4f32(<4 x float> %x) nounwind {
 ; RV32-LABEL: llround_v4i64_v4f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a1, a0, 1
-; RV32-NEXT:    add a0, a1, a0
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 2
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a1, a0, 1
-; RV32-NEXT:    add a0, a1, a0
-; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv1r.v v10, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vfwcvt.x.f.v v8, v10
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v4i64_v4f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-NEXT:    vslidedown.vi v11, v8, 3
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v9
-; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64-NEXT:    vfmv.f.s fa5, v11
-; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT:    vmv1r.v v10, v8
+; RV64-NEXT:    fsrmi a0, 4
+; RV64-NEXT:    vfwcvt.x.f.v v8, v10
+; RV64-NEXT:    fsrm a0
 ; RV64-NEXT:    ret
   %a = call <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float> %x)
   ret <4 x i64> %a
@@ -1030,133 +249,20 @@ declare <4 x i64> @llvm.llround.v4i64.v4f32(<4 x float>)
 define <8 x i64> @llround_v8i64_v8f32(<8 x float> %x) nounwind {
 ; RV32-LABEL: llround_v8i64_v8f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -208
-; RV32-NEXT:    sw ra, 204(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s0, 200(sp) # 4-byte Folded Spill
-; RV32-NEXT:    addi s0, sp, 208
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    andi sp, sp, -64
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 64(sp)
-; RV32-NEXT:    sw a1, 68(sp)
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 7
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 120(sp)
-; RV32-NEXT:    sw a1, 124(sp)
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 6
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 112(sp)
-; RV32-NEXT:    sw a1, 116(sp)
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 5
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 104(sp)
-; RV32-NEXT:    sw a1, 108(sp)
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 4
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 96(sp)
-; RV32-NEXT:    sw a1, 100(sp)
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 88(sp)
-; RV32-NEXT:    sw a1, 92(sp)
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 2
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 80(sp)
-; RV32-NEXT:    sw a1, 84(sp)
-; RV32-NEXT:    addi a0, sp, 192
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 72(sp)
-; RV32-NEXT:    sw a1, 76(sp)
-; RV32-NEXT:    addi a0, sp, 64
-; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    addi sp, s0, -208
-; RV32-NEXT:    lw ra, 204(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s0, 200(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 208
+; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT:    vmv2r.v v12, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vfwcvt.x.f.v v8, v12
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v8i64_v8f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -128
-; RV64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; RV64-NEXT:    addi s0, sp, 128
-; RV64-NEXT:    andi sp, sp, -64
-; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    vslidedown.vi v10, v8, 7
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    vslidedown.vi v10, v8, 6
-; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    vslidedown.vi v10, v8, 5
-; RV64-NEXT:    fcvt.l.s a2, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    vslidedown.vi v10, v8, 4
-; RV64-NEXT:    fcvt.l.s a3, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT:    vslidedown.vi v9, v8, 3
-; RV64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-NEXT:    fcvt.l.s a4, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v9
-; RV64-NEXT:    fcvt.l.s a5, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
-; RV64-NEXT:    fcvt.l.s a6, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    sd a4, 32(sp)
-; RV64-NEXT:    sd a3, 40(sp)
-; RV64-NEXT:    sd a2, 48(sp)
-; RV64-NEXT:    sd a1, 56(sp)
-; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-NEXT:    sd a0, 0(sp)
-; RV64-NEXT:    sd a1, 8(sp)
-; RV64-NEXT:    sd a6, 16(sp)
-; RV64-NEXT:    sd a5, 24(sp)
-; RV64-NEXT:    mv a0, sp
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi sp, s0, -128
-; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 128
+; RV64-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64-NEXT:    vmv2r.v v12, v8
+; RV64-NEXT:    fsrmi a0, 4
+; RV64-NEXT:    vfwcvt.x.f.v v8, v12
+; RV64-NEXT:    fsrm a0
 ; RV64-NEXT:    ret
   %a = call <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float> %x)
   ret <8 x i64> %a
@@ -1166,198 +272,20 @@ declare <8 x i64> @llvm.llround.v8i64.v8f32(<8 x float>)
 define <16 x i64> @llround_v16i64_v16f32(<16 x float> %x) nounwind {
 ; RV32-LABEL: llround_v16i64_v16f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -400
-; RV32-NEXT:    sw ra, 396(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s0, 392(sp) # 4-byte Folded Spill
-; RV32-NEXT:    addi s0, sp, 400
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 2
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    andi sp, sp, -128
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
-; RV32-NEXT:    addi a0, sp, 64
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT:    vse32.v v8, (a0)
-; RV32-NEXT:    flw fa0, 124(sp)
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 248(sp)
-; RV32-NEXT:    sw a1, 252(sp)
-; RV32-NEXT:    flw fa0, 120(sp)
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 240(sp)
-; RV32-NEXT:    sw a1, 244(sp)
-; RV32-NEXT:    flw fa0, 116(sp)
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 232(sp)
-; RV32-NEXT:    sw a1, 236(sp)
-; RV32-NEXT:    flw fa0, 112(sp)
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 224(sp)
-; RV32-NEXT:    sw a1, 228(sp)
-; RV32-NEXT:    flw fa0, 108(sp)
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 216(sp)
-; RV32-NEXT:    sw a1, 220(sp)
-; RV32-NEXT:    flw fa0, 104(sp)
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 208(sp)
-; RV32-NEXT:    sw a1, 212(sp)
-; RV32-NEXT:    flw fa0, 100(sp)
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 200(sp)
-; RV32-NEXT:    sw a1, 204(sp)
-; RV32-NEXT:    flw fa0, 96(sp)
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 192(sp)
-; RV32-NEXT:    sw a1, 196(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 128(sp)
-; RV32-NEXT:    sw a1, 132(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 152(sp)
-; RV32-NEXT:    sw a1, 156(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 2
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 144(sp)
-; RV32-NEXT:    sw a1, 148(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 136(sp)
-; RV32-NEXT:    sw a1, 140(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 7
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 184(sp)
-; RV32-NEXT:    sw a1, 188(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 6
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 176(sp)
-; RV32-NEXT:    sw a1, 180(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 5
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 168(sp)
-; RV32-NEXT:    sw a1, 172(sp)
-; RV32-NEXT:    addi a0, sp, 384
-; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 4
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llroundf
-; RV32-NEXT:    sw a0, 160(sp)
-; RV32-NEXT:    sw a1, 164(sp)
-; RV32-NEXT:    li a0, 32
-; RV32-NEXT:    addi a1, sp, 128
-; RV32-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; RV32-NEXT:    vle32.v v8, (a1)
-; RV32-NEXT:    addi sp, s0, -400
-; RV32-NEXT:    lw ra, 396(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s0, 392(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 400
+; RV32-NEXT:    vmv4r.v v16, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vfwcvt.x.f.v v8, v16
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v16i64_v16f32:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -384
-; RV64-NEXT:    sd ra, 376(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s0, 368(sp) # 8-byte Folded Spill
-; RV64-NEXT:    addi s0, sp, 384
-; RV64-NEXT:    andi sp, sp, -128
-; RV64-NEXT:    addi a0, sp, 64
 ; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV64-NEXT:    vse32.v v8, (a0)
-; RV64-NEXT:    flw fa5, 124(sp)
-; RV64-NEXT:    vfmv.f.s fa4, v8
-; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-NEXT:    vslidedown.vi v10, v8, 3
-; RV64-NEXT:    vslidedown.vi v12, v8, 2
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    sd a0, 248(sp)
-; RV64-NEXT:    flw fa5, 120(sp)
-; RV64-NEXT:    vslidedown.vi v13, v8, 1
-; RV64-NEXT:    fcvt.l.s a0, fa4, rmm
-; RV64-NEXT:    vfmv.f.s fa4, v10
-; RV64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-NEXT:    sd a1, 240(sp)
-; RV64-NEXT:    flw fa5, 116(sp)
-; RV64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV64-NEXT:    vslidedown.vi v10, v8, 7
-; RV64-NEXT:    fcvt.l.s a1, fa4, rmm
-; RV64-NEXT:    vfmv.f.s fa4, v12
-; RV64-NEXT:    fcvt.l.s a2, fa5, rmm
-; RV64-NEXT:    sd a2, 232(sp)
-; RV64-NEXT:    flw fa5, 112(sp)
-; RV64-NEXT:    fcvt.l.s a2, fa4, rmm
-; RV64-NEXT:    vfmv.f.s fa4, v13
-; RV64-NEXT:    vslidedown.vi v12, v8, 6
-; RV64-NEXT:    fcvt.l.s a3, fa5, rmm
-; RV64-NEXT:    sd a3, 224(sp)
-; RV64-NEXT:    flw fa5, 108(sp)
-; RV64-NEXT:    fcvt.l.s a3, fa4, rmm
-; RV64-NEXT:    vfmv.f.s fa4, v10
-; RV64-NEXT:    vslidedown.vi v10, v8, 5
-; RV64-NEXT:    fcvt.l.s a4, fa5, rmm
-; RV64-NEXT:    sd a4, 216(sp)
-; RV64-NEXT:    flw fa5, 104(sp)
-; RV64-NEXT:    fcvt.l.s a4, fa4, rmm
-; RV64-NEXT:    vfmv.f.s fa4, v12
-; RV64-NEXT:    fcvt.l.s a5, fa4, rmm
-; RV64-NEXT:    fcvt.l.s a6, fa5, rmm
-; RV64-NEXT:    sd a6, 208(sp)
-; RV64-NEXT:    flw fa5, 100(sp)
-; RV64-NEXT:    vfmv.f.s fa4, v10
-; RV64-NEXT:    fcvt.l.s a6, fa4, rmm
-; RV64-NEXT:    vslidedown.vi v8, v8, 4
-; RV64-NEXT:    fcvt.l.s a7, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    sd a7, 200(sp)
-; RV64-NEXT:    fcvt.l.s a7, fa5, rmm
-; RV64-NEXT:    flw fa5, 96(sp)
-; RV64-NEXT:    sd a0, 128(sp)
-; RV64-NEXT:    sd a3, 136(sp)
-; RV64-NEXT:    sd a2, 144(sp)
-; RV64-NEXT:    sd a1, 152(sp)
-; RV64-NEXT:    sd a7, 160(sp)
-; RV64-NEXT:    sd a6, 168(sp)
-; RV64-NEXT:    sd a5, 176(sp)
-; RV64-NEXT:    sd a4, 184(sp)
-; RV64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-NEXT:    sd a0, 192(sp)
-; RV64-NEXT:    addi a0, sp, 128
-; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi sp, s0, -384
-; RV64-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s0, 368(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 384
+; RV64-NEXT:    vmv4r.v v16, v8
+; RV64-NEXT:    fsrmi a0, 4
+; RV64-NEXT:    vfwcvt.x.f.v v8, v16
+; RV64-NEXT:    fsrm a0
 ; RV64-NEXT:    ret
   %a = call <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float> %x)
   ret <16 x i64> %a
@@ -1367,26 +295,18 @@ declare <16 x i64> @llvm.llround.v16i64.v16f32(<16 x float>)
 define <1 x i64> @llround_v1i64_v1f64(<1 x double> %x) nounwind {
 ; RV32-LABEL: llround_v1i64_v1f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llround
-; RV32-NEXT:    sw a0, 0(sp)
-; RV32-NEXT:    sw a1, 4(sp)
-; RV32-NEXT:    mv a0, sp
+; RV32-NEXT:    fsrmi a0, 4
 ; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vlse64.v v8, (a0), zero
-; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v1i64_v1f64:
 ; RV64:       # %bb.0:
+; RV64-NEXT:    fsrmi a0, 4
 ; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    fcvt.l.d a0, fa5, rmm
-; RV64-NEXT:    vmv.s.x v8, a0
+; RV64-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-NEXT:    fsrm a0
 ; RV64-NEXT:    ret
   %a = call <1 x i64> @llvm.llround.v1i64.v1f64(<1 x double> %x)
   ret <1 x i64> %a
@@ -1396,53 +316,18 @@ declare <1 x i64> @llvm.llround.v1i64.v1f64(<1 x double>)
 define <2 x i64> @llround_v2i64_v2f64(<2 x double> %x) nounwind {
 ; RV32-LABEL: llround_v2i64_v2f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llround
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vs1r.v v8, (a0) # vscale x 8-byte Folded Spill
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vl1r.v v8, (a0) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llround
-; RV32-NEXT:    csrr a2, vlenb
-; RV32-NEXT:    add a2, sp, a2
-; RV32-NEXT:    addi a2, a2, 16
-; RV32-NEXT:    vl1r.v v8, (a2) # vscale x 8-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v2i64_v2f64:
 ; RV64:       # %bb.0:
+; RV64-NEXT:    fsrmi a0, 4
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    fcvt.l.d a0, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v9
-; RV64-NEXT:    fcvt.l.d a1, fa5, rmm
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-NEXT:    fsrm a0
 ; RV64-NEXT:    ret
   %a = call <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double> %x)
   ret <2 x i64> %a
@@ -1452,97 +337,18 @@ declare <2 x i64> @llvm.llround.v2i64.v2f64(<2 x double>)
 define <4 x i64> @llround_v4i64_v4f64(<4 x double> %x) nounwind {
 ; RV32-LABEL: llround_v4i64_v4f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -32
-; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 2
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llround
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llround
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 2
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llround
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs2r.v v8, (a0) # vscale x 16-byte Folded Spill
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl2r.v v8, (a0) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llround
-; RV32-NEXT:    addi a2, sp, 16
-; RV32-NEXT:    vl2r.v v8, (a2) # vscale x 16-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 2
-; RV32-NEXT:    add sp, sp, a0
-; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v4i64_v4f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vslidedown.vi v12, v8, 1
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-NEXT:    vslidedown.vi v8, v8, 3
-; RV64-NEXT:    fcvt.l.d a0, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v12
-; RV64-NEXT:    fcvt.l.d a1, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v10
+; RV64-NEXT:    fsrmi a0, 4
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT:    vmv.v.x v10, a0
-; RV64-NEXT:    fcvt.l.d a0, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    vslide1down.vx v8, v10, a1
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    fcvt.l.d a0, fa5, rmm
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-NEXT:    fsrm a0
 ; RV64-NEXT:    ret
   %a = call <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double> %x)
   ret <4 x i64> %a
@@ -1552,121 +358,18 @@ declare <4 x i64> @llvm.llround.v4i64.v4f64(<4 x double>)
 define <8 x i64> @llround_v8i64_v8f64(<8 x double> %x) nounwind {
 ; RV32-LABEL: llround_v8i64_v8f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -272
-; RV32-NEXT:    sw ra, 268(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s0, 264(sp) # 4-byte Folded Spill
-; RV32-NEXT:    addi s0, sp, 272
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 2
-; RV32-NEXT:    sub sp, sp, a0
-; RV32-NEXT:    andi sp, sp, -64
-; RV32-NEXT:    addi a0, sp, 256
-; RV32-NEXT:    vs4r.v v8, (a0) # vscale x 32-byte Folded Spill
-; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    fsrmi a0, 4
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT:    vse64.v v8, (a0)
-; RV32-NEXT:    fld fa0, 120(sp)
-; RV32-NEXT:    call llround
-; RV32-NEXT:    sw a0, 184(sp)
-; RV32-NEXT:    sw a1, 188(sp)
-; RV32-NEXT:    fld fa0, 112(sp)
-; RV32-NEXT:    call llround
-; RV32-NEXT:    sw a0, 176(sp)
-; RV32-NEXT:    sw a1, 180(sp)
-; RV32-NEXT:    fld fa0, 104(sp)
-; RV32-NEXT:    call llround
-; RV32-NEXT:    sw a0, 168(sp)
-; RV32-NEXT:    sw a1, 172(sp)
-; RV32-NEXT:    fld fa0, 96(sp)
-; RV32-NEXT:    call llround
-; RV32-NEXT:    sw a0, 160(sp)
-; RV32-NEXT:    sw a1, 164(sp)
-; RV32-NEXT:    addi a0, sp, 256
-; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llround
-; RV32-NEXT:    sw a0, 128(sp)
-; RV32-NEXT:    sw a1, 132(sp)
-; RV32-NEXT:    addi a0, sp, 256
-; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llround
-; RV32-NEXT:    sw a0, 136(sp)
-; RV32-NEXT:    sw a1, 140(sp)
-; RV32-NEXT:    addi a0, sp, 256
-; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llround
-; RV32-NEXT:    sw a0, 152(sp)
-; RV32-NEXT:    sw a1, 156(sp)
-; RV32-NEXT:    addi a0, sp, 256
-; RV32-NEXT:    vl4r.v v8, (a0) # vscale x 32-byte Folded Reload
-; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 2
-; RV32-NEXT:    vfmv.f.s fa0, v8
-; RV32-NEXT:    call llround
-; RV32-NEXT:    sw a0, 144(sp)
-; RV32-NEXT:    sw a1, 148(sp)
-; RV32-NEXT:    addi a0, sp, 128
-; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    addi sp, s0, -272
-; RV32-NEXT:    lw ra, 268(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s0, 264(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 272
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: llround_v8i64_v8f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi sp, sp, -192
-; RV64-NEXT:    sd ra, 184(sp) # 8-byte Folded Spill
-; RV64-NEXT:    sd s0, 176(sp) # 8-byte Folded Spill
-; RV64-NEXT:    addi s0, sp, 192
-; RV64-NEXT:    andi sp, sp, -64
-; RV64-NEXT:    mv a0, sp
-; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT:    vse64.v v8, (a0)
-; RV64-NEXT:    fld fa5, 56(sp)
-; RV64-NEXT:    vfmv.f.s fa4, v8
-; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-NEXT:    vslidedown.vi v10, v8, 1
-; RV64-NEXT:    fcvt.l.d a0, fa4, rmm
-; RV64-NEXT:    fcvt.l.d a1, fa5, rmm
-; RV64-NEXT:    sd a1, 120(sp)
-; RV64-NEXT:    fld fa5, 48(sp)
-; RV64-NEXT:    vfmv.f.s fa4, v10
-; RV64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT:    vslidedown.vi v10, v8, 3
-; RV64-NEXT:    fcvt.l.d a1, fa4, rmm
-; RV64-NEXT:    fcvt.l.d a2, fa5, rmm
-; RV64-NEXT:    sd a2, 112(sp)
-; RV64-NEXT:    fld fa5, 40(sp)
-; RV64-NEXT:    vfmv.f.s fa4, v10
-; RV64-NEXT:    fcvt.l.d a2, fa4, rmm
-; RV64-NEXT:    vslidedown.vi v8, v8, 2
-; RV64-NEXT:    fcvt.l.d a3, fa5, rmm
-; RV64-NEXT:    vfmv.f.s fa5, v8
-; RV64-NEXT:    sd a3, 104(sp)
-; RV64-NEXT:    fcvt.l.d a3, fa5, rmm
-; RV64-NEXT:    fld fa5, 32(sp)
-; RV64-NEXT:    sd a0, 64(sp)
-; RV64-NEXT:    sd a1, 72(sp)
-; RV64-NEXT:    sd a3, 80(sp)
-; RV64-NEXT:    sd a2, 88(sp)
-; RV64-NEXT:    fcvt.l.d a0, fa5, rmm
-; RV64-NEXT:    sd a0, 96(sp)
-; RV64-NEXT:    addi a0, sp, 64
+; RV64-NEXT:    fsrmi a0, 4
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT:    vle64.v v8, (a0)
-; RV64-NEXT:    addi sp, s0, -192
-; RV64-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
-; RV64-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
-; RV64-NEXT:    addi sp, sp, 192
+; RV64-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-NEXT:    fsrm a0
 ; RV64-NEXT:    ret
   %a = call <8 x i64> @llvm.llround.v8i64.v8f64(<8 x double> %x)
   ret <8 x i64> %a
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll
index 8289a8b8f833a..64b3b7912ed32 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lround.ll
@@ -1,40 +1,40 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin \
 ; RUN:     -target-abi=ilp32d -verify-machineinstrs | FileCheck %s --check-prefix=RV32
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfh \
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin \
 ; RUN:     -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i32
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfh \
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin \
 ; RUN:     -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i64
 
 define <1 x iXLen> @lround_v1f16(<1 x half> %x) nounwind {
 ; RV32-LABEL: lround_v1f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV32-NEXT:    vmv.s.x v8, a0
+; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v9, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v9
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v1f16:
 ; RV64-i32:       # %bb.0:
-; RV64-i32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV64-i32-NEXT:    vmv.s.x v8, a0
+; RV64-i32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-i32-NEXT:    vfwcvt.f.f.v v9, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT:    fsrm a0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v1f16:
 ; RV64-i64:       # %bb.0:
-; RV64-i64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV64-i64-NEXT:    vmv.s.x v8, a0
+; RV64-i64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-i64-NEXT:    vfwcvt.f.f.v v9, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v9
+; RV64-i64-NEXT:    fsrm a0
 ; RV64-i64-NEXT:    ret
   %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f16(<1 x half> %x)
   ret <1 x iXLen> %a
@@ -44,47 +44,32 @@ declare <1 x iXLen> @llvm.lround.v1iXLen.v1f16(<1 x half>)
 define <2 x iXLen> @lround_v2f16(<2 x half> %x) nounwind {
 ; RV32-LABEL: lround_v2f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; RV32-NEXT:    vslidedown.vi v9, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v9
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v9, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v9
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v2f16:
 ; RV64-i32:       # %bb.0:
-; RV64-i32-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v9
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-i32-NEXT:    vmv.v.x v8, a0
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i32-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-i32-NEXT:    vfwcvt.f.f.v v9, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT:    fsrm a0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v2f16:
 ; RV64-i64:       # %bb.0:
-; RV64-i64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v9
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-i64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV64-i64-NEXT:    vmv.v.x v8, a0
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-i64-NEXT:    vfwcvt.f.f.v v9, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v9
+; RV64-i64-NEXT:    fsrm a0
 ; RV64-i64-NEXT:    ret
   %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f16(<2 x half> %x)
   ret <2 x iXLen> %a
@@ -94,83 +79,32 @@ declare <2 x iXLen> @llvm.lround.v2iXLen.v2f16(<2 x half>)
 define <3 x iXLen> @lround_v3f16(<3 x half> %x) nounwind {
 ; RV32-LABEL: lround_v3f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT:    vslidedown.vi v9, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    vslidedown.vi v10, v8, 2
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v9
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vmv.v.x v9, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v9, v8
+; RV32-NEXT:    fsrmi a0, 4
 ; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v9, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vfcvt.x.f.v v8, v9
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v3f16:
 ; RV64-i32:       # %bb.0:
-; RV64-i32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v9
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v10
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-i32-NEXT:    vmv.v.x v9, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-i32-NEXT:    vfwcvt.f.f.v v9, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
 ; RV64-i32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64-i32-NEXT:    vslide1down.vx v8, v9, a1
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT:    fsrm a0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v3f16:
 ; RV64-i64:       # %bb.0:
-; RV64-i64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-i64-NEXT:    vslidedown.vi v11, v8, 3
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v9
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-i64-NEXT:    vmv.v.x v8, a0
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; RV64-i64-NEXT:    vfmv.f.s fa5, v11
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.f.f.v v10, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT:    fsrm a0
 ; RV64-i64-NEXT:    ret
   %a = call <3 x iXLen> @llvm.lround.v3iXLen.v3f16(<3 x half> %x)
   ret <3 x iXLen> %a
@@ -180,83 +114,32 @@ declare <3 x iXLen> @llvm.lround.v3iXLen.v3f16(<3 x half>)
 define <4 x iXLen> @lround_v4f16(<4 x half> %x) nounwind {
 ; RV32-LABEL: lround_v4f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV32-NEXT:    vslidedown.vi v9, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    vslidedown.vi v10, v8, 2
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v9
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vmv.v.x v9, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v9, v8
+; RV32-NEXT:    fsrmi a0, 4
 ; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v9, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vfcvt.x.f.v v8, v9
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v4f16:
 ; RV64-i32:       # %bb.0:
-; RV64-i32-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v9
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v10
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-i32-NEXT:    vmv.v.x v9, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-i32-NEXT:    vfwcvt.f.f.v v9, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
 ; RV64-i32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64-i32-NEXT:    vslide1down.vx v8, v9, a1
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT:    fsrm a0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v4f16:
 ; RV64-i64:       # %bb.0:
-; RV64-i64-NEXT:    vsetivli zero, 1, e16, mf2, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-i64-NEXT:    vslidedown.vi v11, v8, 3
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v9
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-i64-NEXT:    vmv.v.x v8, a0
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; RV64-i64-NEXT:    vfmv.f.s fa5, v11
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.f.f.v v10, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT:    fsrm a0
 ; RV64-i64-NEXT:    ret
   %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f16(<4 x half> %x)
   ret <4 x iXLen> %a
@@ -266,168 +149,32 @@ declare <4 x iXLen> @llvm.lround.v4iXLen.v4f16(<4 x half>)
 define <8 x iXLen> @lround_v8f16(<8 x half> %x) nounwind {
 ; RV32-LABEL: lround_v8f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v9, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    vslidedown.vi v10, v8, 2
-; RV32-NEXT:    vslidedown.vi v12, v8, 3
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v9
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vmv.v.x v10, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v12
-; RV32-NEXT:    vslidedown.vi v9, v8, 4
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v10, v10, a1
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v9
-; RV32-NEXT:    vslidedown.vi v9, v8, 5
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v9
-; RV32-NEXT:    vslidedown.vi v12, v8, 6
-; RV32-NEXT:    vslidedown.vi v13, v8, 7
-; RV32-NEXT:    fcvt.s.h fa5, fa5
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v10, v8
+; RV32-NEXT:    fsrmi a0, 4
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v10, a1
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v12
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v13
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vfcvt.x.f.v v8, v10
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v8f16:
 ; RV64-i32:       # %bb.0:
-; RV64-i32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-i32-NEXT:    vslidedown.vi v12, v8, 3
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v9
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v10
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV64-i32-NEXT:    vmv.v.x v10, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64-i32-NEXT:    vfmv.f.s fa5, v12
-; RV64-i32-NEXT:    vslidedown.vi v9, v8, 4
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV64-i32-NEXT:    vslide1down.vx v10, v10, a1
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64-i32-NEXT:    vfmv.f.s fa5, v9
-; RV64-i32-NEXT:    vslidedown.vi v9, v8, 5
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64-i32-NEXT:    vfmv.f.s fa5, v9
-; RV64-i32-NEXT:    vslidedown.vi v12, v8, 6
-; RV64-i32-NEXT:    vslidedown.vi v13, v8, 7
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV64-i32-NEXT:    vslide1down.vx v8, v10, a1
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64-i32-NEXT:    vfmv.f.s fa5, v12
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV64-i32-NEXT:    vfmv.f.s fa5, v13
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
+; RV64-i32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-i32-NEXT:    vfwcvt.f.f.v v10, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
 ; RV64-i32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v10
+; RV64-i32-NEXT:    fsrm a0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v8f16:
 ; RV64-i64:       # %bb.0:
-; RV64-i64-NEXT:    addi sp, sp, -128
-; RV64-i64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    addi s0, sp, 128
-; RV64-i64-NEXT:    andi sp, sp, -64
-; RV64-i64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    vslidedown.vi v9, v8, 7
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 6
-; RV64-i64-NEXT:    vslidedown.vi v11, v8, 5
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v9
-; RV64-i64-NEXT:    vslidedown.vi v9, v8, 4
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 3
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a2, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v11
-; RV64-i64-NEXT:    vslidedown.vi v11, v8, 2
-; RV64-i64-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a3, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v9
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a4, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a5, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v11
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a6, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    sd a4, 32(sp)
-; RV64-i64-NEXT:    sd a3, 40(sp)
-; RV64-i64-NEXT:    sd a2, 48(sp)
-; RV64-i64-NEXT:    sd a1, 56(sp)
-; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-i64-NEXT:    sd a0, 0(sp)
-; RV64-i64-NEXT:    sd a1, 8(sp)
-; RV64-i64-NEXT:    sd a6, 16(sp)
-; RV64-i64-NEXT:    sd a5, 24(sp)
-; RV64-i64-NEXT:    mv a0, sp
-; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-i64-NEXT:    vle64.v v8, (a0)
-; RV64-i64-NEXT:    addi sp, s0, -128
-; RV64-i64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    addi sp, sp, 128
+; RV64-i64-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV64-i64-NEXT:    vfwcvt.f.f.v v12, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v12
+; RV64-i64-NEXT:    fsrm a0
 ; RV64-i64-NEXT:    ret
   %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f16(<8 x half> %x)
   ret <8 x iXLen> %a
@@ -437,293 +184,32 @@ declare <8 x iXLen> @llvm.lround.v8iXLen.v8f16(<8 x half>)
 define <16 x iXLen> @lround_v16f16(<16 x half> %x) nounwind {
 ; RV32-LABEL: lround_v16f16:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -128
-; RV32-NEXT:    sw ra, 124(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s0, 120(sp) # 4-byte Folded Spill
-; RV32-NEXT:    addi s0, sp, 128
-; RV32-NEXT:    andi sp, sp, -64
-; RV32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    vslidedown.vi v10, v8, 15
-; RV32-NEXT:    vslidedown.vi v12, v8, 14
-; RV32-NEXT:    vslidedown.vi v14, v8, 13
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    vslidedown.vi v10, v8, 12
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v12
-; RV32-NEXT:    vslidedown.vi v12, v8, 11
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a2, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v14
-; RV32-NEXT:    vslidedown.vi v14, v8, 10
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a3, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    vslidedown.vi v10, v8, 9
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a5, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v12
-; RV32-NEXT:    vslidedown.vi v12, v8, 8
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a4, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v14
-; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v9, v8, 7
-; RV32-NEXT:    vslidedown.vi v11, v8, 6
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a6, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    vslidedown.vi v10, v8, 5
-; RV32-NEXT:    vslidedown.vi v13, v8, 4
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s a7, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v12
-; RV32-NEXT:    vslidedown.vi v12, v8, 3
-; RV32-NEXT:    vslidedown.vi v14, v8, 2
-; RV32-NEXT:    vslidedown.vi v8, v8, 1
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s t0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v9
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s t1, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v11
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s t2, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s t3, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v13
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    fcvt.w.s t4, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v12
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    sw a5, 48(sp)
-; RV32-NEXT:    sw a3, 52(sp)
-; RV32-NEXT:    sw a2, 56(sp)
-; RV32-NEXT:    sw a1, 60(sp)
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v14
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    sw t0, 32(sp)
-; RV32-NEXT:    sw a7, 36(sp)
-; RV32-NEXT:    sw a6, 40(sp)
-; RV32-NEXT:    sw a4, 44(sp)
-; RV32-NEXT:    fcvt.w.s a2, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.s.h fa5, fa5
-; RV32-NEXT:    sw t4, 16(sp)
-; RV32-NEXT:    sw t3, 20(sp)
-; RV32-NEXT:    sw t2, 24(sp)
-; RV32-NEXT:    sw t1, 28(sp)
-; RV32-NEXT:    fcvt.w.s a3, fa5, rmm
-; RV32-NEXT:    sw a0, 0(sp)
-; RV32-NEXT:    sw a3, 4(sp)
-; RV32-NEXT:    sw a2, 8(sp)
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    mv a0, sp
-; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    addi sp, s0, -128
-; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 128
+; RV32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v12, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v12
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v16f16:
 ; RV64-i32:       # %bb.0:
-; RV64-i32-NEXT:    addi sp, sp, -128
-; RV64-i32-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
-; RV64-i32-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; RV64-i32-NEXT:    addi s0, sp, 128
-; RV64-i32-NEXT:    andi sp, sp, -64
-; RV64-i32-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 15
-; RV64-i32-NEXT:    vslidedown.vi v12, v8, 14
-; RV64-i32-NEXT:    vslidedown.vi v14, v8, 13
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v10
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 12
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v12
-; RV64-i32-NEXT:    vslidedown.vi v12, v8, 11
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a2, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v14
-; RV64-i32-NEXT:    vslidedown.vi v14, v8, 10
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a3, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v10
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 9
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a5, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v12
-; RV64-i32-NEXT:    vslidedown.vi v12, v8, 8
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a4, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v14
-; RV64-i32-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v9, v8, 7
-; RV64-i32-NEXT:    vslidedown.vi v11, v8, 6
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a6, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v10
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 5
-; RV64-i32-NEXT:    vslidedown.vi v13, v8, 4
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s a7, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v12
-; RV64-i32-NEXT:    vslidedown.vi v12, v8, 3
-; RV64-i32-NEXT:    vslidedown.vi v14, v8, 2
-; RV64-i32-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s t0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v9
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s t1, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v11
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s t2, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v10
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s t3, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v13
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    fcvt.w.s t4, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v12
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    sw a5, 48(sp)
-; RV64-i32-NEXT:    sw a3, 52(sp)
-; RV64-i32-NEXT:    sw a2, 56(sp)
-; RV64-i32-NEXT:    sw a1, 60(sp)
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v14
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    sw t0, 32(sp)
-; RV64-i32-NEXT:    sw a7, 36(sp)
-; RV64-i32-NEXT:    sw a6, 40(sp)
-; RV64-i32-NEXT:    sw a4, 44(sp)
-; RV64-i32-NEXT:    fcvt.w.s a2, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i32-NEXT:    sw t4, 16(sp)
-; RV64-i32-NEXT:    sw t3, 20(sp)
-; RV64-i32-NEXT:    sw t2, 24(sp)
-; RV64-i32-NEXT:    sw t1, 28(sp)
-; RV64-i32-NEXT:    fcvt.w.s a3, fa5, rmm
-; RV64-i32-NEXT:    sw a0, 0(sp)
-; RV64-i32-NEXT:    sw a3, 4(sp)
-; RV64-i32-NEXT:    sw a2, 8(sp)
-; RV64-i32-NEXT:    sw a1, 12(sp)
-; RV64-i32-NEXT:    mv a0, sp
-; RV64-i32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV64-i32-NEXT:    vle32.v v8, (a0)
-; RV64-i32-NEXT:    addi sp, s0, -128
-; RV64-i32-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
-; RV64-i32-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; RV64-i32-NEXT:    addi sp, sp, 128
+; RV64-i32-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV64-i32-NEXT:    vfwcvt.f.f.v v12, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v12
+; RV64-i32-NEXT:    fsrm a0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v16f16:
 ; RV64-i64:       # %bb.0:
-; RV64-i64-NEXT:    addi sp, sp, -256
-; RV64-i64-NEXT:    sd ra, 248(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    sd s0, 240(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    addi s0, sp, 256
-; RV64-i64-NEXT:    andi sp, sp, -128
-; RV64-i64-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 15
-; RV64-i64-NEXT:    vslidedown.vi v12, v8, 14
-; RV64-i64-NEXT:    vslidedown.vi v14, v8, 13
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 12
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v12
-; RV64-i64-NEXT:    vslidedown.vi v12, v8, 11
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a2, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v14
-; RV64-i64-NEXT:    vslidedown.vi v14, v8, 10
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a3, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 9
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a5, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v12
-; RV64-i64-NEXT:    vslidedown.vi v12, v8, 8
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a4, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v14
-; RV64-i64-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v9, v8, 7
-; RV64-i64-NEXT:    vslidedown.vi v11, v8, 6
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a6, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 5
-; RV64-i64-NEXT:    vslidedown.vi v13, v8, 4
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s a7, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v12
-; RV64-i64-NEXT:    vslidedown.vi v12, v8, 3
-; RV64-i64-NEXT:    vslidedown.vi v14, v8, 2
-; RV64-i64-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s t0, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v9
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s t1, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v11
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s t2, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s t3, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v13
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    fcvt.l.s t4, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v12
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    sd a5, 96(sp)
-; RV64-i64-NEXT:    sd a3, 104(sp)
-; RV64-i64-NEXT:    sd a2, 112(sp)
-; RV64-i64-NEXT:    sd a1, 120(sp)
-; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v14
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    sd t0, 64(sp)
-; RV64-i64-NEXT:    sd a7, 72(sp)
-; RV64-i64-NEXT:    sd a6, 80(sp)
-; RV64-i64-NEXT:    sd a4, 88(sp)
-; RV64-i64-NEXT:    fcvt.l.s a2, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    fcvt.s.h fa5, fa5
-; RV64-i64-NEXT:    sd t4, 32(sp)
-; RV64-i64-NEXT:    sd t3, 40(sp)
-; RV64-i64-NEXT:    sd t2, 48(sp)
-; RV64-i64-NEXT:    sd t1, 56(sp)
-; RV64-i64-NEXT:    fcvt.l.s a3, fa5, rmm
-; RV64-i64-NEXT:    sd a0, 0(sp)
-; RV64-i64-NEXT:    sd a3, 8(sp)
-; RV64-i64-NEXT:    sd a2, 16(sp)
-; RV64-i64-NEXT:    sd a1, 24(sp)
-; RV64-i64-NEXT:    mv a0, sp
-; RV64-i64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; RV64-i64-NEXT:    vle64.v v8, (a0)
-; RV64-i64-NEXT:    addi sp, s0, -256
-; RV64-i64-NEXT:    ld ra, 248(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    ld s0, 240(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    addi sp, sp, 256
+; RV64-i64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.f.f.v v16, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT:    fsrm a0
 ; RV64-i64-NEXT:    ret
   %a = call <16 x iXLen> @llvm.lround.v16iXLen.v16f16(<16 x half> %x)
   ret <16 x iXLen> %a
@@ -733,27 +219,27 @@ declare <16 x iXLen> @llvm.lround.v16iXLen.v16f16(<16 x half>)
 define <1 x iXLen> @lround_v1f32(<1 x float> %x) nounwind {
 ; RV32-LABEL: lround_v1f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vmv.s.x v8, a0
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v1f32:
 ; RV64-i32:       # %bb.0:
-; RV64-i32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vmv.s.x v8, a0
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT:    fsrm a0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v1f32:
 ; RV64-i64:       # %bb.0:
-; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64-i64-NEXT:    vmv.s.x v8, a0
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v9, v8
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    vmv1r.v v8, v9
 ; RV64-i64-NEXT:    ret
   %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f32(<1 x float> %x)
   ret <1 x iXLen> %a
@@ -763,39 +249,27 @@ declare <1 x iXLen> @llvm.lround.v1iXLen.v1f32(<1 x float>)
 define <2 x iXLen> @lround_v2f32(<2 x float> %x) nounwind {
 ; RV32-LABEL: lround_v2f32:
 ; RV32:       # %bb.0:
+; RV32-NEXT:    fsrmi a0, 4
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT:    vslidedown.vi v9, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v9
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v2f32:
 ; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    fsrmi a0, 4
 ; RV64-i32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v9
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    vmv.v.x v8, a0
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT:    fsrm a0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v2f32:
 ; RV64-i64:       # %bb.0:
-; RV64-i64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v9
-; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-i64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV64-i64-NEXT:    vmv.v.x v8, a0
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v9, v8
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    vmv1r.v v8, v9
 ; RV64-i64-NEXT:    ret
   %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f32(<2 x float> %x)
   ret <2 x iXLen> %a
@@ -805,65 +279,27 @@ declare <2 x iXLen> @llvm.lround.v2iXLen.v2f32(<2 x float>)
 define <3 x iXLen> @lround_v3f32(<3 x float> %x) nounwind {
 ; RV32-LABEL: lround_v3f32:
 ; RV32:       # %bb.0:
+; RV32-NEXT:    fsrmi a0, 4
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v9, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    vslidedown.vi v10, v8, 2
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v9
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    vmv.v.x v9, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    vslide1down.vx v8, v9, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v3f32:
 ; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    fsrmi a0, 4
 ; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v9
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v10
-; RV64-i32-NEXT:    vmv.v.x v9, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    vslide1down.vx v8, v9, a1
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT:    fsrm a0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v3f32:
 ; RV64-i64:       # %bb.0:
-; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-i64-NEXT:    vslidedown.vi v11, v8, 3
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v9
-; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-i64-NEXT:    vmv.v.x v8, a0
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64-i64-NEXT:    vfmv.f.s fa5, v11
-; RV64-i64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i64-NEXT:    vmv1r.v v10, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT:    fsrm a0
 ; RV64-i64-NEXT:    ret
   %a = call <3 x iXLen> @llvm.lround.v3iXLen.v3f32(<3 x float> %x)
   ret <3 x iXLen> %a
@@ -873,65 +309,27 @@ declare <3 x iXLen> @llvm.lround.v3iXLen.v3f32(<3 x float>)
 define <4 x iXLen> @lround_v4f32(<4 x float> %x) nounwind {
 ; RV32-LABEL: lround_v4f32:
 ; RV32:       # %bb.0:
+; RV32-NEXT:    fsrmi a0, 4
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v9, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    vslidedown.vi v10, v8, 2
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v9
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    vmv.v.x v9, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    vslide1down.vx v8, v9, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v4f32:
 ; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    fsrmi a0, 4
 ; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v9
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v10
-; RV64-i32-NEXT:    vmv.v.x v9, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    vslide1down.vx v8, v9, a1
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT:    fsrm a0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v4f32:
 ; RV64-i64:       # %bb.0:
-; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-i64-NEXT:    vslidedown.vi v11, v8, 3
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v9
-; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-i64-NEXT:    vmv.v.x v8, a0
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64-i64-NEXT:    vfmv.f.s fa5, v11
-; RV64-i64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i64-NEXT:    vmv1r.v v10, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT:    fsrm a0
 ; RV64-i64-NEXT:    ret
   %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f32(<4 x float> %x)
   ret <4 x iXLen> %a
@@ -941,125 +339,27 @@ declare <4 x iXLen> @llvm.lround.v4iXLen.v4f32(<4 x float>)
 define <8 x iXLen> @lround_v8f32(<8 x float> %x) nounwind {
 ; RV32-LABEL: lround_v8f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v10, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    vslidedown.vi v11, v8, 2
-; RV32-NEXT:    vslidedown.vi v12, v8, 3
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v10
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v11
+; RV32-NEXT:    fsrmi a0, 4
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vmv.v.x v10, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v12
-; RV32-NEXT:    vslidedown.vi v12, v8, 4
-; RV32-NEXT:    vslide1down.vx v10, v10, a1
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v12
-; RV32-NEXT:    vslidedown.vi v12, v8, 5
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v12
-; RV32-NEXT:    vslidedown.vi v12, v8, 6
-; RV32-NEXT:    vslidedown.vi v8, v8, 7
-; RV32-NEXT:    vslide1down.vx v10, v10, a1
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v12
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    vslide1down.vx v8, v10, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v8f32:
 ; RV64-i32:       # %bb.0:
-; RV64-i32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 1
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    vslidedown.vi v11, v8, 2
-; RV64-i32-NEXT:    vslidedown.vi v12, v8, 3
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v10
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v11
+; RV64-i32-NEXT:    fsrmi a0, 4
 ; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV64-i32-NEXT:    vmv.v.x v10, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v12
-; RV64-i32-NEXT:    vslidedown.vi v12, v8, 4
-; RV64-i32-NEXT:    vslide1down.vx v10, v10, a1
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v12
-; RV64-i32-NEXT:    vslidedown.vi v12, v8, 5
-; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v12
-; RV64-i32-NEXT:    vslidedown.vi v12, v8, 6
-; RV64-i32-NEXT:    vslidedown.vi v8, v8, 7
-; RV64-i32-NEXT:    vslide1down.vx v10, v10, a1
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v12
-; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    vslide1down.vx v8, v10, a1
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT:    fsrm a0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v8f32:
 ; RV64-i64:       # %bb.0:
-; RV64-i64-NEXT:    addi sp, sp, -128
-; RV64-i64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    addi s0, sp, 128
-; RV64-i64-NEXT:    andi sp, sp, -64
-; RV64-i64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 7
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 6
-; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 5
-; RV64-i64-NEXT:    fcvt.l.s a2, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 4
-; RV64-i64-NEXT:    fcvt.l.s a3, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v9, v8, 3
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-i64-NEXT:    vslidedown.vi v8, v8, 1
-; RV64-i64-NEXT:    fcvt.l.s a4, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v9
-; RV64-i64-NEXT:    fcvt.l.s a5, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
-; RV64-i64-NEXT:    fcvt.l.s a6, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    sd a4, 32(sp)
-; RV64-i64-NEXT:    sd a3, 40(sp)
-; RV64-i64-NEXT:    sd a2, 48(sp)
-; RV64-i64-NEXT:    sd a1, 56(sp)
-; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-i64-NEXT:    sd a0, 0(sp)
-; RV64-i64-NEXT:    sd a1, 8(sp)
-; RV64-i64-NEXT:    sd a6, 16(sp)
-; RV64-i64-NEXT:    sd a5, 24(sp)
-; RV64-i64-NEXT:    mv a0, sp
-; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-i64-NEXT:    vle64.v v8, (a0)
-; RV64-i64-NEXT:    addi sp, s0, -128
-; RV64-i64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    addi sp, sp, 128
+; RV64-i64-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV64-i64-NEXT:    vmv2r.v v12, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v12
+; RV64-i64-NEXT:    fsrm a0
 ; RV64-i64-NEXT:    ret
   %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f32(<8 x float> %x)
   ret <8 x iXLen> %a
@@ -1069,230 +369,27 @@ declare <8 x iXLen> @llvm.lround.v8iXLen.v8f32(<8 x float>)
 define <16 x iXLen> @lround_v16f32(<16 x float> %x) nounwind {
 ; RV32-LABEL: lround_v16f32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -192
-; RV32-NEXT:    sw ra, 188(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s0, 184(sp) # 4-byte Folded Spill
-; RV32-NEXT:    addi s0, sp, 192
-; RV32-NEXT:    andi sp, sp, -64
-; RV32-NEXT:    mv a0, sp
-; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT:    vse32.v v8, (a0)
-; RV32-NEXT:    flw fa5, 60(sp)
-; RV32-NEXT:    vfmv.f.s fa4, v8
-; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v10, v8, 3
-; RV32-NEXT:    vslidedown.vi v11, v8, 2
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    sw a0, 124(sp)
-; RV32-NEXT:    flw fa5, 56(sp)
-; RV32-NEXT:    fcvt.w.s a0, fa4, rmm
-; RV32-NEXT:    vfmv.f.s fa4, v10
-; RV32-NEXT:    vslidedown.vi v10, v8, 1
-; RV32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV32-NEXT:    sw a1, 120(sp)
-; RV32-NEXT:    flw fa5, 52(sp)
-; RV32-NEXT:    fcvt.w.s a1, fa4, rmm
-; RV32-NEXT:    vfmv.f.s fa4, v11
-; RV32-NEXT:    fcvt.w.s a2, fa4, rmm
-; RV32-NEXT:    fcvt.w.s a3, fa5, rmm
-; RV32-NEXT:    sw a3, 116(sp)
-; RV32-NEXT:    flw fa5, 48(sp)
-; RV32-NEXT:    vfmv.f.s fa4, v10
-; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v10, v8, 7
-; RV32-NEXT:    fcvt.w.s a3, fa4, rmm
-; RV32-NEXT:    fcvt.w.s a4, fa5, rmm
-; RV32-NEXT:    sw a4, 112(sp)
-; RV32-NEXT:    flw fa5, 44(sp)
-; RV32-NEXT:    vfmv.f.s fa4, v10
-; RV32-NEXT:    vslidedown.vi v10, v8, 6
-; RV32-NEXT:    fcvt.w.s a4, fa4, rmm
-; RV32-NEXT:    fcvt.w.s a5, fa5, rmm
-; RV32-NEXT:    sw a5, 108(sp)
-; RV32-NEXT:    flw fa5, 40(sp)
-; RV32-NEXT:    vfmv.f.s fa4, v10
-; RV32-NEXT:    vslidedown.vi v10, v8, 5
-; RV32-NEXT:    fcvt.w.s a5, fa4, rmm
-; RV32-NEXT:    fcvt.w.s a6, fa5, rmm
-; RV32-NEXT:    sw a6, 104(sp)
-; RV32-NEXT:    flw fa5, 36(sp)
-; RV32-NEXT:    vfmv.f.s fa4, v10
-; RV32-NEXT:    fcvt.w.s a6, fa4, rmm
-; RV32-NEXT:    vslidedown.vi v8, v8, 4
-; RV32-NEXT:    fcvt.w.s a7, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    sw a7, 100(sp)
-; RV32-NEXT:    fcvt.w.s a7, fa5, rmm
-; RV32-NEXT:    flw fa5, 32(sp)
-; RV32-NEXT:    sw a0, 64(sp)
-; RV32-NEXT:    sw a3, 68(sp)
-; RV32-NEXT:    sw a2, 72(sp)
-; RV32-NEXT:    sw a1, 76(sp)
-; RV32-NEXT:    sw a7, 80(sp)
-; RV32-NEXT:    sw a6, 84(sp)
-; RV32-NEXT:    sw a5, 88(sp)
-; RV32-NEXT:    sw a4, 92(sp)
-; RV32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV32-NEXT:    sw a0, 96(sp)
-; RV32-NEXT:    addi a0, sp, 64
+; RV32-NEXT:    fsrmi a0, 4
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    addi sp, s0, -192
-; RV32-NEXT:    lw ra, 188(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s0, 184(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 192
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v16f32:
 ; RV64-i32:       # %bb.0:
-; RV64-i32-NEXT:    addi sp, sp, -192
-; RV64-i32-NEXT:    sd ra, 184(sp) # 8-byte Folded Spill
-; RV64-i32-NEXT:    sd s0, 176(sp) # 8-byte Folded Spill
-; RV64-i32-NEXT:    addi s0, sp, 192
-; RV64-i32-NEXT:    andi sp, sp, -64
-; RV64-i32-NEXT:    mv a0, sp
+; RV64-i32-NEXT:    fsrmi a0, 4
 ; RV64-i32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV64-i32-NEXT:    vse32.v v8, (a0)
-; RV64-i32-NEXT:    flw fa5, 60(sp)
-; RV64-i32-NEXT:    vfmv.f.s fa4, v8
-; RV64-i32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 3
-; RV64-i32-NEXT:    vslidedown.vi v11, v8, 2
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    sw a0, 124(sp)
-; RV64-i32-NEXT:    flw fa5, 56(sp)
-; RV64-i32-NEXT:    fcvt.w.s a0, fa4, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa4, v10
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 1
-; RV64-i32-NEXT:    fcvt.w.s a1, fa5, rmm
-; RV64-i32-NEXT:    sw a1, 120(sp)
-; RV64-i32-NEXT:    flw fa5, 52(sp)
-; RV64-i32-NEXT:    fcvt.w.s a1, fa4, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa4, v11
-; RV64-i32-NEXT:    fcvt.w.s a2, fa4, rmm
-; RV64-i32-NEXT:    fcvt.w.s a3, fa5, rmm
-; RV64-i32-NEXT:    sw a3, 116(sp)
-; RV64-i32-NEXT:    flw fa5, 48(sp)
-; RV64-i32-NEXT:    vfmv.f.s fa4, v10
-; RV64-i32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 7
-; RV64-i32-NEXT:    fcvt.w.s a3, fa4, rmm
-; RV64-i32-NEXT:    fcvt.w.s a4, fa5, rmm
-; RV64-i32-NEXT:    sw a4, 112(sp)
-; RV64-i32-NEXT:    flw fa5, 44(sp)
-; RV64-i32-NEXT:    vfmv.f.s fa4, v10
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 6
-; RV64-i32-NEXT:    fcvt.w.s a4, fa4, rmm
-; RV64-i32-NEXT:    fcvt.w.s a5, fa5, rmm
-; RV64-i32-NEXT:    sw a5, 108(sp)
-; RV64-i32-NEXT:    flw fa5, 40(sp)
-; RV64-i32-NEXT:    vfmv.f.s fa4, v10
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 5
-; RV64-i32-NEXT:    fcvt.w.s a5, fa4, rmm
-; RV64-i32-NEXT:    fcvt.w.s a6, fa5, rmm
-; RV64-i32-NEXT:    sw a6, 104(sp)
-; RV64-i32-NEXT:    flw fa5, 36(sp)
-; RV64-i32-NEXT:    vfmv.f.s fa4, v10
-; RV64-i32-NEXT:    fcvt.w.s a6, fa4, rmm
-; RV64-i32-NEXT:    vslidedown.vi v8, v8, 4
-; RV64-i32-NEXT:    fcvt.w.s a7, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    sw a7, 100(sp)
-; RV64-i32-NEXT:    fcvt.w.s a7, fa5, rmm
-; RV64-i32-NEXT:    flw fa5, 32(sp)
-; RV64-i32-NEXT:    sw a0, 64(sp)
-; RV64-i32-NEXT:    sw a3, 68(sp)
-; RV64-i32-NEXT:    sw a2, 72(sp)
-; RV64-i32-NEXT:    sw a1, 76(sp)
-; RV64-i32-NEXT:    sw a7, 80(sp)
-; RV64-i32-NEXT:    sw a6, 84(sp)
-; RV64-i32-NEXT:    sw a5, 88(sp)
-; RV64-i32-NEXT:    sw a4, 92(sp)
-; RV64-i32-NEXT:    fcvt.w.s a0, fa5, rmm
-; RV64-i32-NEXT:    sw a0, 96(sp)
-; RV64-i32-NEXT:    addi a0, sp, 64
-; RV64-i32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV64-i32-NEXT:    vle32.v v8, (a0)
-; RV64-i32-NEXT:    addi sp, s0, -192
-; RV64-i32-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
-; RV64-i32-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
-; RV64-i32-NEXT:    addi sp, sp, 192
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT:    fsrm a0
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v16f32:
 ; RV64-i64:       # %bb.0:
-; RV64-i64-NEXT:    addi sp, sp, -384
-; RV64-i64-NEXT:    sd ra, 376(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    sd s0, 368(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    addi s0, sp, 384
-; RV64-i64-NEXT:    andi sp, sp, -128
-; RV64-i64-NEXT:    addi a0, sp, 64
 ; RV64-i64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV64-i64-NEXT:    vse32.v v8, (a0)
-; RV64-i64-NEXT:    flw fa5, 124(sp)
-; RV64-i64-NEXT:    vfmv.f.s fa4, v8
-; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 3
-; RV64-i64-NEXT:    vslidedown.vi v12, v8, 2
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    sd a0, 248(sp)
-; RV64-i64-NEXT:    flw fa5, 120(sp)
-; RV64-i64-NEXT:    vslidedown.vi v13, v8, 1
-; RV64-i64-NEXT:    fcvt.l.s a0, fa4, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa4, v10
-; RV64-i64-NEXT:    fcvt.l.s a1, fa5, rmm
-; RV64-i64-NEXT:    sd a1, 240(sp)
-; RV64-i64-NEXT:    flw fa5, 116(sp)
-; RV64-i64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 7
-; RV64-i64-NEXT:    fcvt.l.s a1, fa4, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa4, v12
-; RV64-i64-NEXT:    fcvt.l.s a2, fa5, rmm
-; RV64-i64-NEXT:    sd a2, 232(sp)
-; RV64-i64-NEXT:    flw fa5, 112(sp)
-; RV64-i64-NEXT:    fcvt.l.s a2, fa4, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa4, v13
-; RV64-i64-NEXT:    vslidedown.vi v12, v8, 6
-; RV64-i64-NEXT:    fcvt.l.s a3, fa5, rmm
-; RV64-i64-NEXT:    sd a3, 224(sp)
-; RV64-i64-NEXT:    flw fa5, 108(sp)
-; RV64-i64-NEXT:    fcvt.l.s a3, fa4, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa4, v10
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 5
-; RV64-i64-NEXT:    fcvt.l.s a4, fa5, rmm
-; RV64-i64-NEXT:    sd a4, 216(sp)
-; RV64-i64-NEXT:    flw fa5, 104(sp)
-; RV64-i64-NEXT:    fcvt.l.s a4, fa4, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa4, v12
-; RV64-i64-NEXT:    fcvt.l.s a5, fa4, rmm
-; RV64-i64-NEXT:    fcvt.l.s a6, fa5, rmm
-; RV64-i64-NEXT:    sd a6, 208(sp)
-; RV64-i64-NEXT:    flw fa5, 100(sp)
-; RV64-i64-NEXT:    vfmv.f.s fa4, v10
-; RV64-i64-NEXT:    fcvt.l.s a6, fa4, rmm
-; RV64-i64-NEXT:    vslidedown.vi v8, v8, 4
-; RV64-i64-NEXT:    fcvt.l.s a7, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    sd a7, 200(sp)
-; RV64-i64-NEXT:    fcvt.l.s a7, fa5, rmm
-; RV64-i64-NEXT:    flw fa5, 96(sp)
-; RV64-i64-NEXT:    sd a0, 128(sp)
-; RV64-i64-NEXT:    sd a3, 136(sp)
-; RV64-i64-NEXT:    sd a2, 144(sp)
-; RV64-i64-NEXT:    sd a1, 152(sp)
-; RV64-i64-NEXT:    sd a7, 160(sp)
-; RV64-i64-NEXT:    sd a6, 168(sp)
-; RV64-i64-NEXT:    sd a5, 176(sp)
-; RV64-i64-NEXT:    sd a4, 184(sp)
-; RV64-i64-NEXT:    fcvt.l.s a0, fa5, rmm
-; RV64-i64-NEXT:    sd a0, 192(sp)
-; RV64-i64-NEXT:    addi a0, sp, 128
-; RV64-i64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; RV64-i64-NEXT:    vle64.v v8, (a0)
-; RV64-i64-NEXT:    addi sp, s0, -384
-; RV64-i64-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    ld s0, 368(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    addi sp, sp, 384
+; RV64-i64-NEXT:    vmv4r.v v16, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT:    fsrm a0
 ; RV64-i64-NEXT:    ret
   %a = call <16 x iXLen> @llvm.lround.v16iXLen.v16f32(<16 x float> %x)
   ret <16 x iXLen> %a
@@ -1302,26 +399,28 @@ declare <16 x iXLen> @llvm.lround.v16iXLen.v16f32(<16 x float>)
 define <1 x iXLen> @lround_v1f64(<1 x double> %x) nounwind {
 ; RV32-LABEL: lround_v1f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.w.d a0, fa5, rmm
-; RV32-NEXT:    vmv.s.x v8, a0
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vfncvt.x.f.w v9, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    vmv1r.v v8, v9
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v1f64:
 ; RV64-i32:       # %bb.0:
-; RV64-i32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    fcvt.w.d a0, fa5, rmm
-; RV64-i32-NEXT:    vmv.s.x v8, a0
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-i32-NEXT:    vfncvt.x.f.w v9, v8
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    vmv1r.v v8, v9
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v1f64:
 ; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    fsrmi a0, 4
 ; RV64-i64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    fcvt.l.d a0, fa5, rmm
-; RV64-i64-NEXT:    vmv.s.x v8, a0
+; RV64-i64-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT:    fsrm a0
 ; RV64-i64-NEXT:    ret
   %a = call <1 x iXLen> @llvm.lround.v1iXLen.v1f64(<1 x double> %x)
   ret <1 x iXLen> %a
@@ -1331,40 +430,28 @@ declare <1 x iXLen> @llvm.lround.v1iXLen.v1f64(<1 x double>)
 define <2 x iXLen> @lround_v2f64(<2 x double> %x) nounwind {
 ; RV32-LABEL: lround_v2f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v9, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    fcvt.w.d a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v9
-; RV32-NEXT:    fcvt.w.d a1, fa5, rmm
+; RV32-NEXT:    fsrmi a0, 4
 ; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
+; RV32-NEXT:    vfncvt.x.f.w v9, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    vmv1r.v v8, v9
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v2f64:
 ; RV64-i32:       # %bb.0:
-; RV64-i32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    fcvt.w.d a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v9
-; RV64-i32-NEXT:    fcvt.w.d a1, fa5, rmm
+; RV64-i32-NEXT:    fsrmi a0, 4
 ; RV64-i32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; RV64-i32-NEXT:    vmv.v.x v8, a0
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i32-NEXT:    vfncvt.x.f.w v9, v8
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    vmv1r.v v8, v9
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v2f64:
 ; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    fsrmi a0, 4
 ; RV64-i64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    fcvt.l.d a0, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v9
-; RV64-i64-NEXT:    fcvt.l.d a1, fa5, rmm
-; RV64-i64-NEXT:    vmv.v.x v8, a0
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a1
+; RV64-i64-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT:    fsrm a0
 ; RV64-i64-NEXT:    ret
   %a = call <2 x iXLen> @llvm.lround.v2iXLen.v2f64(<2 x double> %x)
   ret <2 x iXLen> %a
@@ -1374,72 +461,28 @@ declare <2 x iXLen> @llvm.lround.v2iXLen.v2f64(<2 x double>)
 define <4 x iXLen> @lround_v4f64(<4 x double> %x) nounwind {
 ; RV32-LABEL: lround_v4f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v12, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v10, v8, 2
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    fcvt.w.d a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v12
-; RV32-NEXT:    fcvt.w.d a1, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v10
+; RV32-NEXT:    fsrmi a0, 4
 ; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV32-NEXT:    vmv.v.x v9, a0
-; RV32-NEXT:    fcvt.w.d a0, fa5, rmm
-; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV32-NEXT:    vslide1down.vx v8, v9, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    fcvt.w.d a0, fa5, rmm
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vfncvt.x.f.w v10, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    vmv.v.v v8, v10
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v4f64:
 ; RV64-i32:       # %bb.0:
-; RV64-i32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v12, v8, 1
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
-; RV64-i32-NEXT:    fcvt.w.d a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v12
-; RV64-i32-NEXT:    fcvt.w.d a1, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v10
+; RV64-i32-NEXT:    fsrmi a0, 4
 ; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; RV64-i32-NEXT:    vmv.v.x v9, a0
-; RV64-i32-NEXT:    fcvt.w.d a0, fa5, rmm
-; RV64-i32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; RV64-i32-NEXT:    vslide1down.vx v8, v9, a1
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-i32-NEXT:    fcvt.w.d a0, fa5, rmm
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT:    vfncvt.x.f.w v10, v8
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    vmv.v.v v8, v10
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v4f64:
 ; RV64-i64:       # %bb.0:
-; RV64-i64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v12, v8, 1
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 2
-; RV64-i64-NEXT:    vslidedown.vi v8, v8, 3
-; RV64-i64-NEXT:    fcvt.l.d a0, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v12
-; RV64-i64-NEXT:    fcvt.l.d a1, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v10
+; RV64-i64-NEXT:    fsrmi a0, 4
 ; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV64-i64-NEXT:    vmv.v.x v10, a0
-; RV64-i64-NEXT:    fcvt.l.d a0, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    vslide1down.vx v8, v10, a1
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-i64-NEXT:    fcvt.l.d a0, fa5, rmm
-; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-i64-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT:    fsrm a0
 ; RV64-i64-NEXT:    ret
   %a = call <4 x iXLen> @llvm.lround.v4iXLen.v4f64(<4 x double> %x)
   ret <4 x iXLen> %a
@@ -1449,146 +492,73 @@ declare <4 x iXLen> @llvm.lround.v4iXLen.v4f64(<4 x double>)
 define <8 x iXLen> @lround_v8f64(<8 x double> %x) nounwind {
 ; RV32-LABEL: lround_v8f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -128
-; RV32-NEXT:    sw ra, 124(sp) # 4-byte Folded Spill
-; RV32-NEXT:    sw s0, 120(sp) # 4-byte Folded Spill
-; RV32-NEXT:    addi s0, sp, 128
-; RV32-NEXT:    andi sp, sp, -64
-; RV32-NEXT:    mv a0, sp
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vslidedown.vi v14, v8, 1
-; RV32-NEXT:    vfmv.f.s fa5, v8
-; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v12, v8, 2
-; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT:    vse64.v v8, (a0)
-; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT:    vslidedown.vi v8, v8, 3
-; RV32-NEXT:    vfmv.f.s fa4, v14
-; RV32-NEXT:    fcvt.w.d a0, fa5, rmm
-; RV32-NEXT:    vfmv.f.s fa5, v12
-; RV32-NEXT:    vfmv.f.s fa3, v8
-; RV32-NEXT:    fcvt.w.d a1, fa4, rmm
-; RV32-NEXT:    fcvt.w.d a2, fa5, rmm
-; RV32-NEXT:    fcvt.w.d a3, fa3, rmm
-; RV32-NEXT:    fld fa5, 32(sp)
-; RV32-NEXT:    fld fa4, 40(sp)
-; RV32-NEXT:    fld fa3, 48(sp)
-; RV32-NEXT:    fld fa2, 56(sp)
-; RV32-NEXT:    fcvt.w.d a4, fa5, rmm
-; RV32-NEXT:    fcvt.w.d a5, fa4, rmm
-; RV32-NEXT:    fcvt.w.d a6, fa3, rmm
+; RV32-NEXT:    fsrmi a0, 4
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT:    vmv.v.x v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a2
-; RV32-NEXT:    vslide1down.vx v8, v8, a3
-; RV32-NEXT:    vslide1down.vx v8, v8, a4
-; RV32-NEXT:    vslide1down.vx v8, v8, a5
-; RV32-NEXT:    vslide1down.vx v8, v8, a6
-; RV32-NEXT:    fcvt.w.d a0, fa2, rmm
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    addi sp, s0, -128
-; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
-; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
-; RV32-NEXT:    addi sp, sp, 128
+; RV32-NEXT:    vfncvt.x.f.w v12, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    vmv.v.v v8, v12
 ; RV32-NEXT:    ret
 ;
 ; RV64-i32-LABEL: lround_v8f64:
 ; RV64-i32:       # %bb.0:
-; RV64-i32-NEXT:    addi sp, sp, -128
-; RV64-i32-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
-; RV64-i32-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
-; RV64-i32-NEXT:    addi s0, sp, 128
-; RV64-i32-NEXT:    andi sp, sp, -64
-; RV64-i32-NEXT:    mv a0, sp
-; RV64-i32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v14, v8, 1
-; RV64-i32-NEXT:    vfmv.f.s fa5, v8
-; RV64-i32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v12, v8, 2
-; RV64-i32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-i32-NEXT:    vse64.v v8, (a0)
-; RV64-i32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
-; RV64-i32-NEXT:    vfmv.f.s fa4, v14
-; RV64-i32-NEXT:    fcvt.w.d a0, fa5, rmm
-; RV64-i32-NEXT:    vfmv.f.s fa5, v12
-; RV64-i32-NEXT:    vfmv.f.s fa3, v8
-; RV64-i32-NEXT:    fcvt.w.d a1, fa4, rmm
-; RV64-i32-NEXT:    fcvt.w.d a2, fa5, rmm
-; RV64-i32-NEXT:    fcvt.w.d a3, fa3, rmm
-; RV64-i32-NEXT:    fld fa5, 32(sp)
-; RV64-i32-NEXT:    fld fa4, 40(sp)
-; RV64-i32-NEXT:    fld fa3, 48(sp)
-; RV64-i32-NEXT:    fld fa2, 56(sp)
-; RV64-i32-NEXT:    fcvt.w.d a4, fa5, rmm
-; RV64-i32-NEXT:    fcvt.w.d a5, fa4, rmm
-; RV64-i32-NEXT:    fcvt.w.d a6, fa3, rmm
+; RV64-i32-NEXT:    fsrmi a0, 4
 ; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; RV64-i32-NEXT:    vmv.v.x v8, a0
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a2
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a3
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a4
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a5
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a6
-; RV64-i32-NEXT:    fcvt.w.d a0, fa2, rmm
-; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-i32-NEXT:    addi sp, s0, -128
-; RV64-i32-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
-; RV64-i32-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
-; RV64-i32-NEXT:    addi sp, sp, 128
+; RV64-i32-NEXT:    vfncvt.x.f.w v12, v8
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    vmv.v.v v8, v12
 ; RV64-i32-NEXT:    ret
 ;
 ; RV64-i64-LABEL: lround_v8f64:
 ; RV64-i64:       # %bb.0:
-; RV64-i64-NEXT:    addi sp, sp, -192
-; RV64-i64-NEXT:    sd ra, 184(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    sd s0, 176(sp) # 8-byte Folded Spill
-; RV64-i64-NEXT:    addi s0, sp, 192
-; RV64-i64-NEXT:    andi sp, sp, -64
-; RV64-i64-NEXT:    mv a0, sp
-; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-i64-NEXT:    vse64.v v8, (a0)
-; RV64-i64-NEXT:    fld fa5, 56(sp)
-; RV64-i64-NEXT:    vfmv.f.s fa4, v8
-; RV64-i64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 1
-; RV64-i64-NEXT:    fcvt.l.d a0, fa4, rmm
-; RV64-i64-NEXT:    fcvt.l.d a1, fa5, rmm
-; RV64-i64-NEXT:    sd a1, 120(sp)
-; RV64-i64-NEXT:    fld fa5, 48(sp)
-; RV64-i64-NEXT:    vfmv.f.s fa4, v10
-; RV64-i64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
-; RV64-i64-NEXT:    vslidedown.vi v10, v8, 3
-; RV64-i64-NEXT:    fcvt.l.d a1, fa4, rmm
-; RV64-i64-NEXT:    fcvt.l.d a2, fa5, rmm
-; RV64-i64-NEXT:    sd a2, 112(sp)
-; RV64-i64-NEXT:    fld fa5, 40(sp)
-; RV64-i64-NEXT:    vfmv.f.s fa4, v10
-; RV64-i64-NEXT:    fcvt.l.d a2, fa4, rmm
-; RV64-i64-NEXT:    vslidedown.vi v8, v8, 2
-; RV64-i64-NEXT:    fcvt.l.d a3, fa5, rmm
-; RV64-i64-NEXT:    vfmv.f.s fa5, v8
-; RV64-i64-NEXT:    sd a3, 104(sp)
-; RV64-i64-NEXT:    fcvt.l.d a3, fa5, rmm
-; RV64-i64-NEXT:    fld fa5, 32(sp)
-; RV64-i64-NEXT:    sd a0, 64(sp)
-; RV64-i64-NEXT:    sd a1, 72(sp)
-; RV64-i64-NEXT:    sd a3, 80(sp)
-; RV64-i64-NEXT:    sd a2, 88(sp)
-; RV64-i64-NEXT:    fcvt.l.d a0, fa5, rmm
-; RV64-i64-NEXT:    sd a0, 96(sp)
-; RV64-i64-NEXT:    addi a0, sp, 64
+; RV64-i64-NEXT:    fsrmi a0, 4
 ; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV64-i64-NEXT:    vle64.v v8, (a0)
-; RV64-i64-NEXT:    addi sp, s0, -192
-; RV64-i64-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
-; RV64-i64-NEXT:    addi sp, sp, 192
+; RV64-i64-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT:    fsrm a0
 ; RV64-i64-NEXT:    ret
   %a = call <8 x iXLen> @llvm.lround.v8iXLen.v8f64(<8 x double> %x)
   ret <8 x iXLen> %a
 }
 declare <8 x iXLen> @llvm.lround.v8iXLen.v8f64(<8 x double>)
+
+define <32 x iXLen> @lround_v32bf16(<32 x bfloat> %x) {
+; RV32-LABEL: lround_v32bf16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a0, 32
+; RV32-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; RV32-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v16
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_v32bf16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    li a0, 32
+; RV64-i32-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; RV64-i32-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v16
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_v32bf16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; RV64-i64-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetivli zero, 16, e16, m4, ta, ma
+; RV64-i64-NEXT:    vslidedown.vi v20, v8, 16
+; RV64-i64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; RV64-i64-NEXT:    vfwcvtbf16.f.f.v v24, v20
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v16, v24
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <32 x iXLen> @llvm.lround.v32iXLen.v32bf16(<32 x bfloat> %x)
+  ret <32 x iXLen> %a
+}
+declare <32 x iXLen> @llvm.lround.v32iXLen.v32bf16(<32 x bfloat>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/llround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/llround-sdnode.ll
new file mode 100644
index 0000000000000..5b4c7ba91400f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/llround-sdnode.ll
@@ -0,0 +1,282 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i64> @llround_nxv1i64_nxv1f32(<vscale x 1 x float> %x) {
+; CHECK-LABEL: llround_nxv1i64_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfwcvt.x.f.v v9, v8
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1f32(<vscale x 1 x float> %x)
+  ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1f32(<vscale x 1 x float>)
+
+define <vscale x 2 x i64> @llround_nxv2i64_nxv2f32(<vscale x 2 x float> %x) {
+; CHECK-LABEL: llround_nxv2i64_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfwcvt.x.f.v v8, v10
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2f32(<vscale x 2 x float> %x)
+  ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2f32(<vscale x 2 x float>)
+
+define <vscale x 4 x i64> @llround_nxv4i64_nxv4f32(<vscale x 4 x float> %x) {
+; CHECK-LABEL: llround_nxv4i64_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmv2r.v v12, v8
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfwcvt.x.f.v v8, v12
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4f32(<vscale x 4 x float> %x)
+  ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4f32(<vscale x 4 x float>)
+
+define <vscale x 8 x i64> @llround_nxv8i64_nxv8f32(<vscale x 8 x float> %x) {
+; CHECK-LABEL: llround_nxv8i64_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmv4r.v v16, v8
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfwcvt.x.f.v v8, v16
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8f32(<vscale x 8 x float> %x)
+  ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8f32(<vscale x 8 x float>)
+
+define <vscale x 16 x i64> @llround_nxv16i64_nxv16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: llround_nxv16i64_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfwcvt.x.f.v v24, v8
+; CHECK-NEXT:    vfwcvt.x.f.v v16, v12
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i64> @llvm.llround.nxv16i64.nxv16f32(<vscale x 16 x float> %x)
+  ret <vscale x 16 x i64> %a
+}
+declare <vscale x 16 x i64> @llvm.llround.nxv16i64.nxv16f32(<vscale x 16 x float>)
+
+define <vscale x 1 x i64> @llround_nxv1i64_nxv1f64(<vscale x 1 x double> %x) {
+; CHECK-LABEL: llround_nxv1i64_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfcvt.x.f.v v8, v8
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1f64(<vscale x 1 x double> %x)
+  ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1f64(<vscale x 1 x double>)
+
+define <vscale x 2 x i64> @llround_nxv2i64_nxv2f64(<vscale x 2 x double> %x) {
+; CHECK-LABEL: llround_nxv2i64_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfcvt.x.f.v v8, v8
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2f64(<vscale x 2 x double> %x)
+  ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2f64(<vscale x 2 x double>)
+
+define <vscale x 4 x i64> @llround_nxv4i64_nxv4f64(<vscale x 4 x double> %x) {
+; CHECK-LABEL: llround_nxv4i64_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfcvt.x.f.v v8, v8
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4f64(<vscale x 4 x double> %x)
+  ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4f64(<vscale x 4 x double>)
+
+define <vscale x 8 x i64> @llround_nxv8i64_nxv8f64(<vscale x 8 x double> %x) {
+; CHECK-LABEL: llround_nxv8i64_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfcvt.x.f.v v8, v8
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8f64(<vscale x 8 x double> %x)
+  ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8f64(<vscale x 8 x double>)
+
+define <vscale x 1 x i64> @llround_nxv1f16(<vscale x 1 x half> %x) {
+; CHECK-LABEL: llround_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfwcvt.x.f.v v8, v9
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1f16(<vscale x 1 x half> %x)
+  ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1f16(<vscale x 1 x half>)
+
+define <vscale x 2 x i64> @llround_nxv2f16(<vscale x 2 x half> %x) {
+; CHECK-LABEL: llround_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvt.f.f.v v10, v8
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfwcvt.x.f.v v8, v10
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2f16(<vscale x 2 x half> %x)
+  ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2f16(<vscale x 2 x half>)
+
+define <vscale x 4 x i64> @llround_nxv4f16(<vscale x 4 x half> %x) {
+; CHECK-LABEL: llround_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvt.f.f.v v12, v8
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfwcvt.x.f.v v8, v12
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4f16(<vscale x 4 x half> %x)
+  ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4f16(<vscale x 4 x half>)
+
+define <vscale x 8 x i64> @llround_nxv8f16(<vscale x 8 x half> %x) {
+; CHECK-LABEL: llround_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfwcvt.x.f.v v8, v16
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8f16(<vscale x 8 x half> %x)
+  ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8f16(<vscale x 8 x half>)
+
+define <vscale x 16 x i64> @llround_nxv16f16(<vscale x 16 x half> %x) {
+; CHECK-LABEL: llround_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvt.f.f.v v16, v8
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfwcvt.f.f.v v24, v10
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfwcvt.x.f.v v8, v16
+; CHECK-NEXT:    vfwcvt.x.f.v v16, v24
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i64> @llvm.llround.nxv16i64.nxv16f16(<vscale x 16 x half> %x)
+  ret <vscale x 16 x i64> %a
+}
+declare <vscale x 16 x i64> @llvm.llround.nxv16i64.nxv16f16(<vscale x 16 x half>)
+
+define <vscale x 1 x i64> @llround_nxv1bf16(<vscale x 1 x bfloat> %x) {
+; CHECK-LABEL: llround_nxv1bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfwcvt.x.f.v v8, v9
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1bf16(<vscale x 1 x bfloat> %x)
+  ret <vscale x 1 x i64> %a
+}
+declare <vscale x 1 x i64> @llvm.llround.nxv1i64.nxv1bf16(<vscale x 1 x bfloat>)
+
+define <vscale x 2 x i64> @llround_nxv2bf16(<vscale x 2 x bfloat> %x) {
+; CHECK-LABEL: llround_nxv2bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfwcvt.x.f.v v8, v10
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2bf16(<vscale x 2 x bfloat> %x)
+  ret <vscale x 2 x i64> %a
+}
+declare <vscale x 2 x i64> @llvm.llround.nxv2i64.nxv2bf16(<vscale x 2 x bfloat>)
+
+define <vscale x 4 x i64> @llround_nxv4bf16(<vscale x 4 x bfloat> %x) {
+; CHECK-LABEL: llround_nxv4bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfwcvt.x.f.v v8, v12
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4bf16(<vscale x 4 x bfloat> %x)
+  ret <vscale x 4 x i64> %a
+}
+declare <vscale x 4 x i64> @llvm.llround.nxv4i64.nxv4bf16(<vscale x 4 x bfloat>)
+
+define <vscale x 8 x i64> @llround_nxv8bf16(<vscale x 8 x bfloat> %x) {
+; CHECK-LABEL: llround_nxv8bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfwcvt.x.f.v v8, v16
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8bf16(<vscale x 8 x bfloat> %x)
+  ret <vscale x 8 x i64> %a
+}
+declare <vscale x 8 x i64> @llvm.llround.nxv8i64.nxv8bf16(<vscale x 8 x bfloat>)
+
+define <vscale x 16 x i64> @llround_nxv16bf16(<vscale x 16 x bfloat> %x) {
+; CHECK-LABEL: llround_nxv16bf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v10
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfwcvt.x.f.v v8, v16
+; CHECK-NEXT:    vfwcvt.x.f.v v16, v24
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i64> @llvm.llround.nxv16i64.nxv16bf16(<vscale x 16 x bfloat> %x)
+  ret <vscale x 16 x i64> %a
+}
+declare <vscale x 16 x i64> @llvm.llround.nxv16i64.nxv16bf16(<vscale x 16 x bfloat>)
diff --git a/llvm/test/CodeGen/RISCV/rvv/lround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/lround-sdnode.ll
new file mode 100644
index 0000000000000..109b9055e7b55
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/lround-sdnode.ll
@@ -0,0 +1,759 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin \
+; RUN:     -target-abi=ilp32d -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin \
+; RUN:     -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d,+zvfhmin,+zvfbfmin \
+; RUN:     -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i64
+
+define <vscale x 1 x iXLen> @lround_nxv1f32(<vscale x 1 x float> %x) {
+; RV32-LABEL: lround_nxv1f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv1f32:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv1f32:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v9, v8
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    vmv1r.v v8, v9
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 1 x iXLen> @llvm.lround.nxv1iXLen.nxv1f32(<vscale x 1 x float> %x)
+  ret <vscale x 1 x iXLen> %a
+}
+declare <vscale x 1 x iXLen> @llvm.lround.nxv1iXLen.nxv1f32(<vscale x 1 x float>)
+
+define <vscale x 2 x iXLen> @lround_nxv2f32(<vscale x 2 x float> %x) {
+; RV32-LABEL: lround_nxv2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv2f32:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv2f32:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; RV64-i64-NEXT:    vmv1r.v v10, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 2 x iXLen> @llvm.lround.nxv2iXLen.nxv2f32(<vscale x 2 x float> %x)
+  ret <vscale x 2 x iXLen> %a
+}
+declare <vscale x 2 x iXLen> @llvm.lround.nxv2iXLen.nxv2f32(<vscale x 2 x float>)
+
+define <vscale x 4 x iXLen> @lround_nxv4f32(<vscale x 4 x float> %x) {
+; RV32-LABEL: lround_nxv4f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv4f32:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv4f32:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; RV64-i64-NEXT:    vmv2r.v v12, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v12
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 4 x iXLen> @llvm.lround.nxv4iXLen.nxv4f32(<vscale x 4 x float> %x)
+  ret <vscale x 4 x iXLen> %a
+}
+declare <vscale x 4 x iXLen> @llvm.lround.nxv4iXLen.nxv4f32(<vscale x 4 x float>)
+
+define <vscale x 8 x iXLen> @lround_nxv8f32(<vscale x 8 x float> %x) {
+; RV32-LABEL: lround_nxv8f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv8f32:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv8f32:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; RV64-i64-NEXT:    vmv4r.v v16, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 8 x iXLen> @llvm.lround.nxv8iXLen.nxv8f32(<vscale x 8 x float> %x)
+  ret <vscale x 8 x iXLen> %a
+}
+declare <vscale x 8 x iXLen> @llvm.lround.nxv8iXLen.nxv8f32(<vscale x 8 x float>)
+
+define <vscale x 16 x iXLen> @lround_nxv16f32(<vscale x 16 x float> %x) {
+; RV32-LABEL: lround_nxv16f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv16f32:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv16f32:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v24, v8
+; RV64-i64-NEXT:    vfwcvt.x.f.v v16, v12
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    vmv8r.v v8, v24
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 16 x iXLen> @llvm.lround.nxv16iXLen.nxv16f32(<vscale x 16 x float> %x)
+  ret <vscale x 16 x iXLen> %a
+}
+declare <vscale x 16 x iXLen> @llvm.lround.nxv16iXLen.nxv16f32(<vscale x 16 x float>)
+
+define <vscale x 1 x iXLen> @lround_nxv1f64(<vscale x 1 x double> %x) {
+; RV32-LABEL: lround_nxv1f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vfncvt.x.f.w v9, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    vmv1r.v v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv1f64:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT:    vfncvt.x.f.w v9, v8
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    vmv1r.v v8, v9
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv1f64:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV64-i64-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 1 x iXLen> @llvm.lround.nxv1iXLen.nxv1f64(<vscale x 1 x double> %x)
+  ret <vscale x 1 x iXLen> %a
+}
+declare <vscale x 1 x iXLen> @llvm.lround.nxv1iXLen.nxv1f64(<vscale x 1 x double>)
+
+define <vscale x 2 x iXLen> @lround_nxv2f64(<vscale x 2 x double> %x) {
+; RV32-LABEL: lround_nxv2f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; RV32-NEXT:    vfncvt.x.f.w v10, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    vmv.v.v v8, v10
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv2f64:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; RV64-i32-NEXT:    vfncvt.x.f.w v10, v8
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    vmv.v.v v8, v10
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv2f64:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; RV64-i64-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 2 x iXLen> @llvm.lround.nxv2iXLen.nxv2f64(<vscale x 2 x double> %x)
+  ret <vscale x 2 x iXLen> %a
+}
+declare <vscale x 2 x iXLen> @llvm.lround.nxv2iXLen.nxv2f64(<vscale x 2 x double>)
+
+define <vscale x 4 x iXLen> @lround_nxv4f64(<vscale x 4 x double> %x) {
+; RV32-LABEL: lround_nxv4f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; RV32-NEXT:    vfncvt.x.f.w v12, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    vmv.v.v v8, v12
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv4f64:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; RV64-i32-NEXT:    vfncvt.x.f.w v12, v8
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    vmv.v.v v8, v12
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv4f64:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; RV64-i64-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 4 x iXLen> @llvm.lround.nxv4iXLen.nxv4f64(<vscale x 4 x double> %x)
+  ret <vscale x 4 x iXLen> %a
+}
+declare <vscale x 4 x iXLen> @llvm.lround.nxv4iXLen.nxv4f64(<vscale x 4 x double>)
+
+define <vscale x 8 x iXLen> @lround_nxv8f64(<vscale x 8 x double> %x) {
+; RV32-LABEL: lround_nxv8f64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV32-NEXT:    vfncvt.x.f.w v16, v8
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    vmv.v.v v8, v16
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv8f64:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV64-i32-NEXT:    vfncvt.x.f.w v16, v8
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    vmv.v.v v8, v16
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv8f64:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV64-i64-NEXT:    vfcvt.x.f.v v8, v8
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 8 x iXLen> @llvm.lround.nxv8iXLen.nxv8f64(<vscale x 8 x double> %x)
+  ret <vscale x 8 x iXLen> %a
+}
+declare <vscale x 8 x iXLen> @llvm.lround.nxv8iXLen.nxv8f64(<vscale x 8 x double>)
+
+define <vscale x 1 x iXLen> @lround_nxv1f16(<vscale x 1 x half> %x) {
+; RV32-LABEL: lround_nxv1f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v9, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v9
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv1f16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-i32-NEXT:    vfwcvt.f.f.v v9, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv1f16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-i64-NEXT:    vfwcvt.f.f.v v9, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v9
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 1 x iXLen> @llvm.lround.nxv1iXLen.nxv1f16(<vscale x 1 x half> %x)
+  ret <vscale x 1 x iXLen> %a
+}
+declare <vscale x 1 x iXLen> @llvm.lround.nxv1iXLen.nxv1f16(<vscale x 1 x half>)
+
+define <vscale x 2 x iXLen> @lround_nxv2f16(<vscale x 2 x half> %x) {
+; RV32-LABEL: lround_nxv2f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v9, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v9
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv2f16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-i32-NEXT:    vfwcvt.f.f.v v9, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv2f16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.f.f.v v10, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 2 x iXLen> @llvm.lround.nxv2iXLen.nxv2f16(<vscale x 2 x half> %x)
+  ret <vscale x 2 x iXLen> %a
+}
+declare <vscale x 2 x iXLen> @llvm.lround.nxv2iXLen.nxv2f16(<vscale x 2 x half>)
+
+define <vscale x 4 x iXLen> @lround_nxv4f16(<vscale x 4 x half> %x) {
+; RV32-LABEL: lround_nxv4f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v10, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v10
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv4f16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV64-i32-NEXT:    vfwcvt.f.f.v v10, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v10
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv4f16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV64-i64-NEXT:    vfwcvt.f.f.v v12, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v12
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 4 x iXLen> @llvm.lround.nxv4iXLen.nxv4f16(<vscale x 4 x half> %x)
+  ret <vscale x 4 x iXLen> %a
+}
+declare <vscale x 4 x iXLen> @llvm.lround.nxv4iXLen.nxv4f16(<vscale x 4 x half>)
+
+define <vscale x 8 x iXLen> @lround_nxv8f16(<vscale x 8 x half> %x) {
+; RV32-LABEL: lround_nxv8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v12, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v12
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv8f16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i32-NEXT:    vfwcvt.f.f.v v12, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v12
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv8f16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.f.f.v v16, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 8 x iXLen> @llvm.lround.nxv8iXLen.nxv8f16(<vscale x 8 x half> %x)
+  ret <vscale x 8 x iXLen> %a
+}
+declare <vscale x 8 x iXLen> @llvm.lround.nxv8iXLen.nxv8f16(<vscale x 8 x half>)
+
+define <vscale x 16 x iXLen> @lround_nxv16f16(<vscale x 16 x half> %x) {
+; RV32-LABEL: lround_nxv16f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT:    vfwcvt.f.f.v v16, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v16
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv16f16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; RV64-i32-NEXT:    vfwcvt.f.f.v v16, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v16
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv16f16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.f.f.v v16, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vfwcvt.f.f.v v24, v10
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT:    vfwcvt.x.f.v v16, v24
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 16 x iXLen> @llvm.lround.nxv16iXLen.nxv16f16(<vscale x 16 x half> %x)
+  ret <vscale x 16 x iXLen> %a
+}
+declare <vscale x 16 x iXLen> @llvm.lround.nxv16iXLen.nxv16f16(<vscale x 16 x half>)
+
+define <vscale x 1 x iXLen> @lround_nxv1bf16(<vscale x 1 x bfloat> %x) {
+; RV32-LABEL: lround_nxv1bf16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v9
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv1bf16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-i32-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv1bf16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-i64-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v9
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 1 x iXLen> @llvm.lround.nxv1iXLen.nxv1bf16(<vscale x 1 x bfloat> %x)
+  ret <vscale x 1 x iXLen> %a
+}
+declare <vscale x 1 x iXLen> @llvm.lround.nxv1iXLen.nxv1bf16(<vscale x 1 x bfloat>)
+
+define <vscale x 2 x iXLen> @lround_nxv2bf16(<vscale x 2 x bfloat> %x) {
+; RV32-LABEL: lround_nxv2bf16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v9
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv2bf16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-i32-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v9
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv2bf16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-i64-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v10
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 2 x iXLen> @llvm.lround.nxv2iXLen.nxv2bf16(<vscale x 2 x bfloat> %x)
+  ret <vscale x 2 x iXLen> %a
+}
+declare <vscale x 2 x iXLen> @llvm.lround.nxv2iXLen.nxv2bf16(<vscale x 2 x bfloat>)
+
+define <vscale x 4 x iXLen> @lround_nxv4bf16(<vscale x 4 x bfloat> %x) {
+; RV32-LABEL: lround_nxv4bf16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v10
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv4bf16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV64-i32-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v10
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv4bf16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV64-i64-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v12
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 4 x iXLen> @llvm.lround.nxv4iXLen.nxv4bf16(<vscale x 4 x bfloat> %x)
+  ret <vscale x 4 x iXLen> %a
+}
+declare <vscale x 4 x iXLen> @llvm.lround.nxv4iXLen.nxv4bf16(<vscale x 4 x bfloat>)
+
+define <vscale x 8 x iXLen> @lround_nxv8bf16(<vscale x 8 x bfloat> %x) {
+; RV32-LABEL: lround_nxv8bf16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v12
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv8bf16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i32-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v12
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv8bf16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i64-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 8 x iXLen> @llvm.lround.nxv8iXLen.nxv8bf16(<vscale x 8 x bfloat> %x)
+  ret <vscale x 8 x iXLen> %a
+}
+declare <vscale x 8 x iXLen> @llvm.lround.nxv8iXLen.nxv8bf16(<vscale x 8 x bfloat>)
+
+define <vscale x 16 x iXLen> @lround_nxv16bf16(<vscale x 16 x bfloat> %x) {
+; RV32-LABEL: lround_nxv16bf16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v16
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv16bf16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; RV64-i32-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v16
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv16bf16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i64-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vfwcvtbf16.f.f.v v24, v10
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v16
+; RV64-i64-NEXT:    vfwcvt.x.f.v v16, v24
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 16 x iXLen> @llvm.lround.nxv16iXLen.nxv16bf16(<vscale x 16 x bfloat> %x)
+  ret <vscale x 16 x iXLen> %a
+}
+declare <vscale x 16 x iXLen> @llvm.lround.nxv16iXLen.nxv16bf16(<vscale x 16 x bfloat>)
+
+define <vscale x 32 x iXLen> @lround_nxv32bf16(<vscale x 32 x bfloat> %x) {
+; RV32-LABEL: lround_nxv32bf16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; RV32-NEXT:    fsrmi a0, 4
+; RV32-NEXT:    vfwcvtbf16.f.f.v v24, v12
+; RV32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; RV32-NEXT:    vfcvt.x.f.v v8, v16
+; RV32-NEXT:    vfcvt.x.f.v v16, v24
+; RV32-NEXT:    fsrm a0
+; RV32-NEXT:    ret
+;
+; RV64-i32-LABEL: lround_nxv32bf16:
+; RV64-i32:       # %bb.0:
+; RV64-i32-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; RV64-i32-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; RV64-i32-NEXT:    fsrmi a0, 4
+; RV64-i32-NEXT:    vfwcvtbf16.f.f.v v24, v12
+; RV64-i32-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; RV64-i32-NEXT:    vfcvt.x.f.v v8, v16
+; RV64-i32-NEXT:    vfcvt.x.f.v v16, v24
+; RV64-i32-NEXT:    fsrm a0
+; RV64-i32-NEXT:    ret
+;
+; RV64-i64-LABEL: lround_nxv32bf16:
+; RV64-i64:       # %bb.0:
+; RV64-i64-NEXT:    addi sp, sp, -64
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 64
+; RV64-i64-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-i64-NEXT:    .cfi_offset ra, -8
+; RV64-i64-NEXT:    .cfi_offset s0, -16
+; RV64-i64-NEXT:    .cfi_offset s1, -24
+; RV64-i64-NEXT:    csrr a1, vlenb
+; RV64-i64-NEXT:    slli a1, a1, 5
+; RV64-i64-NEXT:    sub sp, sp, a1
+; RV64-i64-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 32 * vlenb
+; RV64-i64-NEXT:    mv s0, a0
+; RV64-i64-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV64-i64-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; RV64-i64-NEXT:    fsrmi a0, 4
+; RV64-i64-NEXT:    vfwcvtbf16.f.f.v v20, v10
+; RV64-i64-NEXT:    vfwcvtbf16.f.f.v v8, v12
+; RV64-i64-NEXT:    vfwcvtbf16.f.f.v v24, v14
+; RV64-i64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV64-i64-NEXT:    vfwcvt.x.f.v v0, v16
+; RV64-i64-NEXT:    csrr a1, vlenb
+; RV64-i64-NEXT:    slli a1, a1, 3
+; RV64-i64-NEXT:    mv a2, a1
+; RV64-i64-NEXT:    slli a1, a1, 1
+; RV64-i64-NEXT:    add a1, a1, a2
+; RV64-i64-NEXT:    add a1, sp, a1
+; RV64-i64-NEXT:    addi a1, a1, 32
+; RV64-i64-NEXT:    vs8r.v v0, (a1) # vscale x 64-byte Folded Spill
+; RV64-i64-NEXT:    vfwcvt.x.f.v v0, v20
+; RV64-i64-NEXT:    csrr a1, vlenb
+; RV64-i64-NEXT:    slli a1, a1, 4
+; RV64-i64-NEXT:    add a1, sp, a1
+; RV64-i64-NEXT:    addi a1, a1, 32
+; RV64-i64-NEXT:    vs8r.v v0, (a1) # vscale x 64-byte Folded Spill
+; RV64-i64-NEXT:    vfwcvt.x.f.v v16, v8
+; RV64-i64-NEXT:    csrr a1, vlenb
+; RV64-i64-NEXT:    slli a1, a1, 3
+; RV64-i64-NEXT:    add a1, sp, a1
+; RV64-i64-NEXT:    addi a1, a1, 32
+; RV64-i64-NEXT:    vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; RV64-i64-NEXT:    vfwcvt.x.f.v v8, v24
+; RV64-i64-NEXT:    addi a1, sp, 32
+; RV64-i64-NEXT:    vs8r.v v8, (a1) # vscale x 64-byte Folded Spill
+; RV64-i64-NEXT:    csrr s1, vlenb
+; RV64-i64-NEXT:    li a1, 24
+; RV64-i64-NEXT:    fsrm a0
+; RV64-i64-NEXT:    mv a0, s1
+; RV64-i64-NEXT:    call __muldi3
+; RV64-i64-NEXT:    add a0, s0, a0
+; RV64-i64-NEXT:    addi a1, sp, 32
+; RV64-i64-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
+; RV64-i64-NEXT:    vs8r.v v8, (a0)
+; RV64-i64-NEXT:    slli a0, s1, 4
+; RV64-i64-NEXT:    slli s1, s1, 3
+; RV64-i64-NEXT:    add a0, s0, a0
+; RV64-i64-NEXT:    add s1, s0, s1
+; RV64-i64-NEXT:    csrr a1, vlenb
+; RV64-i64-NEXT:    slli a1, a1, 3
+; RV64-i64-NEXT:    add a1, sp, a1
+; RV64-i64-NEXT:    addi a1, a1, 32
+; RV64-i64-NEXT:    vl8r.v v8, (a1) # vscale x 64-byte Folded Reload
+; RV64-i64-NEXT:    vs8r.v v8, (a0)
+; RV64-i64-NEXT:    csrr a0, vlenb
+; RV64-i64-NEXT:    slli a0, a0, 4
+; RV64-i64-NEXT:    add a0, sp, a0
+; RV64-i64-NEXT:    addi a0, a0, 32
+; RV64-i64-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; RV64-i64-NEXT:    vs8r.v v8, (s1)
+; RV64-i64-NEXT:    csrr a0, vlenb
+; RV64-i64-NEXT:    slli a0, a0, 3
+; RV64-i64-NEXT:    mv a1, a0
+; RV64-i64-NEXT:    slli a0, a0, 1
+; RV64-i64-NEXT:    add a0, a0, a1
+; RV64-i64-NEXT:    add a0, sp, a0
+; RV64-i64-NEXT:    addi a0, a0, 32
+; RV64-i64-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; RV64-i64-NEXT:    vs8r.v v8, (s0)
+; RV64-i64-NEXT:    csrr a0, vlenb
+; RV64-i64-NEXT:    slli a0, a0, 5
+; RV64-i64-NEXT:    add sp, sp, a0
+; RV64-i64-NEXT:    .cfi_def_cfa sp, 64
+; RV64-i64-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-i64-NEXT:    .cfi_restore ra
+; RV64-i64-NEXT:    .cfi_restore s0
+; RV64-i64-NEXT:    .cfi_restore s1
+; RV64-i64-NEXT:    addi sp, sp, 64
+; RV64-i64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-i64-NEXT:    ret
+  %a = call <vscale x 32 x iXLen> @llvm.lround.nxv32iXLen.nxv32bf16(<vscale x 32 x bfloat> %x)
+  ret <vscale x 32 x iXLen> %a
+}
+declare <vscale x 32 x iXLen> @llvm.lround.nxv32iXLen.nxv32bf16(<vscale x 32 x bfloat>)
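
For reference, here is a minimal standalone reproducer distilled from the
RV64-i64 checks for lround_nxv1f64 in the patch above. The RUN line (triple,
-mattr, and check prefix) is an assumption for illustration and is not copied
from the patch; the expected instruction sequence is taken from the generated
checks.

  ; RUN: llc -mtriple=riscv64 -mattr=+v < %s | FileCheck %s

  define <vscale x 1 x i64> @lround_nxv1f64(<vscale x 1 x double> %x) {
  ; CHECK-LABEL: lround_nxv1f64:
  ; CHECK:         fsrmi a0, 4
  ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
  ; CHECK-NEXT:    vfcvt.x.f.v v8, v8
  ; CHECK-NEXT:    fsrm a0
  ; CHECK-NEXT:    ret
    %a = call <vscale x 1 x i64> @llvm.lround.nxv1i64.nxv1f64(<vscale x 1 x double> %x)
    ret <vscale x 1 x i64> %a
  }
  declare <vscale x 1 x i64> @llvm.lround.nxv1i64.nxv1f64(<vscale x 1 x double>)

Here fsrmi 4 selects the RMM rounding mode (round to nearest, ties away from
zero) that lround requires, vfcvt.x.f.v performs the float-to-integer
conversion under that mode, and fsrm restores the previous rounding mode.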


