[llvm] effcd18 - [RISCV] Remove codegen for VP float rounding intrinsics (#189896)

via llvm-commits <llvm-commits@lists.llvm.org>
Wed Apr 1 04:05:00 PDT 2026


Author: Luke Lau
Date: 2026-04-01T11:04:53Z
New Revision: effcd181e580c6f83dfc88a0362705ddb6601f5b

URL: https://github.com/llvm/llvm-project/commit/effcd181e580c6f83dfc88a0362705ddb6601f5b
DIFF: https://github.com/llvm/llvm-project/commit/effcd181e580c6f83dfc88a0362705ddb6601f5b.diff

LOG: [RISCV] Remove codegen for VP float rounding intrinsics (#189896)

Part of the work to remove trivial VP intrinsics from the RISC-V
backend; see
https://discourse.llvm.org/t/rfc-remove-codegen-support-for-trivial-vp-intrinsics-in-the-risc-v-backend/87999
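
For reference, these VP calls are now handled by the generic
ExpandVectorPredication pass, which for speculatable FP operations like
these simply calls the matching unpredicated intrinsic. A minimal IR
sketch (illustrative types and value names, not taken from the tests):

  ; before: predicated ceil with a mask and an explicit vector length
  %v = call <4 x float> @llvm.vp.ceil.v4f32(<4 x float> %x, <4 x i1> %m, i32 %evl)

  ; after expansion: lanes disabled by %m or beyond %evl are allowed to
  ; take any value, so the plain intrinsic is a legal replacement
  %v = call <4 x float> @llvm.ceil.v4f32(<4 x float> %x)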

This splits off seven intrinsics from #179622.

We now generate vfcvt.rtz for llvm.vp.roundtozero. It looks like we
should have been using llvm.trunc's codegen for it all along, but
somehow missed that.
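
That is, llvm.vp.roundtozero now expands to llvm.trunc (again a sketch
with illustrative types):

  %v = call <4 x float> @llvm.vp.roundtozero.v4f32(<4 x float> %x, <4 x i1> %m, i32 %evl)
  ; becomes
  %v = call <4 x float> @llvm.trunc.v4f32(<4 x float> %x)

and the trunc lowering can use the statically rounded vfcvt.rtz.x.f.v
directly instead of an fsrmi/fsrm pair around vfcvt.x.f.v.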

Added: 
    

Modified: 
    llvm/lib/CodeGen/ExpandVectorPredication.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
    llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
    llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
    llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
    llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
    llvm/test/CodeGen/RISCV/rvv/round-vp.ll
    llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
    llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
index 5f03c9ef9ade3..06c6f9698715d 100644
--- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -290,6 +290,13 @@ bool CachingVPExpander::expandPredicationToFPCall(
   case Intrinsic::minnum:
   case Intrinsic::maximum:
   case Intrinsic::minimum:
+  case Intrinsic::ceil:
+  case Intrinsic::floor:
+  case Intrinsic::round:
+  case Intrinsic::roundeven:
+  case Intrinsic::trunc:
+  case Intrinsic::rint:
+  case Intrinsic::nearbyint:
   case Intrinsic::lrint:
   case Intrinsic::llrint: {
     SmallVector<Value *, 2> Argument;
@@ -617,6 +624,13 @@ bool CachingVPExpander::expandPredication(VPIntrinsic &VPI) {
   case Intrinsic::vp_minnum:
   case Intrinsic::vp_maximum:
   case Intrinsic::vp_minimum:
+  case Intrinsic::vp_ceil:
+  case Intrinsic::vp_floor:
+  case Intrinsic::vp_round:
+  case Intrinsic::vp_roundeven:
+  case Intrinsic::vp_roundtozero:
+  case Intrinsic::vp_rint:
+  case Intrinsic::vp_nearbyint:
   case Intrinsic::vp_lrint:
   case Intrinsic::vp_llrint:
   case Intrinsic::vp_fma:

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index cb6489fbeae09..aa93acde602d2 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -890,10 +890,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         ISD::VP_SELECT,      ISD::VP_SINT_TO_FP,  ISD::VP_UINT_TO_FP,
         ISD::VP_SETCC,       ISD::VP_FP_ROUND,    ISD::VP_FP_EXTEND,
         ISD::VP_SQRT,        ISD::VP_FMINNUM,     ISD::VP_FMAXNUM,
-        ISD::VP_FCEIL,       ISD::VP_FFLOOR,      ISD::VP_FROUND,
-        ISD::VP_FROUNDEVEN,  ISD::VP_FCOPYSIGN,   ISD::VP_FROUNDTOZERO,
-        ISD::VP_FRINT,       ISD::VP_FNEARBYINT,  ISD::VP_IS_FPCLASS,
-        ISD::VP_REDUCE_FMINIMUM, ISD::VP_REDUCE_FMAXIMUM};
+        ISD::VP_FCOPYSIGN,   ISD::VP_IS_FPCLASS,  ISD::VP_REDUCE_FMINIMUM,
+        ISD::VP_REDUCE_FMAXIMUM};
 
     static const unsigned IntegerVecReduceOps[] = {
         ISD::VECREDUCE_ADD,  ISD::VECREDUCE_AND,  ISD::VECREDUCE_OR,
@@ -1224,13 +1222,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         ISD::VP_SQRT,
         ISD::VP_FMINNUM,
         ISD::VP_FMAXNUM,
-        ISD::VP_FCEIL,
-        ISD::VP_FFLOOR,
-        ISD::VP_FROUND,
-        ISD::VP_FROUNDEVEN,
-        ISD::VP_FROUNDTOZERO,
-        ISD::VP_FRINT,
-        ISD::VP_FNEARBYINT,
         ISD::VP_SETCC,
         ISD::VP_REDUCE_FMINIMUM,
         ISD::VP_REDUCE_FMAXIMUM};
@@ -3596,19 +3587,15 @@ static RISCVFPRndMode::RoundingMode matchRoundingOp(unsigned Opc) {
   switch (Opc) {
   case ISD::FROUNDEVEN:
   case ISD::STRICT_FROUNDEVEN:
-  case ISD::VP_FROUNDEVEN:
     return RISCVFPRndMode::RNE;
   case ISD::FTRUNC:
   case ISD::STRICT_FTRUNC:
-  case ISD::VP_FROUNDTOZERO:
     return RISCVFPRndMode::RTZ;
   case ISD::FFLOOR:
   case ISD::STRICT_FFLOOR:
-  case ISD::VP_FFLOOR:
     return RISCVFPRndMode::RDN;
   case ISD::FCEIL:
   case ISD::STRICT_FCEIL:
-  case ISD::VP_FCEIL:
     return RISCVFPRndMode::RUP;
   case ISD::FROUND:
   case ISD::LROUND:
@@ -3616,7 +3603,6 @@ static RISCVFPRndMode::RoundingMode matchRoundingOp(unsigned Opc) {
   case ISD::STRICT_FROUND:
   case ISD::STRICT_LROUND:
   case ISD::STRICT_LLROUND:
-  case ISD::VP_FROUND:
     return RISCVFPRndMode::RMM;
   case ISD::FRINT:
   case ISD::LRINT:
@@ -3624,15 +3610,13 @@ static RISCVFPRndMode::RoundingMode matchRoundingOp(unsigned Opc) {
   case ISD::STRICT_FRINT:
   case ISD::STRICT_LRINT:
   case ISD::STRICT_LLRINT:
-  case ISD::VP_FRINT:
     return RISCVFPRndMode::DYN;
   }
 
   return RISCVFPRndMode::Invalid;
 }
 
-// Expand vector FTRUNC, FCEIL, FFLOOR, FROUND, VP_FCEIL, VP_FFLOOR, VP_FROUND
-// VP_FROUNDEVEN, VP_FROUNDTOZERO, VP_FRINT and VP_FNEARBYINT by converting to
+// Expand vector FTRUNC, FCEIL, FFLOOR and FROUND by converting to
 // the integer domain and back. Taking care to avoid converting values that are
 // nan or already correct.
 static SDValue
@@ -3654,16 +3638,7 @@ lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
   }
 
-  SDValue Mask, VL;
-  if (Op->isVPOpcode()) {
-    Mask = Op.getOperand(1);
-    if (VT.isFixedLengthVector())
-      Mask = convertToScalableVector(getMaskTypeFor(ContainerVT), Mask, DAG,
-                                     Subtarget);
-    VL = Op.getOperand(2);
-  } else {
-    std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
-  }
+  auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
 
   // We do the conversion on the absolute value and fix the sign at the end.
   SDValue Abs = DAG.getNode(RISCVISD::FABS_VL, DL, ContainerVT, Src, Mask, VL);
@@ -3697,16 +3672,10 @@ lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
   default:
     llvm_unreachable("Unexpected opcode");
   case ISD::FRINT:
-  case ISD::VP_FRINT:
   case ISD::FCEIL:
-  case ISD::VP_FCEIL:
   case ISD::FFLOOR:
-  case ISD::VP_FFLOOR:
   case ISD::FROUND:
-  case ISD::FROUNDEVEN:
-  case ISD::VP_FROUND:
-  case ISD::VP_FROUNDEVEN:
-  case ISD::VP_FROUNDTOZERO: {
+  case ISD::FROUNDEVEN: {
     RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Op.getOpcode());
     assert(FRM != RISCVFPRndMode::Invalid);
     Truncated = DAG.getNode(RISCVISD::VFCVT_RM_X_F_VL, DL, IntVT, Src, Mask,
@@ -3718,7 +3687,6 @@ lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
                             Mask, VL);
     break;
   case ISD::FNEARBYINT:
-  case ISD::VP_FNEARBYINT:
     Truncated = DAG.getNode(RISCVISD::VFROUND_NOEXCEPT_VL, DL, ContainerVT, Src,
                             Mask, VL);
     break;
@@ -9152,16 +9120,6 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     return lowerVPStridedLoad(Op, DAG);
   case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
     return lowerVPStridedStore(Op, DAG);
-  case ISD::VP_FCEIL:
-  case ISD::VP_FFLOOR:
-  case ISD::VP_FRINT:
-  case ISD::VP_FNEARBYINT:
-  case ISD::VP_FROUND:
-  case ISD::VP_FROUNDEVEN:
-  case ISD::VP_FROUNDTOZERO:
-    if (isPromotedOpNeedingSplit(Op, Subtarget))
-      return SplitVPOp(Op, DAG);
-    return lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
   case ISD::EXPERIMENTAL_VP_SPLICE:
     return lowerVPSpliceExperimental(Op, DAG);
   case ISD::EXPERIMENTAL_VP_REVERSE:

diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
index 18e0ab9b72608..53700676eaed3 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h
@@ -375,7 +375,6 @@ class RISCVTTIImpl final : public BasicTTIImplBase<RISCVTTIImpl> {
         Intrinsic::vp_ashr,
         Intrinsic::vp_bitreverse,
         Intrinsic::vp_bswap,
-        Intrinsic::vp_ceil,
         Intrinsic::vp_copysign,
         Intrinsic::vp_ctlz,
         Intrinsic::vp_ctpop,
@@ -385,7 +384,6 @@ class RISCVTTIImpl final : public BasicTTIImplBase<RISCVTTIImpl> {
         Intrinsic::vp_fadd,
         Intrinsic::vp_fcmp,
         Intrinsic::vp_fdiv,
-        Intrinsic::vp_floor,
         Intrinsic::vp_fma,
         Intrinsic::vp_fmul,
         Intrinsic::vp_fmuladd,
@@ -409,7 +407,6 @@ class RISCVTTIImpl final : public BasicTTIImplBase<RISCVTTIImpl> {
         Intrinsic::vp_merge,
         Intrinsic::vp_minnum,
         Intrinsic::vp_mul,
-        Intrinsic::vp_nearbyint,
         Intrinsic::vp_or,
         Intrinsic::vp_ptrtoint,
         Intrinsic::vp_reduce_add,
@@ -427,10 +424,6 @@ class RISCVTTIImpl final : public BasicTTIImplBase<RISCVTTIImpl> {
         Intrinsic::vp_reduce_umax,
         Intrinsic::vp_reduce_umin,
         Intrinsic::vp_reduce_xor,
-        Intrinsic::vp_rint,
-        Intrinsic::vp_round,
-        Intrinsic::vp_roundeven,
-        Intrinsic::vp_roundtozero,
         Intrinsic::vp_sadd_sat,
         Intrinsic::vp_scatter,
         Intrinsic::vp_sdiv,

diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index 00fef2e6304ce..bb2c0d6f94804 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -15,27 +15,21 @@
 define <vscale x 1 x bfloat> @vp_ceil_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv1bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v8, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v11, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x bfloat> @llvm.vp.ceil.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x bfloat> %v
@@ -44,7 +38,7 @@ define <vscale x 1 x bfloat> @vp_ceil_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vs
 define <vscale x 1 x bfloat> @vp_ceil_vv_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv1bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -67,27 +61,21 @@ define <vscale x 1 x bfloat> @vp_ceil_vv_nxv1bf16_unmasked(<vscale x 1 x bfloat>
 define <vscale x 2 x bfloat> @vp_ceil_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv2bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v8, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v11, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.vp.ceil.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x bfloat> %v
@@ -96,7 +84,7 @@ define <vscale x 2 x bfloat> @vp_ceil_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vs
 define <vscale x 2 x bfloat> @vp_ceil_vv_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv2bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -119,27 +107,21 @@ define <vscale x 2 x bfloat> @vp_ceil_vv_nxv2bf16_unmasked(<vscale x 2 x bfloat>
 define <vscale x 4 x bfloat> @vp_ceil_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv4bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v13, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v10
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v13, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
 ; CHECK-NEXT:    ret
   %v = call <vscale x 4 x bfloat> @llvm.vp.ceil.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x bfloat> %v
@@ -148,7 +130,7 @@ define <vscale x 4 x bfloat> @vp_ceil_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vs
 define <vscale x 4 x bfloat> @vp_ceil_vv_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv4bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -171,27 +153,21 @@ define <vscale x 4 x bfloat> @vp_ceil_vv_nxv4bf16_unmasked(<vscale x 4 x bfloat>
 define <vscale x 8 x bfloat> @vp_ceil_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv8bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v17, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v12, v0.t
+; CHECK-NEXT:    vfabs.v v8, v12
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v17, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vmv1r.v v0, v17
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x bfloat> @llvm.vp.ceil.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x bfloat> %v
@@ -200,7 +176,7 @@ define <vscale x 8 x bfloat> @vp_ceil_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vs
 define <vscale x 8 x bfloat> @vp_ceil_vv_nxv8bf16_unmasked(<vscale x 8 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv8bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -223,27 +199,21 @@ define <vscale x 8 x bfloat> @vp_ceil_vv_nxv8bf16_unmasked(<vscale x 8 x bfloat>
 define <vscale x 16 x bfloat> @vp_ceil_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv16bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v16, v0.t
+; CHECK-NEXT:    vfabs.v v8, v16
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x bfloat> @llvm.vp.ceil.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x bfloat> %v
@@ -252,7 +222,7 @@ define <vscale x 16 x bfloat> @vp_ceil_vv_nxv16bf16(<vscale x 16 x bfloat> %va,
 define <vscale x 16 x bfloat> @vp_ceil_vv_nxv16bf16_unmasked(<vscale x 16 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv16bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
@@ -275,60 +245,35 @@ define <vscale x 16 x bfloat> @vp_ceil_vv_nxv16bf16_unmasked(<vscale x 16 x bflo
 define <vscale x 32 x bfloat> @vp_ceil_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv32bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    lui a3, 307200
-; CHECK-NEXT:    slli a1, a2, 1
-; CHECK-NEXT:    srli a2, a2, 2
-; CHECK-NEXT:    fmv.w.x fa5, a3
-; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vslidedown.vx v6, v0, a2
-; CHECK-NEXT:    sltu a2, a0, a3
-; CHECK-NEXT:    vmv1r.v v5, v6
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a3
-; CHECK-NEXT:    vmv1r.v v0, v6
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v5, v16, fa5, v0.t
-; CHECK-NEXT:    fsrmi a2, 3
-; CHECK-NEXT:    vmv1r.v v0, v5
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    fsrm a2
-; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vfabs.v v24, v16
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
+; CHECK-NEXT:    fsrmi a0, 3
+; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v6
+; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB10_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB10_2:
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v7
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v16, fa5, v0.t
+; CHECK-NEXT:    vfabs.v v8, v24
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v7
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v24, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.ceil.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -337,46 +282,12 @@ define <vscale x 32 x bfloat> @vp_ceil_vv_nxv32bf16(<vscale x 32 x bfloat> %va,
 define <vscale x 32 x bfloat> @vp_ceil_vv_nxv32bf16_unmasked(<vscale x 32 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv32bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vmset.m v16
-; CHECK-NEXT:    lui a3, 307200
-; CHECK-NEXT:    slli a1, a2, 1
-; CHECK-NEXT:    srli a2, a2, 2
-; CHECK-NEXT:    fmv.w.x fa5, a3
-; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v7, v16, a2
-; CHECK-NEXT:    sltu a2, a0, a3
-; CHECK-NEXT:    vmv1r.v v6, v7
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a3
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v6, v16, fa5, v0.t
-; CHECK-NEXT:    fsrmi a2, 3
-; CHECK-NEXT:    vmv1r.v v0, v6
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    fsrm a2
-; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB11_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB11_2:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v16
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
@@ -385,7 +296,21 @@ define <vscale x 32 x bfloat> @vp_ceil_vv_nxv32bf16_unmasked(<vscale x 32 x bflo
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfabs.v v8, v24
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    fsrmi a0, 3
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.ceil.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -394,15 +319,13 @@ define <vscale x 32 x bfloat> @vp_ceil_vv_nxv32bf16_unmasked(<vscale x 32 x bflo
 define <vscale x 1 x half> @vp_ceil_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_vv_nxv1f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 3
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -412,27 +335,21 @@ define <vscale x 1 x half> @vp_ceil_vv_nxv1f16(<vscale x 1 x half> %va, <vscale
 ;
 ; ZVFHMIN-LABEL: vp_ceil_vv_nxv1f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 1 x half> @llvm.vp.ceil.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
@@ -441,7 +358,7 @@ define <vscale x 1 x half> @vp_ceil_vv_nxv1f16(<vscale x 1 x half> %va, <vscale
 define <vscale x 1 x half> @vp_ceil_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_vv_nxv1f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -457,7 +374,7 @@ define <vscale x 1 x half> @vp_ceil_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 ;
 ; ZVFHMIN-LABEL: vp_ceil_vv_nxv1f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -480,15 +397,13 @@ define <vscale x 1 x half> @vp_ceil_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
 define <vscale x 2 x half> @vp_ceil_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_vv_nxv2f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 3
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -498,27 +413,21 @@ define <vscale x 2 x half> @vp_ceil_vv_nxv2f16(<vscale x 2 x half> %va, <vscale
 ;
 ; ZVFHMIN-LABEL: vp_ceil_vv_nxv2f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vmv.v.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x half> @llvm.vp.ceil.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
@@ -527,7 +436,7 @@ define <vscale x 2 x half> @vp_ceil_vv_nxv2f16(<vscale x 2 x half> %va, <vscale
 define <vscale x 2 x half> @vp_ceil_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_vv_nxv2f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -543,7 +452,7 @@ define <vscale x 2 x half> @vp_ceil_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
 ;
 ; ZVFHMIN-LABEL: vp_ceil_vv_nxv2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -566,15 +475,13 @@ define <vscale x 2 x half> @vp_ceil_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
 define <vscale x 4 x half> @vp_ceil_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_vv_nxv4f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 3
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -584,27 +491,21 @@ define <vscale x 4 x half> @vp_ceil_vv_nxv4f16(<vscale x 4 x half> %va, <vscale
 ;
 ; ZVFHMIN-LABEL: vp_ceil_vv_nxv4f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v13, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v13, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v13
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 4 x half> @llvm.vp.ceil.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
@@ -613,7 +514,7 @@ define <vscale x 4 x half> @vp_ceil_vv_nxv4f16(<vscale x 4 x half> %va, <vscale
 define <vscale x 4 x half> @vp_ceil_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_vv_nxv4f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -629,7 +530,7 @@ define <vscale x 4 x half> @vp_ceil_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
 ;
 ; ZVFHMIN-LABEL: vp_ceil_vv_nxv4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -652,17 +553,13 @@ define <vscale x 4 x half> @vp_ceil_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
 define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_vv_nxv8f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFH-NEXT:    vmv1r.v v12, v0
-; ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; ZVFH-NEXT:    fsrmi a0, 3
-; ZVFH-NEXT:    vmv1r.v v0, v12
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -672,27 +569,21 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
 ;
 ; ZVFHMIN-LABEL: vp_ceil_vv_nxv8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v17, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v12
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v17, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v17
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
@@ -701,7 +592,7 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
 define <vscale x 8 x half> @vp_ceil_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_vv_nxv8f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -717,7 +608,7 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
 ;
 ; ZVFHMIN-LABEL: vp_ceil_vv_nxv8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -740,17 +631,13 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
 define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_vv_nxv16f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFH-NEXT:    vmv1r.v v16, v0
-; ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfabs.v v12, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; ZVFH-NEXT:    fsrmi a0, 3
-; ZVFH-NEXT:    vmv1r.v v0, v16
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -760,27 +647,21 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
 ;
 ; ZVFHMIN-LABEL: vp_ceil_vv_nxv16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v25, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v16
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v25, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v16, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v24
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
@@ -789,7 +670,7 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
 define <vscale x 16 x half> @vp_ceil_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_vv_nxv16f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v12, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -805,7 +686,7 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16_unmasked(<vscale x 16 x half> %
 ;
 ; ZVFHMIN-LABEL: vp_ceil_vv_nxv16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
@@ -828,17 +709,13 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16_unmasked(<vscale x 16 x half> %
 define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_vv_nxv32f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT:    vmv1r.v v24, v0
-; ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT:    vfabs.v v16, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; ZVFH-NEXT:    fsrmi a0, 3
-; ZVFH-NEXT:    vmv1r.v v0, v24
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -848,60 +725,35 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
 ;
 ; ZVFHMIN-LABEL: vp_ceil_vv_nxv32f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    lui a3, 307200
-; ZVFHMIN-NEXT:    slli a1, a2, 1
-; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a3
-; ZVFHMIN-NEXT:    sub a3, a0, a1
-; ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a2
-; ZVFHMIN-NEXT:    sltu a2, a0, a3
-; ZVFHMIN-NEXT:    vmv1r.v v5, v6
-; ZVFHMIN-NEXT:    addi a2, a2, -1
-; ZVFHMIN-NEXT:    and a2, a2, a3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v5, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a2, 3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v5
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    fsrm a2
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v24, v16
+; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT:    fsrmi a0, 3
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB22_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB22_2:
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v8, v7
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v16, fa5, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v24
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -910,7 +762,7 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
 define <vscale x 32 x half> @vp_ceil_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_vv_nxv32f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfabs.v v16, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -926,46 +778,12 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ;
 ; ZVFHMIN-LABEL: vp_ceil_vv_nxv32f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
-; ZVFHMIN-NEXT:    lui a3, 307200
-; ZVFHMIN-NEXT:    slli a1, a2, 1
-; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a3
-; ZVFHMIN-NEXT:    sub a3, a0, a1
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v7, v16, a2
-; ZVFHMIN-NEXT:    sltu a2, a0, a3
-; ZVFHMIN-NEXT:    vmv1r.v v6, v7
-; ZVFHMIN-NEXT:    addi a2, a2, -1
-; ZVFHMIN-NEXT:    and a2, a2, a3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v6, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a2, 3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    fsrm a2
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB23_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB23_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v24, v16
+; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 3
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
@@ -974,7 +792,21 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfabs.v v8, v24
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT:    fsrmi a0, 3
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -983,14 +815,12 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16_unmasked(<vscale x 32 x half> %
 define <vscale x 1 x float> @vp_ceil_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1004,7 +834,7 @@ define <vscale x 1 x float> @vp_ceil_vv_nxv1f32(<vscale x 1 x float> %va, <vscal
 define <vscale x 1 x float> @vp_ceil_vv_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv1f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1023,14 +853,12 @@ define <vscale x 1 x float> @vp_ceil_vv_nxv1f32_unmasked(<vscale x 1 x float> %v
 define <vscale x 2 x float> @vp_ceil_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1044,7 +872,7 @@ define <vscale x 2 x float> @vp_ceil_vv_nxv2f32(<vscale x 2 x float> %va, <vscal
 define <vscale x 2 x float> @vp_ceil_vv_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1063,16 +891,12 @@ define <vscale x 2 x float> @vp_ceil_vv_nxv2f32_unmasked(<vscale x 2 x float> %v
 define <vscale x 4 x float> @vp_ceil_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1086,7 +910,7 @@ define <vscale x 4 x float> @vp_ceil_vv_nxv4f32(<vscale x 4 x float> %va, <vscal
 define <vscale x 4 x float> @vp_ceil_vv_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1105,16 +929,12 @@ define <vscale x 4 x float> @vp_ceil_vv_nxv4f32_unmasked(<vscale x 4 x float> %v
 define <vscale x 8 x float> @vp_ceil_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1128,7 +948,7 @@ define <vscale x 8 x float> @vp_ceil_vv_nxv8f32(<vscale x 8 x float> %va, <vscal
 define <vscale x 8 x float> @vp_ceil_vv_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1147,16 +967,12 @@ define <vscale x 8 x float> @vp_ceil_vv_nxv8f32_unmasked(<vscale x 8 x float> %v
 define <vscale x 16 x float> @vp_ceil_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    vfabs.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1170,7 +986,7 @@ define <vscale x 16 x float> @vp_ceil_vv_nxv16f32(<vscale x 16 x float> %va, <vs
 define <vscale x 16 x float> @vp_ceil_vv_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_vv_nxv16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1189,14 +1005,12 @@ define <vscale x 16 x float> @vp_ceil_vv_nxv16f32_unmasked(<vscale x 16 x float>
 define <vscale x 1 x double> @vp_ceil_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; RV32ZFH-LABEL: vp_ceil_vv_nxv1f64:
 ; RV32ZFH:       # %bb.0:
-; RV32ZFH-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI34_0)(a1)
-; RV32ZFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZFH-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZFH-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
+; RV32ZFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZFH-NEXT:    vfabs.v v9, v8
+; RV32ZFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZFH-NEXT:    fsrmi a0, 3
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZFH-NEXT:    fsrm a0
 ; RV32ZFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1206,15 +1020,13 @@ define <vscale x 1 x double> @vp_ceil_vv_nxv1f64(<vscale x 1 x double> %va, <vsc
 ;
 ; RV64ZFH-LABEL: vp_ceil_vv_nxv1f64:
 ; RV64ZFH:       # %bb.0:
-; RV64ZFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZFH-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZFH-NEXT:    vfabs.v v9, v8
 ; RV64ZFH-NEXT:    li a0, 1075
 ; RV64ZFH-NEXT:    slli a0, a0, 52
 ; RV64ZFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZFH-NEXT:    fsrmi a0, 3
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZFH-NEXT:    fsrm a0
 ; RV64ZFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1228,9 +1040,9 @@ define <vscale x 1 x double> @vp_ceil_vv_nxv1f64(<vscale x 1 x double> %va, <vsc
 define <vscale x 1 x double> @vp_ceil_vv_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
 ; RV32ZFH-LABEL: vp_ceil_vv_nxv1f64_unmasked:
 ; RV32ZFH:       # %bb.0:
-; RV32ZFH-NEXT:    lui a1, %hi(.LCPI35_0)
-; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI35_0)(a1)
-; RV32ZFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZFH-NEXT:    lui a0, %hi(.LCPI35_0)
+; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
+; RV32ZFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32ZFH-NEXT:    vfabs.v v9, v8
 ; RV32ZFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZFH-NEXT:    fsrmi a0, 3
@@ -1243,7 +1055,7 @@ define <vscale x 1 x double> @vp_ceil_vv_nxv1f64_unmasked(<vscale x 1 x double>
 ;
 ; RV64ZFH-LABEL: vp_ceil_vv_nxv1f64_unmasked:
 ; RV64ZFH:       # %bb.0:
-; RV64ZFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64ZFH-NEXT:    vfabs.v v9, v8
 ; RV64ZFH-NEXT:    li a0, 1075
 ; RV64ZFH-NEXT:    slli a0, a0, 52
@@ -1263,16 +1075,12 @@ define <vscale x 1 x double> @vp_ceil_vv_nxv1f64_unmasked(<vscale x 1 x double>
 define <vscale x 2 x double> @vp_ceil_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; RV32ZFH-LABEL: vp_ceil_vv_nxv2f64:
 ; RV32ZFH:       # %bb.0:
-; RV32ZFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZFH-NEXT:    vmv1r.v v12, v0
 ; RV32ZFH-NEXT:    lui a0, %hi(.LCPI36_0)
 ; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI36_0)(a0)
-; RV32ZFH-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZFH-NEXT:    vfabs.v v10, v8
+; RV32ZFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZFH-NEXT:    fsrmi a0, 3
-; RV32ZFH-NEXT:    vmv1r.v v0, v12
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZFH-NEXT:    fsrm a0
 ; RV32ZFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1282,17 +1090,13 @@ define <vscale x 2 x double> @vp_ceil_vv_nxv2f64(<vscale x 2 x double> %va, <vsc
 ;
 ; RV64ZFH-LABEL: vp_ceil_vv_nxv2f64:
 ; RV64ZFH:       # %bb.0:
-; RV64ZFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZFH-NEXT:    vmv1r.v v12, v0
-; RV64ZFH-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZFH-NEXT:    vfabs.v v10, v8
 ; RV64ZFH-NEXT:    li a0, 1075
 ; RV64ZFH-NEXT:    slli a0, a0, 52
 ; RV64ZFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZFH-NEXT:    fsrmi a0, 3
-; RV64ZFH-NEXT:    vmv1r.v v0, v12
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZFH-NEXT:    fsrm a0
 ; RV64ZFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1306,9 +1110,9 @@ define <vscale x 2 x double> @vp_ceil_vv_nxv2f64(<vscale x 2 x double> %va, <vsc
 define <vscale x 2 x double> @vp_ceil_vv_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
 ; RV32ZFH-LABEL: vp_ceil_vv_nxv2f64_unmasked:
 ; RV32ZFH:       # %bb.0:
-; RV32ZFH-NEXT:    lui a1, %hi(.LCPI37_0)
-; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI37_0)(a1)
-; RV32ZFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZFH-NEXT:    lui a0, %hi(.LCPI37_0)
+; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI37_0)(a0)
+; RV32ZFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV32ZFH-NEXT:    vfabs.v v10, v8
 ; RV32ZFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZFH-NEXT:    fsrmi a0, 3
@@ -1321,7 +1125,7 @@ define <vscale x 2 x double> @vp_ceil_vv_nxv2f64_unmasked(<vscale x 2 x double>
 ;
 ; RV64ZFH-LABEL: vp_ceil_vv_nxv2f64_unmasked:
 ; RV64ZFH:       # %bb.0:
-; RV64ZFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV64ZFH-NEXT:    vfabs.v v10, v8
 ; RV64ZFH-NEXT:    li a0, 1075
 ; RV64ZFH-NEXT:    slli a0, a0, 52
@@ -1341,16 +1145,12 @@ define <vscale x 2 x double> @vp_ceil_vv_nxv2f64_unmasked(<vscale x 2 x double>
 define <vscale x 4 x double> @vp_ceil_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; RV32ZFH-LABEL: vp_ceil_vv_nxv4f64:
 ; RV32ZFH:       # %bb.0:
-; RV32ZFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZFH-NEXT:    vmv1r.v v16, v0
 ; RV32ZFH-NEXT:    lui a0, %hi(.LCPI38_0)
 ; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI38_0)(a0)
-; RV32ZFH-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZFH-NEXT:    vfabs.v v12, v8
+; RV32ZFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZFH-NEXT:    fsrmi a0, 3
-; RV32ZFH-NEXT:    vmv1r.v v0, v16
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZFH-NEXT:    fsrm a0
 ; RV32ZFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1360,17 +1160,13 @@ define <vscale x 4 x double> @vp_ceil_vv_nxv4f64(<vscale x 4 x double> %va, <vsc
 ;
 ; RV64ZFH-LABEL: vp_ceil_vv_nxv4f64:
 ; RV64ZFH:       # %bb.0:
-; RV64ZFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZFH-NEXT:    vmv1r.v v16, v0
-; RV64ZFH-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZFH-NEXT:    vfabs.v v12, v8
 ; RV64ZFH-NEXT:    li a0, 1075
 ; RV64ZFH-NEXT:    slli a0, a0, 52
 ; RV64ZFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZFH-NEXT:    fsrmi a0, 3
-; RV64ZFH-NEXT:    vmv1r.v v0, v16
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZFH-NEXT:    fsrm a0
 ; RV64ZFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1384,9 +1180,9 @@ define <vscale x 4 x double> @vp_ceil_vv_nxv4f64(<vscale x 4 x double> %va, <vsc
 define <vscale x 4 x double> @vp_ceil_vv_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
 ; RV32ZFH-LABEL: vp_ceil_vv_nxv4f64_unmasked:
 ; RV32ZFH:       # %bb.0:
-; RV32ZFH-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI39_0)(a1)
-; RV32ZFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZFH-NEXT:    lui a0, %hi(.LCPI39_0)
+; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI39_0)(a0)
+; RV32ZFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32ZFH-NEXT:    vfabs.v v12, v8
 ; RV32ZFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZFH-NEXT:    fsrmi a0, 3
@@ -1399,7 +1195,7 @@ define <vscale x 4 x double> @vp_ceil_vv_nxv4f64_unmasked(<vscale x 4 x double>
 ;
 ; RV64ZFH-LABEL: vp_ceil_vv_nxv4f64_unmasked:
 ; RV64ZFH:       # %bb.0:
-; RV64ZFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64ZFH-NEXT:    vfabs.v v12, v8
 ; RV64ZFH-NEXT:    li a0, 1075
 ; RV64ZFH-NEXT:    slli a0, a0, 52
@@ -1419,16 +1215,12 @@ define <vscale x 4 x double> @vp_ceil_vv_nxv4f64_unmasked(<vscale x 4 x double>
 define <vscale x 7 x double> @vp_ceil_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
 ; RV32ZFH-LABEL: vp_ceil_vv_nxv7f64:
 ; RV32ZFH:       # %bb.0:
-; RV32ZFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZFH-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
-; RV32ZFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZFH-NEXT:    vfabs.v v16, v8
+; RV32ZFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZFH-NEXT:    fsrmi a0, 3
-; RV32ZFH-NEXT:    vmv1r.v v0, v24
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZFH-NEXT:    fsrm a0
 ; RV32ZFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1438,17 +1230,13 @@ define <vscale x 7 x double> @vp_ceil_vv_nxv7f64(<vscale x 7 x double> %va, <vsc
 ;
 ; RV64ZFH-LABEL: vp_ceil_vv_nxv7f64:
 ; RV64ZFH:       # %bb.0:
-; RV64ZFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZFH-NEXT:    vmv1r.v v24, v0
-; RV64ZFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZFH-NEXT:    vfabs.v v16, v8
 ; RV64ZFH-NEXT:    li a0, 1075
 ; RV64ZFH-NEXT:    slli a0, a0, 52
 ; RV64ZFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZFH-NEXT:    fsrmi a0, 3
-; RV64ZFH-NEXT:    vmv1r.v v0, v24
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZFH-NEXT:    fsrm a0
 ; RV64ZFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1462,9 +1250,9 @@ define <vscale x 7 x double> @vp_ceil_vv_nxv7f64(<vscale x 7 x double> %va, <vsc
 define <vscale x 7 x double> @vp_ceil_vv_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
 ; RV32ZFH-LABEL: vp_ceil_vv_nxv7f64_unmasked:
 ; RV32ZFH:       # %bb.0:
-; RV32ZFH-NEXT:    lui a1, %hi(.LCPI41_0)
-; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI41_0)(a1)
-; RV32ZFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZFH-NEXT:    lui a0, %hi(.LCPI41_0)
+; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
+; RV32ZFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZFH-NEXT:    vfabs.v v16, v8
 ; RV32ZFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZFH-NEXT:    fsrmi a0, 3
@@ -1477,7 +1265,7 @@ define <vscale x 7 x double> @vp_ceil_vv_nxv7f64_unmasked(<vscale x 7 x double>
 ;
 ; RV64ZFH-LABEL: vp_ceil_vv_nxv7f64_unmasked:
 ; RV64ZFH:       # %bb.0:
-; RV64ZFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZFH-NEXT:    vfabs.v v16, v8
 ; RV64ZFH-NEXT:    li a0, 1075
 ; RV64ZFH-NEXT:    slli a0, a0, 52
@@ -1497,16 +1285,12 @@ define <vscale x 7 x double> @vp_ceil_vv_nxv7f64_unmasked(<vscale x 7 x double>
 define <vscale x 8 x double> @vp_ceil_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; RV32ZFH-LABEL: vp_ceil_vv_nxv8f64:
 ; RV32ZFH:       # %bb.0:
-; RV32ZFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZFH-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
-; RV32ZFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZFH-NEXT:    vfabs.v v16, v8
+; RV32ZFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZFH-NEXT:    fsrmi a0, 3
-; RV32ZFH-NEXT:    vmv1r.v v0, v24
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZFH-NEXT:    fsrm a0
 ; RV32ZFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1516,17 +1300,13 @@ define <vscale x 8 x double> @vp_ceil_vv_nxv8f64(<vscale x 8 x double> %va, <vsc
 ;
 ; RV64ZFH-LABEL: vp_ceil_vv_nxv8f64:
 ; RV64ZFH:       # %bb.0:
-; RV64ZFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZFH-NEXT:    vmv1r.v v24, v0
-; RV64ZFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZFH-NEXT:    vfabs.v v16, v8
 ; RV64ZFH-NEXT:    li a0, 1075
 ; RV64ZFH-NEXT:    slli a0, a0, 52
 ; RV64ZFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZFH-NEXT:    fsrmi a0, 3
-; RV64ZFH-NEXT:    vmv1r.v v0, v24
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZFH-NEXT:    fsrm a0
 ; RV64ZFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1540,9 +1320,9 @@ define <vscale x 8 x double> @vp_ceil_vv_nxv8f64(<vscale x 8 x double> %va, <vsc
 define <vscale x 8 x double> @vp_ceil_vv_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
 ; RV32ZFH-LABEL: vp_ceil_vv_nxv8f64_unmasked:
 ; RV32ZFH:       # %bb.0:
-; RV32ZFH-NEXT:    lui a1, %hi(.LCPI43_0)
-; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI43_0)(a1)
-; RV32ZFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZFH-NEXT:    lui a0, %hi(.LCPI43_0)
+; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
+; RV32ZFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZFH-NEXT:    vfabs.v v16, v8
 ; RV32ZFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZFH-NEXT:    fsrmi a0, 3
@@ -1555,7 +1335,7 @@ define <vscale x 8 x double> @vp_ceil_vv_nxv8f64_unmasked(<vscale x 8 x double>
 ;
 ; RV64ZFH-LABEL: vp_ceil_vv_nxv8f64_unmasked:
 ; RV64ZFH:       # %bb.0:
-; RV64ZFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZFH-NEXT:    vfabs.v v16, v8
 ; RV64ZFH-NEXT:    li a0, 1075
 ; RV64ZFH-NEXT:    slli a0, a0, 52
@@ -1577,93 +1357,53 @@ define <vscale x 8 x double> @vp_ceil_vv_nxv8f64_unmasked(<vscale x 8 x double>
 define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; RV32ZFH-LABEL: vp_ceil_vv_nxv16f64:
 ; RV32ZFH:       # %bb.0:
-; RV32ZFH-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV32ZFH-NEXT:    vmv1r.v v7, v0
-; RV32ZFH-NEXT:    csrr a1, vlenb
-; RV32ZFH-NEXT:    lui a2, %hi(.LCPI44_0)
-; RV32ZFH-NEXT:    srli a3, a1, 3
-; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI44_0)(a2)
-; RV32ZFH-NEXT:    sub a2, a0, a1
-; RV32ZFH-NEXT:    vslidedown.vx v6, v0, a3
-; RV32ZFH-NEXT:    sltu a3, a0, a2
-; RV32ZFH-NEXT:    addi a3, a3, -1
-; RV32ZFH-NEXT:    and a2, a3, a2
-; RV32ZFH-NEXT:    vmv1r.v v0, v6
-; RV32ZFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZFH-NEXT:    fsrmi a2, 3
-; RV32ZFH-NEXT:    vmv1r.v v0, v6
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZFH-NEXT:    fsrm a2
+; RV32ZFH-NEXT:    lui a0, %hi(.LCPI44_0)
+; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
+; RV32ZFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZFH-NEXT:    vfabs.v v24, v8
+; RV32ZFH-NEXT:    fsrmi a0, 3
+; RV32ZFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZFH-NEXT:    vfabs.v v24, v16
+; RV32ZFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZFH-NEXT:    fsrm a0
 ; RV32ZFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZFH-NEXT:    bltu a0, a1, .LBB44_2
-; RV32ZFH-NEXT:  # %bb.1:
-; RV32ZFH-NEXT:    mv a0, a1
-; RV32ZFH-NEXT:  .LBB44_2:
-; RV32ZFH-NEXT:    vmv1r.v v0, v7
-; RV32ZFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV32ZFH-NEXT:    fsrmi a0, 3
+; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZFH-NEXT:    vmv1r.v v0, v7
 ; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZFH-NEXT:    fsrm a0
 ; RV32ZFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZFH-NEXT:    ret
 ;
 ; RV64ZFH-LABEL: vp_ceil_vv_nxv16f64:
 ; RV64ZFH:       # %bb.0:
-; RV64ZFH-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV64ZFH-NEXT:    vmv1r.v v7, v0
-; RV64ZFH-NEXT:    csrr a1, vlenb
-; RV64ZFH-NEXT:    li a2, 1075
-; RV64ZFH-NEXT:    srli a3, a1, 3
-; RV64ZFH-NEXT:    vslidedown.vx v6, v0, a3
-; RV64ZFH-NEXT:    sub a3, a0, a1
-; RV64ZFH-NEXT:    slli a2, a2, 52
-; RV64ZFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZFH-NEXT:    sltu a2, a0, a3
-; RV64ZFH-NEXT:    addi a2, a2, -1
-; RV64ZFH-NEXT:    and a2, a2, a3
-; RV64ZFH-NEXT:    vmv1r.v v0, v6
-; RV64ZFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZFH-NEXT:    fsrmi a2, 3
-; RV64ZFH-NEXT:    vmv1r.v v0, v6
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZFH-NEXT:    fsrm a2
+; RV64ZFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZFH-NEXT:    vfabs.v v24, v8
+; RV64ZFH-NEXT:    li a0, 1075
+; RV64ZFH-NEXT:    slli a0, a0, 52
+; RV64ZFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZFH-NEXT:    fsrmi a0, 3
+; RV64ZFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZFH-NEXT:    vfabs.v v24, v16
+; RV64ZFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZFH-NEXT:    fsrm a0
 ; RV64ZFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZFH-NEXT:    bltu a0, a1, .LBB44_2
-; RV64ZFH-NEXT:  # %bb.1:
-; RV64ZFH-NEXT:    mv a0, a1
-; RV64ZFH-NEXT:  .LBB44_2:
-; RV64ZFH-NEXT:    vmv1r.v v0, v7
-; RV64ZFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV64ZFH-NEXT:    fsrmi a0, 3
+; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZFH-NEXT:    vmv1r.v v0, v7
 ; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZFH-NEXT:    fsrm a0
 ; RV64ZFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZFH-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.ceil.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
@@ -1672,69 +1412,53 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
 define <vscale x 16 x double> @vp_ceil_vv_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
 ; RV32ZFH-LABEL: vp_ceil_vv_nxv16f64_unmasked:
 ; RV32ZFH:       # %bb.0:
-; RV32ZFH-NEXT:    csrr a1, vlenb
-; RV32ZFH-NEXT:    lui a2, %hi(.LCPI45_0)
-; RV32ZFH-NEXT:    sub a3, a0, a1
-; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI45_0)(a2)
-; RV32ZFH-NEXT:    sltu a2, a0, a3
-; RV32ZFH-NEXT:    addi a2, a2, -1
-; RV32ZFH-NEXT:    and a2, a2, a3
-; RV32ZFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZFH-NEXT:    vfabs.v v24, v16
-; RV32ZFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZFH-NEXT:    fsrmi a2, 3
-; RV32ZFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZFH-NEXT:    fsrm a2
-; RV32ZFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZFH-NEXT:    bltu a0, a1, .LBB45_2
-; RV32ZFH-NEXT:  # %bb.1:
-; RV32ZFH-NEXT:    mv a0, a1
-; RV32ZFH-NEXT:  .LBB45_2:
-; RV32ZFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZFH-NEXT:    lui a0, %hi(.LCPI45_0)
+; RV32ZFH-NEXT:    fld fa5, %lo(.LCPI45_0)(a0)
+; RV32ZFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZFH-NEXT:    vfabs.v v24, v8
-; RV32ZFH-NEXT:    vmflt.vf v0, v24, fa5
 ; RV32ZFH-NEXT:    fsrmi a0, 3
+; RV32ZFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZFH-NEXT:    vfabs.v v24, v16
+; RV32ZFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZFH-NEXT:    fsrm a0
 ; RV32ZFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZFH-NEXT:    fsrmi a0, 3
 ; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZFH-NEXT:    vmv1r.v v0, v7
+; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZFH-NEXT:    fsrm a0
+; RV32ZFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZFH-NEXT:    ret
 ;
 ; RV64ZFH-LABEL: vp_ceil_vv_nxv16f64_unmasked:
 ; RV64ZFH:       # %bb.0:
-; RV64ZFH-NEXT:    csrr a1, vlenb
-; RV64ZFH-NEXT:    li a2, 1075
-; RV64ZFH-NEXT:    sub a3, a0, a1
-; RV64ZFH-NEXT:    slli a2, a2, 52
-; RV64ZFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZFH-NEXT:    sltu a2, a0, a3
-; RV64ZFH-NEXT:    addi a2, a2, -1
-; RV64ZFH-NEXT:    and a2, a2, a3
-; RV64ZFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZFH-NEXT:    vfabs.v v24, v16
-; RV64ZFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZFH-NEXT:    fsrmi a2, 3
-; RV64ZFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZFH-NEXT:    fsrm a2
-; RV64ZFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZFH-NEXT:    bltu a0, a1, .LBB45_2
-; RV64ZFH-NEXT:  # %bb.1:
-; RV64ZFH-NEXT:    mv a0, a1
-; RV64ZFH-NEXT:  .LBB45_2:
-; RV64ZFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZFH-NEXT:    vfabs.v v24, v8
-; RV64ZFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZFH-NEXT:    li a0, 1075
+; RV64ZFH-NEXT:    slli a0, a0, 52
+; RV64ZFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZFH-NEXT:    fsrmi a0, 3
+; RV64ZFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZFH-NEXT:    vfabs.v v24, v16
+; RV64ZFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZFH-NEXT:    fsrm a0
 ; RV64ZFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZFH-NEXT:    fsrmi a0, 3
 ; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZFH-NEXT:    vmv1r.v v0, v7
+; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZFH-NEXT:    fsrm a0
+; RV64ZFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZFH-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.ceil.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v

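The pattern above repeats across the scalable-vector ceil tests: once the VP intrinsic is expanded before instruction selection, the EVL operand no longer reaches the backend, so the checks switch from an EVL-driven `vsetvli zero, a0, ...` to a VLMAX `vsetvli a0, zero, ...`, and the mask plumbing (the `vmv1r.v` mask copies and `v0.t` predication on the vfabs/vmflt pre-ops) disappears. As a rough sketch of what the expansion does at the IR level — illustrative only, using the operand names from the tests above; dropping %m and %evl is sound for a speculatable rounding op because a VP intrinsic's masked-off result lanes are poison anyway:

  ; before expansion (as written in the test)
  %v = call <vscale x 2 x float> @llvm.vp.ceil.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
  ; after expansion: an unpredicated call to the plain intrinsic
  %v = call <vscale x 2 x float> @llvm.ceil.nxv2f32(<vscale x 2 x float> %va)

The fixed-vector tests that follow show the same expansion, except the backend can emit an immediate `vsetivli zero, N, ...` because the vector length is a compile-time constant.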
diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
index 4452e161ad593..e6a0d5d32af84 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -11,15 +11,13 @@
 define <2 x half> @vp_ceil_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_v2f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 3
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -29,27 +27,21 @@ define <2 x half> @vp_ceil_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v2f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <2 x half> @llvm.vp.ceil.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
   ret <2 x half> %v
@@ -58,7 +50,7 @@ define <2 x half> @vp_ceil_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 define <2 x half> @vp_ceil_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_v2f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -74,7 +66,7 @@ define <2 x half> @vp_ceil_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -97,15 +89,13 @@ define <2 x half> @vp_ceil_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 define <4 x half> @vp_ceil_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_v4f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 3
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -115,27 +105,21 @@ define <4 x half> @vp_ceil_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v4f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vmv.v.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <4 x half> @llvm.vp.ceil.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x half> %v
@@ -144,7 +128,7 @@ define <4 x half> @vp_ceil_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 define <4 x half> @vp_ceil_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_v4f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -160,7 +144,7 @@ define <4 x half> @vp_ceil_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -183,15 +167,13 @@ define <4 x half> @vp_ceil_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_v8f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 3
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -201,27 +183,21 @@ define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v13, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v13, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v13
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
   %v = call <8 x half> @llvm.vp.ceil.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
   ret <8 x half> %v
@@ -230,7 +206,7 @@ define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 define <8 x half> @vp_ceil_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_v8f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -246,7 +222,7 @@ define <8 x half> @vp_ceil_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -269,17 +245,13 @@ define <8 x half> @vp_ceil_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_v16f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFH-NEXT:    vmv1r.v v12, v0
-; ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; ZVFH-NEXT:    fsrmi a0, 3
-; ZVFH-NEXT:    vmv1r.v v0, v12
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -289,27 +261,21 @@ define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v17, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v12
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v17, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v17
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
   %v = call <16 x half> @llvm.vp.ceil.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
   ret <16 x half> %v
@@ -318,7 +284,7 @@ define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
 define <16 x half> @vp_ceil_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_ceil_v16f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -334,7 +300,7 @@ define <16 x half> @vp_ceil_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
 ;
 ; ZVFHMIN-LABEL: vp_ceil_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -357,14 +323,12 @@ define <16 x half> @vp_ceil_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
 define <2 x float> @vp_ceil_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -378,7 +342,7 @@ define <2 x float> @vp_ceil_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl
 define <2 x float> @vp_ceil_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -397,14 +361,12 @@ define <2 x float> @vp_ceil_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 define <4 x float> @vp_ceil_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -418,7 +380,7 @@ define <4 x float> @vp_ceil_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl
 define <4 x float> @vp_ceil_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -437,16 +399,12 @@ define <4 x float> @vp_ceil_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 define <8 x float> @vp_ceil_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -460,7 +418,7 @@ define <8 x float> @vp_ceil_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl
 define <8 x float> @vp_ceil_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -479,16 +437,12 @@ define <8 x float> @vp_ceil_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 define <16 x float> @vp_ceil_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 3
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -502,7 +456,7 @@ define <16 x float> @vp_ceil_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
 define <16 x float> @vp_ceil_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_ceil_v16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -521,14 +475,12 @@ define <16 x float> @vp_ceil_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl)
 define <2 x double> @vp_ceil_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_ceil_v2f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI16_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v9, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 3
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -538,15 +490,13 @@ define <2 x double> @vp_ceil_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %e
 ;
 ; RV64ZVFH-LABEL: vp_ceil_v2f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 3
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -556,14 +506,12 @@ define <2 x double> @vp_ceil_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %e
 ;
 ; RV32ZVFHMIN-LABEL: vp_ceil_v2f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI16_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -573,15 +521,13 @@ define <2 x double> @vp_ceil_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %e
 ;
 ; RV64ZVFHMIN-LABEL: vp_ceil_v2f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 3
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -595,9 +541,9 @@ define <2 x double> @vp_ceil_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %e
 define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_ceil_v2f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v9, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 3
@@ -610,7 +556,7 @@ define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFH-LABEL: vp_ceil_v2f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -626,9 +572,9 @@ define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ;
 ; RV32ZVFHMIN-LABEL: vp_ceil_v2f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
@@ -641,7 +587,7 @@ define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFHMIN-LABEL: vp_ceil_v2f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -661,16 +607,12 @@ define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_ceil_v4f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI18_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v10, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 3
-; RV32ZVFH-NEXT:    vmv1r.v v0, v12
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -680,17 +622,13 @@ define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %e
 ;
 ; RV64ZVFH-LABEL: vp_ceil_v4f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v12, v0
-; RV64ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 3
-; RV64ZVFH-NEXT:    vmv1r.v v0, v12
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -700,16 +638,12 @@ define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %e
 ;
 ; RV32ZVFHMIN-LABEL: vp_ceil_v4f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI18_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -719,17 +653,13 @@ define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %e
 ;
 ; RV64ZVFHMIN-LABEL: vp_ceil_v4f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 3
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -743,9 +673,9 @@ define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %e
 define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_ceil_v4f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v10, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 3
@@ -758,7 +688,7 @@ define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFH-LABEL: vp_ceil_v4f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -774,9 +704,9 @@ define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ;
 ; RV32ZVFHMIN-LABEL: vp_ceil_v4f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
@@ -789,7 +719,7 @@ define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFHMIN-LABEL: vp_ceil_v4f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -809,16 +739,12 @@ define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_ceil_v8f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI20_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI20_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v12, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 3
-; RV32ZVFH-NEXT:    vmv1r.v v0, v16
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -828,17 +754,13 @@ define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %e
 ;
 ; RV64ZVFH-LABEL: vp_ceil_v8f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v16, v0
-; RV64ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 3
-; RV64ZVFH-NEXT:    vmv1r.v v0, v16
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -848,16 +770,12 @@ define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %e
 ;
 ; RV32ZVFHMIN-LABEL: vp_ceil_v8f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI20_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI20_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -867,17 +785,13 @@ define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %e
 ;
 ; RV64ZVFHMIN-LABEL: vp_ceil_v8f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 3
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -891,9 +805,9 @@ define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %e
 define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_ceil_v8f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI21_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI21_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v12, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 3
@@ -906,7 +820,7 @@ define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFH-LABEL: vp_ceil_v8f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -922,9 +836,9 @@ define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ;
 ; RV32ZVFHMIN-LABEL: vp_ceil_v8f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI21_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI21_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
@@ -937,7 +851,7 @@ define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFHMIN-LABEL: vp_ceil_v8f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -957,16 +871,12 @@ define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_ceil_v15f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI22_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI22_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 3
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -976,17 +886,13 @@ define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroex
 ;
 ; RV64ZVFH-LABEL: vp_ceil_v15f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 3
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -996,16 +902,12 @@ define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroex
 ;
 ; RV32ZVFHMIN-LABEL: vp_ceil_v15f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI22_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI22_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1015,17 +917,13 @@ define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroex
 ;
 ; RV64ZVFHMIN-LABEL: vp_ceil_v15f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 3
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1039,9 +937,9 @@ define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroex
 define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_ceil_v15f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI23_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 3
@@ -1054,7 +952,7 @@ define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
 ;
 ; RV64ZVFH-LABEL: vp_ceil_v15f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1070,9 +968,9 @@ define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
 ;
 ; RV32ZVFHMIN-LABEL: vp_ceil_v15f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI23_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
@@ -1085,7 +983,7 @@ define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
 ;
 ; RV64ZVFHMIN-LABEL: vp_ceil_v15f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1105,16 +1003,12 @@ define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
 define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_ceil_v16f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI24_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 3
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1124,17 +1018,13 @@ define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroex
 ;
 ; RV64ZVFH-LABEL: vp_ceil_v16f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 3
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1144,16 +1034,12 @@ define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroex
 ;
 ; RV32ZVFHMIN-LABEL: vp_ceil_v16f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI24_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1163,17 +1049,13 @@ define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroex
 ;
 ; RV64ZVFHMIN-LABEL: vp_ceil_v16f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 3
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1187,9 +1069,9 @@ define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroex
 define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_ceil_v16f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI25_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI25_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 3
@@ -1202,7 +1084,7 @@ define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
 ;
 ; RV64ZVFH-LABEL: vp_ceil_v16f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1218,9 +1100,9 @@ define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
 ;
 ; RV32ZVFHMIN-LABEL: vp_ceil_v16f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI25_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI25_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
@@ -1233,7 +1115,7 @@ define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
 ;
 ; RV64ZVFHMIN-LABEL: vp_ceil_v16f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1253,41 +1135,21 @@ define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
 define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_ceil_v32f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v6, v0
-; RV32ZVFH-NEXT:    li a2, 16
-; RV32ZVFH-NEXT:    vslidedown.vi v7, v0, 2
-; RV32ZVFH-NEXT:    mv a1, a0
-; RV32ZVFH-NEXT:    bltu a0, a2, .LBB26_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    li a1, 16
-; RV32ZVFH-NEXT:  .LBB26_2:
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
-; RV32ZVFH-NEXT:    addi a1, a0, -16
-; RV32ZVFH-NEXT:    sltu a0, a0, a1
-; RV32ZVFH-NEXT:    addi a0, a0, -1
-; RV32ZVFH-NEXT:    and a0, a0, a1
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a1, 3
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v24, v8
+; RV32ZVFH-NEXT:    fsrmi a0, 3
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a1
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT:    fsrmi a0, 3
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 3
-; RV32ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
@@ -1298,42 +1160,22 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
 ;
 ; RV64ZVFH-LABEL: vp_ceil_v32f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v6, v0
-; RV64ZVFH-NEXT:    li a2, 16
-; RV64ZVFH-NEXT:    vslidedown.vi v7, v0, 2
-; RV64ZVFH-NEXT:    mv a1, a0
-; RV64ZVFH-NEXT:    bltu a0, a2, .LBB26_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    li a1, 16
-; RV64ZVFH-NEXT:  .LBB26_2:
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    li a1, 1075
-; RV64ZVFH-NEXT:    slli a1, a1, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a1
-; RV64ZVFH-NEXT:    addi a1, a0, -16
-; RV64ZVFH-NEXT:    sltu a0, a0, a1
-; RV64ZVFH-NEXT:    addi a0, a0, -1
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    and a0, a0, a1
-; RV64ZVFH-NEXT:    fsrmi a1, 3
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v24, v8
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    fsrmi a0, 3
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a1
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT:    fsrmi a0, 3
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 3
-; RV64ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
@@ -1344,41 +1186,21 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
 ;
 ; RV32ZVFHMIN-LABEL: vp_ceil_v32f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v6, v0
-; RV32ZVFHMIN-NEXT:    li a2, 16
-; RV32ZVFHMIN-NEXT:    vslidedown.vi v7, v0, 2
-; RV32ZVFHMIN-NEXT:    mv a1, a0
-; RV32ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    li a1, 16
-; RV32ZVFHMIN-NEXT:  .LBB26_2:
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
-; RV32ZVFHMIN-NEXT:    addi a1, a0, -16
-; RV32ZVFHMIN-NEXT:    sltu a0, a0, a1
-; RV32ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV32ZVFHMIN-NEXT:    and a0, a0, a1
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a1, 3
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a1
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
@@ -1389,42 +1211,22 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
 ;
 ; RV64ZVFHMIN-LABEL: vp_ceil_v32f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v6, v0
-; RV64ZVFHMIN-NEXT:    li a2, 16
-; RV64ZVFHMIN-NEXT:    vslidedown.vi v7, v0, 2
-; RV64ZVFHMIN-NEXT:    mv a1, a0
-; RV64ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    li a1, 16
-; RV64ZVFHMIN-NEXT:  .LBB26_2:
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    li a1, 1075
-; RV64ZVFHMIN-NEXT:    slli a1, a1, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a1
-; RV64ZVFHMIN-NEXT:    addi a1, a0, -16
-; RV64ZVFHMIN-NEXT:    sltu a0, a0, a1
-; RV64ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    and a0, a0, a1
-; RV64ZVFHMIN-NEXT:    fsrmi a1, 3
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 3
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a1
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 3
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 3
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
@@ -1439,36 +1241,24 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
 define <32 x double> @vp_ceil_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_ceil_v32f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    li a2, 16
-; RV32ZVFH-NEXT:    mv a1, a0
-; RV32ZVFH-NEXT:    bltu a0, a2, .LBB27_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    li a1, 16
-; RV32ZVFH-NEXT:  .LBB27_2:
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI27_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v8
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI27_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; RV32ZVFH-NEXT:    addi a2, a0, -16
-; RV32ZVFH-NEXT:    sltu a0, a0, a2
-; RV32ZVFH-NEXT:    addi a0, a0, -1
-; RV32ZVFH-NEXT:    and a0, a0, a2
-; RV32ZVFH-NEXT:    fsrmi a2, 3
+; RV32ZVFH-NEXT:    fsrmi a0, 3
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v16
 ; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a2
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    fsrmi a1, 3
+; RV32ZVFH-NEXT:    fsrmi a0, 3
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    fsrm a1
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1476,37 +1266,25 @@ define <32 x double> @vp_ceil_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
 ;
 ; RV64ZVFH-LABEL: vp_ceil_v32f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    li a2, 16
-; RV64ZVFH-NEXT:    mv a1, a0
-; RV64ZVFH-NEXT:    bltu a0, a2, .LBB27_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    li a1, 16
-; RV64ZVFH-NEXT:  .LBB27_2:
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v8
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    addi a2, a0, -16
-; RV64ZVFH-NEXT:    sltu a0, a0, a2
-; RV64ZVFH-NEXT:    addi a0, a0, -1
-; RV64ZVFH-NEXT:    and a0, a0, a2
-; RV64ZVFH-NEXT:    fsrmi a2, 3
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    fsrmi a0, 3
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v16
 ; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a2
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    fsrmi a1, 3
+; RV64ZVFH-NEXT:    fsrmi a0, 3
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    fsrm a1
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1514,36 +1292,24 @@ define <32 x double> @vp_ceil_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
 ;
 ; RV32ZVFHMIN-LABEL: vp_ceil_v32f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    li a2, 16
-; RV32ZVFHMIN-NEXT:    mv a1, a0
-; RV32ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    li a1, 16
-; RV32ZVFHMIN-NEXT:  .LBB27_2:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI27_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI27_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; RV32ZVFHMIN-NEXT:    addi a2, a0, -16
-; RV32ZVFHMIN-NEXT:    sltu a0, a0, a2
-; RV32ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV32ZVFHMIN-NEXT:    and a0, a0, a2
-; RV32ZVFHMIN-NEXT:    fsrmi a2, 3
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a2
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a1, 3
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 3
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a1
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1551,37 +1317,25 @@ define <32 x double> @vp_ceil_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
 ;
 ; RV64ZVFHMIN-LABEL: vp_ceil_v32f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    li a2, 16
-; RV64ZVFHMIN-NEXT:    mv a1, a0
-; RV64ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    li a1, 16
-; RV64ZVFHMIN-NEXT:  .LBB27_2:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    addi a2, a0, -16
-; RV64ZVFHMIN-NEXT:    sltu a0, a0, a2
-; RV64ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV64ZVFHMIN-NEXT:    and a0, a0, a2
-; RV64ZVFHMIN-NEXT:    fsrmi a2, 3
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 3
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a2
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a1, 3
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 3
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a1
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
index 69a136d6e9358..c0d510654111c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -11,15 +11,13 @@
 define <2 x half> @vp_floor_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_v2f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 2
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -29,27 +27,21 @@ define <2 x half> @vp_floor_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 ;
 ; ZVFHMIN-LABEL: vp_floor_v2f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <2 x half> @llvm.vp.floor.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
   ret <2 x half> %v
@@ -58,7 +50,7 @@ define <2 x half> @vp_floor_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 define <2 x half> @vp_floor_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_v2f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -74,7 +66,7 @@ define <2 x half> @vp_floor_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ;
 ; ZVFHMIN-LABEL: vp_floor_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -97,15 +89,13 @@ define <2 x half> @vp_floor_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 define <4 x half> @vp_floor_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_v4f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 2
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -115,27 +105,21 @@ define <4 x half> @vp_floor_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 ;
 ; ZVFHMIN-LABEL: vp_floor_v4f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vmv.v.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <4 x half> @llvm.vp.floor.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x half> %v
@@ -144,7 +128,7 @@ define <4 x half> @vp_floor_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 define <4 x half> @vp_floor_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_v4f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -160,7 +144,7 @@ define <4 x half> @vp_floor_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ;
 ; ZVFHMIN-LABEL: vp_floor_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -183,15 +167,13 @@ define <4 x half> @vp_floor_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_v8f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 2
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -201,27 +183,21 @@ define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 ;
 ; ZVFHMIN-LABEL: vp_floor_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v13, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v13, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vmv1r.v v0, v13
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
   %v = call <8 x half> @llvm.vp.floor.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
   ret <8 x half> %v
@@ -230,7 +206,7 @@ define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 define <8 x half> @vp_floor_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_v8f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -246,7 +222,7 @@ define <8 x half> @vp_floor_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ;
 ; ZVFHMIN-LABEL: vp_floor_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -269,17 +245,13 @@ define <8 x half> @vp_floor_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_v16f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFH-NEXT:    vmv1r.v v12, v0
-; ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; ZVFH-NEXT:    fsrmi a0, 2
-; ZVFH-NEXT:    vmv1r.v v0, v12
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -289,27 +261,21 @@ define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
 ;
 ; ZVFHMIN-LABEL: vp_floor_v16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v17, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v12
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v17, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vmv1r.v v0, v17
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
   %v = call <16 x half> @llvm.vp.floor.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
   ret <16 x half> %v
@@ -318,7 +284,7 @@ define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
 define <16 x half> @vp_floor_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_v16f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -334,7 +300,7 @@ define <16 x half> @vp_floor_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
 ;
 ; ZVFHMIN-LABEL: vp_floor_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -357,14 +323,12 @@ define <16 x half> @vp_floor_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
 define <2 x float> @vp_floor_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -378,7 +342,7 @@ define <2 x float> @vp_floor_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %ev
 define <2 x float> @vp_floor_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -397,14 +361,12 @@ define <2 x float> @vp_floor_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 define <4 x float> @vp_floor_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -418,7 +380,7 @@ define <4 x float> @vp_floor_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %ev
 define <4 x float> @vp_floor_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -437,16 +399,12 @@ define <4 x float> @vp_floor_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 define <8 x float> @vp_floor_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -460,7 +418,7 @@ define <8 x float> @vp_floor_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %ev
 define <8 x float> @vp_floor_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -479,16 +437,12 @@ define <8 x float> @vp_floor_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 define <16 x float> @vp_floor_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -502,7 +456,7 @@ define <16 x float> @vp_floor_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
 define <16 x float> @vp_floor_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_v16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -521,14 +475,12 @@ define <16 x float> @vp_floor_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
 define <2 x double> @vp_floor_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_v2f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI16_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v9, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -538,15 +490,13 @@ define <2 x double> @vp_floor_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %
 ;
 ; RV64ZVFH-LABEL: vp_floor_v2f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 2
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -556,14 +506,12 @@ define <2 x double> @vp_floor_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_v2f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI16_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -573,15 +521,13 @@ define <2 x double> @vp_floor_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_v2f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -595,9 +541,9 @@ define <2 x double> @vp_floor_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %
 define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_v2f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v9, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
@@ -610,7 +556,7 @@ define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFH-LABEL: vp_floor_v2f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -626,9 +572,9 @@ define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_v2f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
@@ -641,7 +587,7 @@ define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_v2f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -661,16 +607,12 @@ define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_v4f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI18_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v10, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v12
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -680,17 +622,13 @@ define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %
 ;
 ; RV64ZVFH-LABEL: vp_floor_v4f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v12, v0
-; RV64ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 2
-; RV64ZVFH-NEXT:    vmv1r.v v0, v12
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -700,16 +638,12 @@ define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_v4f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI18_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -719,17 +653,13 @@ define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_v4f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -743,9 +673,9 @@ define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %
 define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_v4f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v10, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
@@ -758,7 +688,7 @@ define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFH-LABEL: vp_floor_v4f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -774,9 +704,9 @@ define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_v4f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
@@ -789,7 +719,7 @@ define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_v4f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -809,16 +739,12 @@ define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_v8f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI20_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI20_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v12, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v16
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -828,17 +754,13 @@ define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %
 ;
 ; RV64ZVFH-LABEL: vp_floor_v8f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v16, v0
-; RV64ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 2
-; RV64ZVFH-NEXT:    vmv1r.v v0, v16
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -848,16 +770,12 @@ define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_v8f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI20_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI20_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -867,17 +785,13 @@ define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_v8f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -891,9 +805,9 @@ define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %
 define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_v8f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI21_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI21_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v12, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
@@ -906,7 +820,7 @@ define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFH-LABEL: vp_floor_v8f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -922,9 +836,9 @@ define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_v8f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI21_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI21_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
@@ -937,7 +851,7 @@ define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_v8f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -957,16 +871,12 @@ define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_v15f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI22_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI22_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -976,17 +886,13 @@ define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFH-LABEL: vp_floor_v15f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 2
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -996,16 +902,12 @@ define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroe
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_v15f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI22_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI22_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1015,17 +917,13 @@ define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_v15f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1039,9 +937,9 @@ define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroe
 define <15 x double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_v15f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI23_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
@@ -1054,7 +952,7 @@ define <15 x double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ;
 ; RV64ZVFH-LABEL: vp_floor_v15f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1070,9 +968,9 @@ define <15 x double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_v15f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI23_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
@@ -1085,7 +983,7 @@ define <15 x double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_v15f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1105,16 +1003,12 @@ define <15 x double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_v16f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI24_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1124,17 +1018,13 @@ define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFH-LABEL: vp_floor_v16f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 2
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1144,16 +1034,12 @@ define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroe
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_v16f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI24_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1163,17 +1049,13 @@ define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_v16f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1187,9 +1069,9 @@ define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroe
 define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_v16f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI25_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI25_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
@@ -1202,7 +1084,7 @@ define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ;
 ; RV64ZVFH-LABEL: vp_floor_v16f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1218,9 +1100,9 @@ define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_v16f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI25_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI25_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
@@ -1233,7 +1115,7 @@ define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_v16f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1253,41 +1135,21 @@ define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_v32f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v6, v0
-; RV32ZVFH-NEXT:    li a2, 16
-; RV32ZVFH-NEXT:    vslidedown.vi v7, v0, 2
-; RV32ZVFH-NEXT:    mv a1, a0
-; RV32ZVFH-NEXT:    bltu a0, a2, .LBB26_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    li a1, 16
-; RV32ZVFH-NEXT:  .LBB26_2:
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
-; RV32ZVFH-NEXT:    addi a1, a0, -16
-; RV32ZVFH-NEXT:    sltu a0, a0, a1
-; RV32ZVFH-NEXT:    addi a0, a0, -1
-; RV32ZVFH-NEXT:    and a0, a0, a1
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a1, 2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v24, v8
+; RV32ZVFH-NEXT:    fsrmi a0, 2
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a1
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT:    fsrmi a0, 2
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
@@ -1298,42 +1160,22 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFH-LABEL: vp_floor_v32f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v6, v0
-; RV64ZVFH-NEXT:    li a2, 16
-; RV64ZVFH-NEXT:    vslidedown.vi v7, v0, 2
-; RV64ZVFH-NEXT:    mv a1, a0
-; RV64ZVFH-NEXT:    bltu a0, a2, .LBB26_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    li a1, 16
-; RV64ZVFH-NEXT:  .LBB26_2:
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    li a1, 1075
-; RV64ZVFH-NEXT:    slli a1, a1, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a1
-; RV64ZVFH-NEXT:    addi a1, a0, -16
-; RV64ZVFH-NEXT:    sltu a0, a0, a1
-; RV64ZVFH-NEXT:    addi a0, a0, -1
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    and a0, a0, a1
-; RV64ZVFH-NEXT:    fsrmi a1, 2
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v24, v8
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    fsrmi a0, 2
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a1
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT:    fsrmi a0, 2
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 2
-; RV64ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
@@ -1344,41 +1186,21 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_v32f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v6, v0
-; RV32ZVFHMIN-NEXT:    li a2, 16
-; RV32ZVFHMIN-NEXT:    vslidedown.vi v7, v0, 2
-; RV32ZVFHMIN-NEXT:    mv a1, a0
-; RV32ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    li a1, 16
-; RV32ZVFHMIN-NEXT:  .LBB26_2:
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
-; RV32ZVFHMIN-NEXT:    addi a1, a0, -16
-; RV32ZVFHMIN-NEXT:    sltu a0, a0, a1
-; RV32ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV32ZVFHMIN-NEXT:    and a0, a0, a1
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a1, 2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a1
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
@@ -1389,42 +1211,22 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_v32f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v6, v0
-; RV64ZVFHMIN-NEXT:    li a2, 16
-; RV64ZVFHMIN-NEXT:    vslidedown.vi v7, v0, 2
-; RV64ZVFHMIN-NEXT:    mv a1, a0
-; RV64ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    li a1, 16
-; RV64ZVFHMIN-NEXT:  .LBB26_2:
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    li a1, 1075
-; RV64ZVFHMIN-NEXT:    slli a1, a1, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a1
-; RV64ZVFHMIN-NEXT:    addi a1, a0, -16
-; RV64ZVFHMIN-NEXT:    sltu a0, a0, a1
-; RV64ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    and a0, a0, a1
-; RV64ZVFHMIN-NEXT:    fsrmi a1, 2
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a1
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
@@ -1439,36 +1241,24 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_v32f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    li a2, 16
-; RV32ZVFH-NEXT:    mv a1, a0
-; RV32ZVFH-NEXT:    bltu a0, a2, .LBB27_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    li a1, 16
-; RV32ZVFH-NEXT:  .LBB27_2:
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI27_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v8
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI27_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; RV32ZVFH-NEXT:    addi a2, a0, -16
-; RV32ZVFH-NEXT:    sltu a0, a0, a2
-; RV32ZVFH-NEXT:    addi a0, a0, -1
-; RV32ZVFH-NEXT:    and a0, a0, a2
-; RV32ZVFH-NEXT:    fsrmi a2, 2
+; RV32ZVFH-NEXT:    fsrmi a0, 2
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v16
 ; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a2
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    fsrmi a1, 2
+; RV32ZVFH-NEXT:    fsrmi a0, 2
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    fsrm a1
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1476,37 +1266,25 @@ define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ;
 ; RV64ZVFH-LABEL: vp_floor_v32f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    li a2, 16
-; RV64ZVFH-NEXT:    mv a1, a0
-; RV64ZVFH-NEXT:    bltu a0, a2, .LBB27_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    li a1, 16
-; RV64ZVFH-NEXT:  .LBB27_2:
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v8
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    addi a2, a0, -16
-; RV64ZVFH-NEXT:    sltu a0, a0, a2
-; RV64ZVFH-NEXT:    addi a0, a0, -1
-; RV64ZVFH-NEXT:    and a0, a0, a2
-; RV64ZVFH-NEXT:    fsrmi a2, 2
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    fsrmi a0, 2
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v16
 ; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a2
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    fsrmi a1, 2
+; RV64ZVFH-NEXT:    fsrmi a0, 2
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    fsrm a1
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1514,36 +1292,24 @@ define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_v32f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    li a2, 16
-; RV32ZVFHMIN-NEXT:    mv a1, a0
-; RV32ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    li a1, 16
-; RV32ZVFHMIN-NEXT:  .LBB27_2:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI27_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI27_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; RV32ZVFHMIN-NEXT:    addi a2, a0, -16
-; RV32ZVFHMIN-NEXT:    sltu a0, a0, a2
-; RV32ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV32ZVFHMIN-NEXT:    and a0, a0, a2
-; RV32ZVFHMIN-NEXT:    fsrmi a2, 2
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a2
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a1, 2
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a1
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1551,37 +1317,25 @@ define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_v32f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    li a2, 16
-; RV64ZVFHMIN-NEXT:    mv a1, a0
-; RV64ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    li a1, 16
-; RV64ZVFHMIN-NEXT:  .LBB27_2:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    addi a2, a0, -16
-; RV64ZVFHMIN-NEXT:    sltu a0, a0, a2
-; RV64ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV64ZVFHMIN-NEXT:    and a0, a0, a2
-; RV64ZVFHMIN-NEXT:    fsrmi a2, 2
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a2
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a1, 2
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a1
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
index 10ef72a3e3f9d..9f0c761d3bbbb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
@@ -7,15 +7,13 @@
 define <2 x half> @vp_nearbyint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
 ; CHECK-NEXT:    fmv.h.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
@@ -29,7 +27,7 @@ define <2 x half> @vp_nearbyint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %
 define <2 x half> @vp_nearbyint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v2f16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
@@ -49,15 +47,13 @@ define <2 x half> @vp_nearbyint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl)
 define <4 x half> @vp_nearbyint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
 ; CHECK-NEXT:    fmv.h.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
@@ -71,7 +67,7 @@ define <4 x half> @vp_nearbyint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %
 define <4 x half> @vp_nearbyint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v4f16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
@@ -91,15 +87,13 @@ define <4 x half> @vp_nearbyint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 define <8 x half> @vp_nearbyint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
 ; CHECK-NEXT:    fmv.h.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
@@ -113,7 +107,7 @@ define <8 x half> @vp_nearbyint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %
 define <8 x half> @vp_nearbyint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v8f16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
@@ -133,17 +127,13 @@ define <8 x half> @vp_nearbyint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl)
 define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v16f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
 ; CHECK-NEXT:    fmv.h.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
@@ -157,7 +147,7 @@ define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
 define <16 x half> @vp_nearbyint_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v16f16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
@@ -177,14 +167,12 @@ define <16 x half> @vp_nearbyint_v16f16_unmasked(<16 x half> %va, i32 zeroext %e
 define <2 x float> @vp_nearbyint_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
@@ -198,7 +186,7 @@ define <2 x float> @vp_nearbyint_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext
 define <2 x float> @vp_nearbyint_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -217,14 +205,12 @@ define <2 x float> @vp_nearbyint_v2f32_unmasked(<2 x float> %va, i32 zeroext %ev
 define <4 x float> @vp_nearbyint_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
@@ -238,7 +224,7 @@ define <4 x float> @vp_nearbyint_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext
 define <4 x float> @vp_nearbyint_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -257,16 +243,12 @@ define <4 x float> @vp_nearbyint_v4f32_unmasked(<4 x float> %va, i32 zeroext %ev
 define <8 x float> @vp_nearbyint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
@@ -280,7 +262,7 @@ define <8 x float> @vp_nearbyint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext
 define <8 x float> @vp_nearbyint_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -299,16 +281,12 @@ define <8 x float> @vp_nearbyint_v8f32_unmasked(<8 x float> %va, i32 zeroext %ev
 define <16 x float> @vp_nearbyint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
@@ -322,7 +300,7 @@ define <16 x float> @vp_nearbyint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zer
 define <16 x float> @vp_nearbyint_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_v16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -341,14 +319,12 @@ define <16 x float> @vp_nearbyint_v16f32_unmasked(<16 x float> %va, i32 zeroext
 define <2 x double> @vp_nearbyint_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vp_nearbyint_v2f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT:    vfabs.v v9, v8, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32-NEXT:    lui a0, %hi(.LCPI16_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vfabs.v v9, v8
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    frflags a0
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -358,15 +334,13 @@ define <2 x double> @vp_nearbyint_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroe
 ;
 ; RV64-LABEL: vp_nearbyint_v2f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64-NEXT:    vfabs.v v9, v8, v0.t
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vfabs.v v9, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
 ; RV64-NEXT:    fmv.d.x fa5, a0
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    frflags a0
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -380,9 +354,9 @@ define <2 x double> @vp_nearbyint_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroe
 define <2 x double> @vp_nearbyint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vp_nearbyint_v2f64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
 ; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    frflags a0
@@ -395,7 +369,7 @@ define <2 x double> @vp_nearbyint_v2f64_unmasked(<2 x double> %va, i32 zeroext %
 ;
 ; RV64-LABEL: vp_nearbyint_v2f64_unmasked:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
@@ -415,16 +389,12 @@ define <2 x double> @vp_nearbyint_v2f64_unmasked(<2 x double> %va, i32 zeroext %
 define <4 x double> @vp_nearbyint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vp_nearbyint_v4f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT:    vmv1r.v v12, v0
 ; RV32-NEXT:    lui a0, %hi(.LCPI18_0)
 ; RV32-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
-; RV32-NEXT:    vfabs.v v10, v8, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT:    vfabs.v v10, v8
+; RV32-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32-NEXT:    frflags a0
-; RV32-NEXT:    vmv1r.v v0, v12
-; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
@@ -434,17 +404,13 @@ define <4 x double> @vp_nearbyint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroe
 ;
 ; RV64-LABEL: vp_nearbyint_v4f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64-NEXT:    vmv1r.v v12, v0
-; RV64-NEXT:    vfabs.v v10, v8, v0.t
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vfabs.v v10, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
 ; RV64-NEXT:    fmv.d.x fa5, a0
-; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64-NEXT:    frflags a0
-; RV64-NEXT:    vmv1r.v v0, v12
-; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
@@ -458,9 +424,9 @@ define <4 x double> @vp_nearbyint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroe
 define <4 x double> @vp_nearbyint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vp_nearbyint_v4f64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI19_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT:    vfabs.v v10, v8
 ; RV32-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32-NEXT:    frflags a0
@@ -473,7 +439,7 @@ define <4 x double> @vp_nearbyint_v4f64_unmasked(<4 x double> %va, i32 zeroext %
 ;
 ; RV64-LABEL: vp_nearbyint_v4f64_unmasked:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vfabs.v v10, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
@@ -493,16 +459,12 @@ define <4 x double> @vp_nearbyint_v4f64_unmasked(<4 x double> %va, i32 zeroext %
 define <8 x double> @vp_nearbyint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vp_nearbyint_v8f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32-NEXT:    vmv1r.v v16, v0
 ; RV32-NEXT:    lui a0, %hi(.LCPI20_0)
 ; RV32-NEXT:    fld fa5, %lo(.LCPI20_0)(a0)
-; RV32-NEXT:    vfabs.v v12, v8, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vfabs.v v12, v8
+; RV32-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32-NEXT:    frflags a0
-; RV32-NEXT:    vmv1r.v v0, v16
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -512,17 +474,13 @@ define <8 x double> @vp_nearbyint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroe
 ;
 ; RV64-LABEL: vp_nearbyint_v8f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64-NEXT:    vmv1r.v v16, v0
-; RV64-NEXT:    vfabs.v v12, v8, v0.t
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vfabs.v v12, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
 ; RV64-NEXT:    fmv.d.x fa5, a0
-; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64-NEXT:    frflags a0
-; RV64-NEXT:    vmv1r.v v0, v16
-; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -536,9 +494,9 @@ define <8 x double> @vp_nearbyint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroe
 define <8 x double> @vp_nearbyint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vp_nearbyint_v8f64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI21_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI21_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vfabs.v v12, v8
 ; RV32-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32-NEXT:    frflags a0
@@ -551,7 +509,7 @@ define <8 x double> @vp_nearbyint_v8f64_unmasked(<8 x double> %va, i32 zeroext %
 ;
 ; RV64-LABEL: vp_nearbyint_v8f64_unmasked:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vfabs.v v12, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
@@ -571,16 +529,12 @@ define <8 x double> @vp_nearbyint_v8f64_unmasked(<8 x double> %va, i32 zeroext %
 define <15 x double> @vp_nearbyint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vp_nearbyint_v15f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT:    vmv1r.v v24, v0
 ; RV32-NEXT:    lui a0, %hi(.LCPI22_0)
 ; RV32-NEXT:    fld fa5, %lo(.LCPI22_0)(a0)
-; RV32-NEXT:    vfabs.v v16, v8, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT:    vfabs.v v16, v8
+; RV32-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32-NEXT:    frflags a0
-; RV32-NEXT:    vmv1r.v v0, v24
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -590,17 +544,13 @@ define <15 x double> @vp_nearbyint_v15f64(<15 x double> %va, <15 x i1> %m, i32 z
 ;
 ; RV64-LABEL: vp_nearbyint_v15f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64-NEXT:    vmv1r.v v24, v0
-; RV64-NEXT:    vfabs.v v16, v8, v0.t
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT:    vfabs.v v16, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
 ; RV64-NEXT:    fmv.d.x fa5, a0
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64-NEXT:    frflags a0
-; RV64-NEXT:    vmv1r.v v0, v24
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -614,9 +564,9 @@ define <15 x double> @vp_nearbyint_v15f64(<15 x double> %va, <15 x i1> %m, i32 z
 define <15 x double> @vp_nearbyint_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vp_nearbyint_v15f64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI23_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vfabs.v v16, v8
 ; RV32-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32-NEXT:    frflags a0
@@ -629,7 +579,7 @@ define <15 x double> @vp_nearbyint_v15f64_unmasked(<15 x double> %va, i32 zeroex
 ;
 ; RV64-LABEL: vp_nearbyint_v15f64_unmasked:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vfabs.v v16, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
@@ -649,16 +599,12 @@ define <15 x double> @vp_nearbyint_v15f64_unmasked(<15 x double> %va, i32 zeroex
 define <16 x double> @vp_nearbyint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vp_nearbyint_v16f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT:    vmv1r.v v24, v0
 ; RV32-NEXT:    lui a0, %hi(.LCPI24_0)
 ; RV32-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
-; RV32-NEXT:    vfabs.v v16, v8, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT:    vfabs.v v16, v8
+; RV32-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32-NEXT:    frflags a0
-; RV32-NEXT:    vmv1r.v v0, v24
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -668,17 +614,13 @@ define <16 x double> @vp_nearbyint_v16f64(<16 x double> %va, <16 x i1> %m, i32 z
 ;
 ; RV64-LABEL: vp_nearbyint_v16f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64-NEXT:    vmv1r.v v24, v0
-; RV64-NEXT:    vfabs.v v16, v8, v0.t
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT:    vfabs.v v16, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
 ; RV64-NEXT:    fmv.d.x fa5, a0
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64-NEXT:    frflags a0
-; RV64-NEXT:    vmv1r.v v0, v24
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -692,9 +634,9 @@ define <16 x double> @vp_nearbyint_v16f64(<16 x double> %va, <16 x i1> %m, i32 z
 define <16 x double> @vp_nearbyint_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vp_nearbyint_v16f64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI25_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI25_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vfabs.v v16, v8
 ; RV32-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32-NEXT:    frflags a0
@@ -707,7 +649,7 @@ define <16 x double> @vp_nearbyint_v16f64_unmasked(<16 x double> %va, i32 zeroex
 ;
 ; RV64-LABEL: vp_nearbyint_v16f64_unmasked:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vfabs.v v16, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
@@ -727,39 +669,19 @@ define <16 x double> @vp_nearbyint_v16f64_unmasked(<16 x double> %va, i32 zeroex
 define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vp_nearbyint_v32f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    vmv1r.v v6, v0
-; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vslidedown.vi v7, v0, 2
-; RV32-NEXT:    mv a1, a0
-; RV32-NEXT:    bltu a0, a2, .LBB26_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    li a1, 16
-; RV32-NEXT:  .LBB26_2:
-; RV32-NEXT:    vmv1r.v v0, v6
-; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT:    vfabs.v v24, v8, v0.t
-; RV32-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
-; RV32-NEXT:    addi a1, a0, -16
-; RV32-NEXT:    sltu a0, a0, a1
-; RV32-NEXT:    addi a0, a0, -1
-; RV32-NEXT:    and a0, a0, a1
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32-NEXT:    frflags a1
-; RV32-NEXT:    vmv1r.v v0, v6
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT:    vfabs.v v24, v8
+; RV32-NEXT:    frflags a0
+; RV32-NEXT:    vmflt.vf v0, v24, fa5
+; RV32-NEXT:    vfabs.v v24, v16
+; RV32-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32-NEXT:    fsflags a1
+; RV32-NEXT:    fsflags a0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; RV32-NEXT:    vmv1r.v v0, v7
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT:    vfabs.v v24, v16, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV32-NEXT:    frflags a0
 ; RV32-NEXT:    vmv1r.v v0, v7
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
@@ -772,40 +694,20 @@ define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
 ;
 ; RV64-LABEL: vp_nearbyint_v32f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV64-NEXT:    vmv1r.v v6, v0
-; RV64-NEXT:    li a2, 16
-; RV64-NEXT:    vslidedown.vi v7, v0, 2
-; RV64-NEXT:    mv a1, a0
-; RV64-NEXT:    bltu a0, a2, .LBB26_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    li a1, 16
-; RV64-NEXT:  .LBB26_2:
-; RV64-NEXT:    vmv1r.v v0, v6
-; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT:    vfabs.v v24, v8, v0.t
-; RV64-NEXT:    li a1, 1075
-; RV64-NEXT:    slli a1, a1, 52
-; RV64-NEXT:    fmv.d.x fa5, a1
-; RV64-NEXT:    addi a1, a0, -16
-; RV64-NEXT:    sltu a0, a0, a1
-; RV64-NEXT:    addi a0, a0, -1
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64-NEXT:    and a0, a0, a1
-; RV64-NEXT:    frflags a1
-; RV64-NEXT:    vmv1r.v v0, v6
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT:    vfabs.v v24, v8
+; RV64-NEXT:    li a0, 1075
+; RV64-NEXT:    slli a0, a0, 52
+; RV64-NEXT:    fmv.d.x fa5, a0
+; RV64-NEXT:    frflags a0
+; RV64-NEXT:    vmflt.vf v0, v24, fa5
+; RV64-NEXT:    vfabs.v v24, v16
+; RV64-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64-NEXT:    fsflags a1
+; RV64-NEXT:    fsflags a0
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; RV64-NEXT:    vmv1r.v v0, v7
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64-NEXT:    vfabs.v v24, v16, v0.t
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV64-NEXT:    frflags a0
 ; RV64-NEXT:    vmv1r.v v0, v7
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
@@ -822,77 +724,53 @@ define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
 define <32 x double> @vp_nearbyint_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vp_nearbyint_v32f64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    mv a1, a0
-; RV32-NEXT:    bltu a0, a2, .LBB27_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    li a1, 16
-; RV32-NEXT:  .LBB27_2:
-; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI27_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vfabs.v v24, v8
-; RV32-NEXT:    lui a2, %hi(.LCPI27_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; RV32-NEXT:    addi a2, a0, -16
-; RV32-NEXT:    sltu a0, a0, a2
-; RV32-NEXT:    addi a0, a0, -1
-; RV32-NEXT:    and a0, a0, a2
-; RV32-NEXT:    frflags a2
+; RV32-NEXT:    frflags a0
 ; RV32-NEXT:    vmflt.vf v0, v24, fa5
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV32-NEXT:    vfabs.v v24, v16
 ; RV32-NEXT:    vmflt.vf v7, v24, fa5
-; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32-NEXT:    fsflags a2
+; RV32-NEXT:    fsflags a0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; RV32-NEXT:    frflags a1
+; RV32-NEXT:    frflags a0
 ; RV32-NEXT:    vmv1r.v v0, v7
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32-NEXT:    fsflags a1
+; RV32-NEXT:    fsflags a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vp_nearbyint_v32f64_unmasked:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    li a2, 16
-; RV64-NEXT:    mv a1, a0
-; RV64-NEXT:    bltu a0, a2, .LBB27_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    li a1, 16
-; RV64-NEXT:  .LBB27_2:
-; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vfabs.v v24, v8
-; RV64-NEXT:    li a2, 1075
-; RV64-NEXT:    slli a2, a2, 52
-; RV64-NEXT:    fmv.d.x fa5, a2
-; RV64-NEXT:    addi a2, a0, -16
-; RV64-NEXT:    sltu a0, a0, a2
-; RV64-NEXT:    addi a0, a0, -1
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    frflags a2
+; RV64-NEXT:    li a0, 1075
+; RV64-NEXT:    slli a0, a0, 52
+; RV64-NEXT:    fmv.d.x fa5, a0
+; RV64-NEXT:    frflags a0
 ; RV64-NEXT:    vmflt.vf v0, v24, fa5
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64-NEXT:    vfabs.v v24, v16
 ; RV64-NEXT:    vmflt.vf v7, v24, fa5
-; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64-NEXT:    fsflags a2
+; RV64-NEXT:    fsflags a0
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
-; RV64-NEXT:    frflags a1
+; RV64-NEXT:    frflags a0
 ; RV64-NEXT:    vmv1r.v v0, v7
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64-NEXT:    fsflags a1
+; RV64-NEXT:    fsflags a0
 ; RV64-NEXT:    ret
   %v = call <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x double> %v
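
A note for anyone reading the checks rather than the IR: the scalar constant
loaded into fa5 is 2^52 for f64 (2^23 for f32, 2^10 for f16), the magnitude at
which every finite value of the type is already integral, so vmflt.vf restricts
the convert-and-back round trip to just the elements that can actually change;
nearbyint additionally brackets the conversion in frflags/fsflags because it
must not raise the inexact flag. A minimal sketch of IR that reaches this
lowering after the patch (function name illustrative, intrinsic real):

; Sketch only, not part of the patch: ExpandVectorPredication now replaces
; the VP call with plain llvm.nearbyint before selection, which is why the
; vsetvli on the EVL in a0 disappears from the checks above.
declare <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double>, <2 x i1>, i32)

define <2 x double> @sketch_nearbyint(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
  %v = call <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
  ret <2 x double> %v
}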

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
index 1d9ef739255ff..ae2d95590eb79 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -7,14 +7,12 @@
 define <2 x half> @vp_rint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
 ; CHECK-NEXT:    fmv.h.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
@@ -27,7 +25,7 @@ define <2 x half> @vp_rint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 define <2 x half> @vp_rint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v2f16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
@@ -45,14 +43,12 @@ define <2 x half> @vp_rint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 define <4 x half> @vp_rint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
 ; CHECK-NEXT:    fmv.h.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
@@ -65,7 +61,7 @@ define <4 x half> @vp_rint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 define <4 x half> @vp_rint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v4f16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
@@ -83,14 +79,12 @@ define <4 x half> @vp_rint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 define <8 x half> @vp_rint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
 ; CHECK-NEXT:    fmv.h.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
@@ -103,7 +97,7 @@ define <8 x half> @vp_rint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 define <8 x half> @vp_rint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v8f16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
@@ -121,16 +115,12 @@ define <8 x half> @vp_rint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v16f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
 ; CHECK-NEXT:    fmv.h.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
@@ -143,7 +133,7 @@ define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
 define <16 x half> @vp_rint_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v16f16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    li a0, 25
 ; CHECK-NEXT:    slli a0, a0, 10
@@ -161,13 +151,11 @@ define <16 x half> @vp_rint_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
 define <2 x float> @vp_rint_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
@@ -180,7 +168,7 @@ define <2 x float> @vp_rint_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl
 define <2 x float> @vp_rint_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -197,13 +185,11 @@ define <2 x float> @vp_rint_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 define <4 x float> @vp_rint_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
@@ -216,7 +202,7 @@ define <4 x float> @vp_rint_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl
 define <4 x float> @vp_rint_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -233,15 +219,11 @@ define <4 x float> @vp_rint_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 define <8 x float> @vp_rint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
@@ -254,7 +236,7 @@ define <8 x float> @vp_rint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl
 define <8 x float> @vp_rint_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -271,15 +253,11 @@ define <8 x float> @vp_rint_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 define <16 x float> @vp_rint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
@@ -292,7 +270,7 @@ define <16 x float> @vp_rint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
 define <16 x float> @vp_rint_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_v16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -309,13 +287,11 @@ define <16 x float> @vp_rint_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl)
 define <2 x double> @vp_rint_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vp_rint_v2f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT:    vfabs.v v9, v8, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI16_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vfabs.v v9, v8
+; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -324,14 +300,12 @@ define <2 x double> @vp_rint_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %e
 ;
 ; RV64-LABEL: vp_rint_v2f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64-NEXT:    vfabs.v v9, v8, v0.t
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vfabs.v v9, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
 ; RV64-NEXT:    fmv.d.x fa5, a0
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -344,9 +318,9 @@ define <2 x double> @vp_rint_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %e
 define <2 x double> @vp_rint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vp_rint_v2f64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32-NEXT:    vfabs.v v9, v8
 ; RV32-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -357,7 +331,7 @@ define <2 x double> @vp_rint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ;
 ; RV64-LABEL: vp_rint_v2f64_unmasked:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT:    vfabs.v v9, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
@@ -375,15 +349,11 @@ define <2 x double> @vp_rint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 define <4 x double> @vp_rint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vp_rint_v4f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT:    vmv1r.v v12, v0
 ; RV32-NEXT:    lui a0, %hi(.LCPI18_0)
 ; RV32-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
-; RV32-NEXT:    vfabs.v v10, v8, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; RV32-NEXT:    vmv1r.v v0, v12
-; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT:    vfabs.v v10, v8
+; RV32-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
@@ -392,16 +362,12 @@ define <4 x double> @vp_rint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %e
 ;
 ; RV64-LABEL: vp_rint_v4f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64-NEXT:    vmv1r.v v12, v0
-; RV64-NEXT:    vfabs.v v10, v8, v0.t
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vfabs.v v10, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
 ; RV64-NEXT:    fmv.d.x fa5, a0
-; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; RV64-NEXT:    vmv1r.v v0, v12
-; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
@@ -414,9 +380,9 @@ define <4 x double> @vp_rint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %e
 define <4 x double> @vp_rint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vp_rint_v4f64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI19_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT:    vfabs.v v10, v8
 ; RV32-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32-NEXT:    vfcvt.x.f.v v10, v8, v0.t
@@ -427,7 +393,7 @@ define <4 x double> @vp_rint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ;
 ; RV64-LABEL: vp_rint_v4f64_unmasked:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vfabs.v v10, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
@@ -445,15 +411,11 @@ define <4 x double> @vp_rint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 define <8 x double> @vp_rint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vp_rint_v8f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32-NEXT:    vmv1r.v v16, v0
 ; RV32-NEXT:    lui a0, %hi(.LCPI20_0)
 ; RV32-NEXT:    fld fa5, %lo(.LCPI20_0)(a0)
-; RV32-NEXT:    vfabs.v v12, v8, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; RV32-NEXT:    vmv1r.v v0, v16
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT:    vfabs.v v12, v8
+; RV32-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -462,16 +424,12 @@ define <8 x double> @vp_rint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %e
 ;
 ; RV64-LABEL: vp_rint_v8f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64-NEXT:    vmv1r.v v16, v0
-; RV64-NEXT:    vfabs.v v12, v8, v0.t
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT:    vfabs.v v12, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
 ; RV64-NEXT:    fmv.d.x fa5, a0
-; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; RV64-NEXT:    vmv1r.v v0, v16
-; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -484,9 +442,9 @@ define <8 x double> @vp_rint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %e
 define <8 x double> @vp_rint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vp_rint_v8f64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI21_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI21_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32-NEXT:    vfabs.v v12, v8
 ; RV32-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -497,7 +455,7 @@ define <8 x double> @vp_rint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ;
 ; RV64-LABEL: vp_rint_v8f64_unmasked:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vfabs.v v12, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
@@ -515,15 +473,11 @@ define <8 x double> @vp_rint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 define <15 x double> @vp_rint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vp_rint_v15f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT:    vmv1r.v v24, v0
 ; RV32-NEXT:    lui a0, %hi(.LCPI22_0)
 ; RV32-NEXT:    fld fa5, %lo(.LCPI22_0)(a0)
-; RV32-NEXT:    vfabs.v v16, v8, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV32-NEXT:    vmv1r.v v0, v24
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT:    vfabs.v v16, v8
+; RV32-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -532,16 +486,12 @@ define <15 x double> @vp_rint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroex
 ;
 ; RV64-LABEL: vp_rint_v15f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64-NEXT:    vmv1r.v v24, v0
-; RV64-NEXT:    vfabs.v v16, v8, v0.t
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT:    vfabs.v v16, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
 ; RV64-NEXT:    fmv.d.x fa5, a0
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV64-NEXT:    vmv1r.v v0, v24
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -554,9 +504,9 @@ define <15 x double> @vp_rint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroex
 define <15 x double> @vp_rint_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vp_rint_v15f64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI23_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vfabs.v v16, v8
 ; RV32-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32-NEXT:    vfcvt.x.f.v v16, v8, v0.t
@@ -567,7 +517,7 @@ define <15 x double> @vp_rint_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
 ;
 ; RV64-LABEL: vp_rint_v15f64_unmasked:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vfabs.v v16, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
@@ -585,15 +535,11 @@ define <15 x double> @vp_rint_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
 define <16 x double> @vp_rint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vp_rint_v16f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT:    vmv1r.v v24, v0
 ; RV32-NEXT:    lui a0, %hi(.LCPI24_0)
 ; RV32-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
-; RV32-NEXT:    vfabs.v v16, v8, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV32-NEXT:    vmv1r.v v0, v24
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT:    vfabs.v v16, v8
+; RV32-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -602,16 +548,12 @@ define <16 x double> @vp_rint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroex
 ;
 ; RV64-LABEL: vp_rint_v16f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64-NEXT:    vmv1r.v v24, v0
-; RV64-NEXT:    vfabs.v v16, v8, v0.t
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT:    vfabs.v v16, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
 ; RV64-NEXT:    fmv.d.x fa5, a0
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV64-NEXT:    vmv1r.v v0, v24
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -624,9 +566,9 @@ define <16 x double> @vp_rint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroex
 define <16 x double> @vp_rint_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vp_rint_v16f64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI25_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI25_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vfabs.v v16, v8
 ; RV32-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32-NEXT:    vfcvt.x.f.v v16, v8, v0.t
@@ -637,7 +579,7 @@ define <16 x double> @vp_rint_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
 ;
 ; RV64-LABEL: vp_rint_v16f64_unmasked:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vfabs.v v16, v8
 ; RV64-NEXT:    li a0, 1075
 ; RV64-NEXT:    slli a0, a0, 52
@@ -655,38 +597,18 @@ define <16 x double> @vp_rint_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
 define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
 ; RV32-LABEL: vp_rint_v32f64:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT:    vmv1r.v v6, v0
-; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    vslidedown.vi v7, v0, 2
-; RV32-NEXT:    mv a1, a0
-; RV32-NEXT:    bltu a0, a2, .LBB26_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    li a1, 16
-; RV32-NEXT:  .LBB26_2:
-; RV32-NEXT:    vmv1r.v v0, v6
-; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT:    vfabs.v v24, v8, v0.t
-; RV32-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
-; RV32-NEXT:    addi a1, a0, -16
-; RV32-NEXT:    sltu a0, a0, a1
-; RV32-NEXT:    addi a0, a0, -1
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32-NEXT:    and a0, a0, a1
-; RV32-NEXT:    vmv1r.v v0, v6
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT:    vfabs.v v24, v8
+; RV32-NEXT:    vmflt.vf v0, v24, fa5
+; RV32-NEXT:    vfabs.v v24, v16
+; RV32-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v7
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT:    vfabs.v v24, v16, v0.t
-; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV32-NEXT:    vmv1r.v v0, v7
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v24, v24, v0.t
@@ -696,39 +618,19 @@ define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
 ;
 ; RV64-LABEL: vp_rint_v32f64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV64-NEXT:    vmv1r.v v6, v0
-; RV64-NEXT:    li a2, 16
-; RV64-NEXT:    vslidedown.vi v7, v0, 2
-; RV64-NEXT:    mv a1, a0
-; RV64-NEXT:    bltu a0, a2, .LBB26_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    li a1, 16
-; RV64-NEXT:  .LBB26_2:
-; RV64-NEXT:    vmv1r.v v0, v6
-; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT:    vfabs.v v24, v8, v0.t
-; RV64-NEXT:    li a1, 1075
-; RV64-NEXT:    slli a1, a1, 52
-; RV64-NEXT:    fmv.d.x fa5, a1
-; RV64-NEXT:    addi a1, a0, -16
-; RV64-NEXT:    sltu a0, a0, a1
-; RV64-NEXT:    addi a0, a0, -1
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64-NEXT:    and a0, a0, a1
-; RV64-NEXT:    vmv1r.v v0, v6
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT:    vfabs.v v24, v8
+; RV64-NEXT:    li a0, 1075
+; RV64-NEXT:    slli a0, a0, 52
+; RV64-NEXT:    fmv.d.x fa5, a0
+; RV64-NEXT:    vmflt.vf v0, v24, fa5
+; RV64-NEXT:    vfabs.v v24, v16
+; RV64-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64-NEXT:    vmv1r.v v0, v7
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64-NEXT:    vfabs.v v24, v16, v0.t
-; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV64-NEXT:    vmv1r.v v0, v7
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v24, v24, v0.t
@@ -742,31 +644,19 @@ define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
 define <32 x double> @vp_rint_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
 ; RV32-LABEL: vp_rint_v32f64_unmasked:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    li a2, 16
-; RV32-NEXT:    mv a1, a0
-; RV32-NEXT:    bltu a0, a2, .LBB27_2
-; RV32-NEXT:  # %bb.1:
-; RV32-NEXT:    li a1, 16
-; RV32-NEXT:  .LBB27_2:
-; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT:    lui a0, %hi(.LCPI27_0)
+; RV32-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
+; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32-NEXT:    vfabs.v v24, v8
-; RV32-NEXT:    lui a2, %hi(.LCPI27_0)
-; RV32-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; RV32-NEXT:    addi a2, a0, -16
-; RV32-NEXT:    sltu a0, a0, a2
-; RV32-NEXT:    addi a0, a0, -1
-; RV32-NEXT:    and a0, a0, a2
 ; RV32-NEXT:    vmflt.vf v0, v24, fa5
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV32-NEXT:    vfabs.v v24, v16
 ; RV32-NEXT:    vmflt.vf v7, v24, fa5
-; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32-NEXT:    vmv1r.v v0, v7
-; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -775,32 +665,20 @@ define <32 x double> @vp_rint_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
 ;
 ; RV64-LABEL: vp_rint_v32f64_unmasked:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    li a2, 16
-; RV64-NEXT:    mv a1, a0
-; RV64-NEXT:    bltu a0, a2, .LBB27_2
-; RV64-NEXT:  # %bb.1:
-; RV64-NEXT:    li a1, 16
-; RV64-NEXT:  .LBB27_2:
-; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vfabs.v v24, v8
-; RV64-NEXT:    li a2, 1075
-; RV64-NEXT:    slli a2, a2, 52
-; RV64-NEXT:    fmv.d.x fa5, a2
-; RV64-NEXT:    addi a2, a0, -16
-; RV64-NEXT:    sltu a0, a0, a2
-; RV64-NEXT:    addi a0, a0, -1
+; RV64-NEXT:    li a0, 1075
+; RV64-NEXT:    slli a0, a0, 52
+; RV64-NEXT:    fmv.d.x fa5, a0
 ; RV64-NEXT:    vmflt.vf v0, v24, fa5
-; RV64-NEXT:    and a0, a0, a2
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64-NEXT:    vfabs.v v24, v16
 ; RV64-NEXT:    vmflt.vf v7, v24, fa5
-; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64-NEXT:    vmv1r.v v0, v7
-; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
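
The rint checks above are the same pattern without the frflags/fsflags
bracketing, since rint is permitted to raise the inexact flag; the round checks
that follow instead pin the rounding mode with fsrmi a0, 4 / fsrm
(round-to-nearest, ties to max magnitude) around the conversion. A sketch of
the unmasked form, matching the splat-true mask the _unmasked tests pass
(function name illustrative); after a codegen change, running
llvm/utils/update_llc_test_checks.py on a test file regenerates its CHECK
lines from its RUN lines:

; Sketch only, not part of the patch.
declare <2 x double> @llvm.vp.rint.v2f64(<2 x double>, <2 x i1>, i32)

define <2 x double> @sketch_rint_unmasked(<2 x double> %va, i32 zeroext %evl) {
  %v = call <2 x double> @llvm.vp.rint.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
  ret <2 x double> %v
}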

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
index ebac7656be29a..91c7b45440621 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -11,15 +11,13 @@
 define <2 x half> @vp_round_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_v2f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 4
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -29,27 +27,21 @@ define <2 x half> @vp_round_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 ;
 ; ZVFHMIN-LABEL: vp_round_v2f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <2 x half> @llvm.vp.round.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
   ret <2 x half> %v
@@ -58,7 +50,7 @@ define <2 x half> @vp_round_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
 define <2 x half> @vp_round_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_v2f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -74,7 +66,7 @@ define <2 x half> @vp_round_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ;
 ; ZVFHMIN-LABEL: vp_round_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -97,15 +89,13 @@ define <2 x half> @vp_round_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 define <4 x half> @vp_round_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_v4f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 4
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -115,27 +105,21 @@ define <4 x half> @vp_round_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 ;
 ; ZVFHMIN-LABEL: vp_round_v4f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vmv.v.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <4 x half> @llvm.vp.round.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x half> %v
@@ -144,7 +128,7 @@ define <4 x half> @vp_round_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
 define <4 x half> @vp_round_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_v4f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -160,7 +144,7 @@ define <4 x half> @vp_round_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ;
 ; ZVFHMIN-LABEL: vp_round_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -183,15 +167,13 @@ define <4 x half> @vp_round_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_v8f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 4
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -201,27 +183,21 @@ define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 ;
 ; ZVFHMIN-LABEL: vp_round_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v13, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v13, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vmv1r.v v0, v13
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
   %v = call <8 x half> @llvm.vp.round.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
   ret <8 x half> %v
@@ -230,7 +206,7 @@ define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
 define <8 x half> @vp_round_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_v8f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -246,7 +222,7 @@ define <8 x half> @vp_round_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ;
 ; ZVFHMIN-LABEL: vp_round_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -269,17 +245,13 @@ define <8 x half> @vp_round_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_v16f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFH-NEXT:    vmv1r.v v12, v0
-; ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; ZVFH-NEXT:    fsrmi a0, 4
-; ZVFH-NEXT:    vmv1r.v v0, v12
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -289,27 +261,21 @@ define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
 ;
 ; ZVFHMIN-LABEL: vp_round_v16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v17, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v12
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v17, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vmv1r.v v0, v17
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
   %v = call <16 x half> @llvm.vp.round.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
   ret <16 x half> %v
@@ -318,7 +284,7 @@ define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
 define <16 x half> @vp_round_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_v16f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -334,7 +300,7 @@ define <16 x half> @vp_round_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
 ;
 ; ZVFHMIN-LABEL: vp_round_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -357,14 +323,12 @@ define <16 x half> @vp_round_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
 define <2 x float> @vp_round_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -378,7 +342,7 @@ define <2 x float> @vp_round_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %ev
 define <2 x float> @vp_round_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_v2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -397,14 +361,12 @@ define <2 x float> @vp_round_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 define <4 x float> @vp_round_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -418,7 +380,7 @@ define <4 x float> @vp_round_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %ev
 define <4 x float> @vp_round_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_v4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -437,16 +399,12 @@ define <4 x float> @vp_round_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 define <8 x float> @vp_round_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -460,7 +418,7 @@ define <8 x float> @vp_round_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %ev
 define <8 x float> @vp_round_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_v8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -479,16 +437,12 @@ define <8 x float> @vp_round_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 define <16 x float> @vp_round_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -502,7 +456,7 @@ define <16 x float> @vp_round_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
 define <16 x float> @vp_round_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_v16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -521,14 +475,12 @@ define <16 x float> @vp_round_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
 define <2 x double> @vp_round_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_v2f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI16_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v9, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -538,15 +490,13 @@ define <2 x double> @vp_round_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %
 ;
 ; RV64ZVFH-LABEL: vp_round_v2f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 4
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -556,14 +506,12 @@ define <2 x double> @vp_round_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_v2f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI16_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -573,15 +521,13 @@ define <2 x double> @vp_round_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_v2f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -595,9 +541,9 @@ define <2 x double> @vp_round_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %
 define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_v2f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v9, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
@@ -610,7 +556,7 @@ define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFH-LABEL: vp_round_v2f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -626,9 +572,9 @@ define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_v2f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
@@ -641,7 +587,7 @@ define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_v2f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -661,16 +607,12 @@ define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
 define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_v4f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI18_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v10, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
-; RV32ZVFH-NEXT:    vmv1r.v v0, v12
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -680,17 +622,13 @@ define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %
 ;
 ; RV64ZVFH-LABEL: vp_round_v4f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v12, v0
-; RV64ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 4
-; RV64ZVFH-NEXT:    vmv1r.v v0, v12
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -700,16 +638,12 @@ define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_v4f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI18_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -719,17 +653,13 @@ define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_v4f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -743,9 +673,9 @@ define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %
 define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_v4f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v10, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
@@ -758,7 +688,7 @@ define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFH-LABEL: vp_round_v4f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -774,9 +704,9 @@ define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_v4f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
@@ -789,7 +719,7 @@ define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_v4f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -809,16 +739,12 @@ define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
 define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_v8f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI20_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI20_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v12, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
-; RV32ZVFH-NEXT:    vmv1r.v v0, v16
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -828,17 +754,13 @@ define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %
 ;
 ; RV64ZVFH-LABEL: vp_round_v8f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v16, v0
-; RV64ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 4
-; RV64ZVFH-NEXT:    vmv1r.v v0, v16
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -848,16 +770,12 @@ define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_v8f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI20_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI20_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -867,17 +785,13 @@ define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_v8f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -891,9 +805,9 @@ define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %
 define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_v8f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI21_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI21_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v12, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
@@ -906,7 +820,7 @@ define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFH-LABEL: vp_round_v8f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -922,9 +836,9 @@ define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_v8f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI21_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI21_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
@@ -937,7 +851,7 @@ define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_v8f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -957,16 +871,12 @@ define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
 define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_v15f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI22_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI22_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -976,17 +886,13 @@ define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFH-LABEL: vp_round_v15f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 4
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -996,16 +902,12 @@ define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroe
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_v15f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI22_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI22_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1015,17 +917,13 @@ define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_v15f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1039,9 +937,9 @@ define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroe
 define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_v15f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI23_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
@@ -1054,7 +952,7 @@ define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ;
 ; RV64ZVFH-LABEL: vp_round_v15f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1070,9 +968,9 @@ define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_v15f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI23_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
@@ -1085,7 +983,7 @@ define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_v15f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1105,16 +1003,12 @@ define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
 define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_v16f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI24_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1124,17 +1018,13 @@ define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFH-LABEL: vp_round_v16f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 4
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1144,16 +1034,12 @@ define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroe
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_v16f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI24_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1163,17 +1049,13 @@ define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_v16f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1187,9 +1069,9 @@ define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroe
 define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_v16f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI25_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI25_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
@@ -1202,7 +1084,7 @@ define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ;
 ; RV64ZVFH-LABEL: vp_round_v16f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1218,9 +1100,9 @@ define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_v16f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI25_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI25_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
@@ -1233,7 +1115,7 @@ define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_v16f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1253,41 +1135,21 @@ define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
 define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_v32f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v6, v0
-; RV32ZVFH-NEXT:    li a2, 16
-; RV32ZVFH-NEXT:    vslidedown.vi v7, v0, 2
-; RV32ZVFH-NEXT:    mv a1, a0
-; RV32ZVFH-NEXT:    bltu a0, a2, .LBB26_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    li a1, 16
-; RV32ZVFH-NEXT:  .LBB26_2:
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
-; RV32ZVFH-NEXT:    addi a1, a0, -16
-; RV32ZVFH-NEXT:    sltu a0, a0, a1
-; RV32ZVFH-NEXT:    addi a0, a0, -1
-; RV32ZVFH-NEXT:    and a0, a0, a1
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a1, 4
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v24, v8
+; RV32ZVFH-NEXT:    fsrmi a0, 4
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a1
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT:    fsrmi a0, 4
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 4
-; RV32ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
@@ -1298,42 +1160,22 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFH-LABEL: vp_round_v32f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v6, v0
-; RV64ZVFH-NEXT:    li a2, 16
-; RV64ZVFH-NEXT:    vslidedown.vi v7, v0, 2
-; RV64ZVFH-NEXT:    mv a1, a0
-; RV64ZVFH-NEXT:    bltu a0, a2, .LBB26_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    li a1, 16
-; RV64ZVFH-NEXT:  .LBB26_2:
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    li a1, 1075
-; RV64ZVFH-NEXT:    slli a1, a1, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a1
-; RV64ZVFH-NEXT:    addi a1, a0, -16
-; RV64ZVFH-NEXT:    sltu a0, a0, a1
-; RV64ZVFH-NEXT:    addi a0, a0, -1
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    and a0, a0, a1
-; RV64ZVFH-NEXT:    fsrmi a1, 4
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v24, v8
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    fsrmi a0, 4
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a1
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT:    fsrmi a0, 4
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 4
-; RV64ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
@@ -1344,41 +1186,21 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_v32f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v6, v0
-; RV32ZVFHMIN-NEXT:    li a2, 16
-; RV32ZVFHMIN-NEXT:    vslidedown.vi v7, v0, 2
-; RV32ZVFHMIN-NEXT:    mv a1, a0
-; RV32ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    li a1, 16
-; RV32ZVFHMIN-NEXT:  .LBB26_2:
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
-; RV32ZVFHMIN-NEXT:    addi a1, a0, -16
-; RV32ZVFHMIN-NEXT:    sltu a0, a0, a1
-; RV32ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV32ZVFHMIN-NEXT:    and a0, a0, a1
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a1, 4
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a1
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
@@ -1389,42 +1211,22 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_v32f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v6, v0
-; RV64ZVFHMIN-NEXT:    li a2, 16
-; RV64ZVFHMIN-NEXT:    vslidedown.vi v7, v0, 2
-; RV64ZVFHMIN-NEXT:    mv a1, a0
-; RV64ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    li a1, 16
-; RV64ZVFHMIN-NEXT:  .LBB26_2:
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    li a1, 1075
-; RV64ZVFHMIN-NEXT:    slli a1, a1, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a1
-; RV64ZVFHMIN-NEXT:    addi a1, a0, -16
-; RV64ZVFHMIN-NEXT:    sltu a0, a0, a1
-; RV64ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    and a0, a0, a1
-; RV64ZVFHMIN-NEXT:    fsrmi a1, 4
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a1
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
@@ -1439,36 +1241,24 @@ define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
 define <32 x double> @vp_round_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_v32f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    li a2, 16
-; RV32ZVFH-NEXT:    mv a1, a0
-; RV32ZVFH-NEXT:    bltu a0, a2, .LBB27_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    li a1, 16
-; RV32ZVFH-NEXT:  .LBB27_2:
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI27_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v8
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI27_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; RV32ZVFH-NEXT:    addi a2, a0, -16
-; RV32ZVFH-NEXT:    sltu a0, a0, a2
-; RV32ZVFH-NEXT:    addi a0, a0, -1
-; RV32ZVFH-NEXT:    and a0, a0, a2
-; RV32ZVFH-NEXT:    fsrmi a2, 4
+; RV32ZVFH-NEXT:    fsrmi a0, 4
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v16
 ; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a2
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    fsrmi a1, 4
+; RV32ZVFH-NEXT:    fsrmi a0, 4
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    fsrm a1
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1476,37 +1266,25 @@ define <32 x double> @vp_round_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ;
 ; RV64ZVFH-LABEL: vp_round_v32f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    li a2, 16
-; RV64ZVFH-NEXT:    mv a1, a0
-; RV64ZVFH-NEXT:    bltu a0, a2, .LBB27_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    li a1, 16
-; RV64ZVFH-NEXT:  .LBB27_2:
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v8
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    addi a2, a0, -16
-; RV64ZVFH-NEXT:    sltu a0, a0, a2
-; RV64ZVFH-NEXT:    addi a0, a0, -1
-; RV64ZVFH-NEXT:    and a0, a0, a2
-; RV64ZVFH-NEXT:    fsrmi a2, 4
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    fsrmi a0, 4
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v16
 ; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a2
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    fsrmi a1, 4
+; RV64ZVFH-NEXT:    fsrmi a0, 4
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    fsrm a1
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1514,36 +1292,24 @@ define <32 x double> @vp_round_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_v32f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    li a2, 16
-; RV32ZVFHMIN-NEXT:    mv a1, a0
-; RV32ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    li a1, 16
-; RV32ZVFHMIN-NEXT:  .LBB27_2:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI27_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI27_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; RV32ZVFHMIN-NEXT:    addi a2, a0, -16
-; RV32ZVFHMIN-NEXT:    sltu a0, a0, a2
-; RV32ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV32ZVFHMIN-NEXT:    and a0, a0, a2
-; RV32ZVFHMIN-NEXT:    fsrmi a2, 4
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a2
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a1, 4
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a1
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1551,37 +1317,25 @@ define <32 x double> @vp_round_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_v32f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    li a2, 16
-; RV64ZVFHMIN-NEXT:    mv a1, a0
-; RV64ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    li a1, 16
-; RV64ZVFHMIN-NEXT:  .LBB27_2:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    addi a2, a0, -16
-; RV64ZVFHMIN-NEXT:    sltu a0, a0, a2
-; RV64ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV64ZVFHMIN-NEXT:    and a0, a0, a2
-; RV64ZVFHMIN-NEXT:    fsrmi a2, 4
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a2
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a1, 4
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a1
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t

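[Illustrative sketch, not part of the patch: the churn above follows from ExpandVectorPredication now expanding these rounding intrinsics. A trivial VP call such as

  %v = call <4 x half> @llvm.vp.round.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)

is rewritten to the unpredicated equivalent, dropping the mask and EVL (masked-off result lanes are unspecified, so the unmasked form is a valid expansion):

  %v = call <4 x half> @llvm.round.v4f16(<4 x half> %va)

Hence the checks switch from "vsetvli zero, a0" (EVL in a0) to "vsetivli zero, 4" (the full fixed vector length) and lose the v0.t mask operands on most instructions.]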
diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
index 43566185a200b..5a42a532e513a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -11,15 +11,13 @@
 define <2 x half> @vp_roundeven_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_v2f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -29,27 +27,21 @@ define <2 x half> @vp_roundeven_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v2f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <2 x half> @llvm.vp.roundeven.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
   ret <2 x half> %v
@@ -58,7 +50,7 @@ define <2 x half> @vp_roundeven_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %
 define <2 x half> @vp_roundeven_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_v2f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -74,7 +66,7 @@ define <2 x half> @vp_roundeven_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl)
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -97,15 +89,13 @@ define <2 x half> @vp_roundeven_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl)
 define <4 x half> @vp_roundeven_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_v4f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -115,27 +105,21 @@ define <4 x half> @vp_roundeven_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v4f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vmv.v.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <4 x half> @llvm.vp.roundeven.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x half> %v
@@ -144,7 +128,7 @@ define <4 x half> @vp_roundeven_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %
 define <4 x half> @vp_roundeven_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_v4f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -160,7 +144,7 @@ define <4 x half> @vp_roundeven_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -183,15 +167,13 @@ define <4 x half> @vp_roundeven_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl)
 define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_v8f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -201,27 +183,21 @@ define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v13, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v13, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v13
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
   %v = call <8 x half> @llvm.vp.roundeven.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
   ret <8 x half> %v
@@ -230,7 +206,7 @@ define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %
 define <8 x half> @vp_roundeven_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_v8f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -246,7 +222,7 @@ define <8 x half> @vp_roundeven_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl)
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -269,17 +245,13 @@ define <8 x half> @vp_roundeven_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl)
 define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_v16f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFH-NEXT:    vmv1r.v v12, v0
-; ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; ZVFH-NEXT:    fsrmi a0, 0
-; ZVFH-NEXT:    vmv1r.v v0, v12
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -289,27 +261,21 @@ define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v17, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v12
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v17, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v17
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
   %v = call <16 x half> @llvm.vp.roundeven.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
   ret <16 x half> %v
@@ -318,7 +284,7 @@ define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
 define <16 x half> @vp_roundeven_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_v16f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -334,7 +300,7 @@ define <16 x half> @vp_roundeven_v16f16_unmasked(<16 x half> %va, i32 zeroext %e
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -357,14 +323,12 @@ define <16 x half> @vp_roundeven_v16f16_unmasked(<16 x half> %va, i32 zeroext %e
 define <2 x float> @vp_roundeven_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -378,7 +342,7 @@ define <2 x float> @vp_roundeven_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext
 define <2 x float> @vp_roundeven_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_v2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -397,14 +361,12 @@ define <2 x float> @vp_roundeven_v2f32_unmasked(<2 x float> %va, i32 zeroext %ev
 define <4 x float> @vp_roundeven_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -418,7 +380,7 @@ define <4 x float> @vp_roundeven_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext
 define <4 x float> @vp_roundeven_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_v4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -437,16 +399,12 @@ define <4 x float> @vp_roundeven_v4f32_unmasked(<4 x float> %va, i32 zeroext %ev
 define <8 x float> @vp_roundeven_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -460,7 +418,7 @@ define <8 x float> @vp_roundeven_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext
 define <8 x float> @vp_roundeven_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_v8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -479,16 +437,12 @@ define <8 x float> @vp_roundeven_v8f32_unmasked(<8 x float> %va, i32 zeroext %ev
 define <16 x float> @vp_roundeven_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -502,7 +456,7 @@ define <16 x float> @vp_roundeven_v16f32(<16 x float> %va, <16 x i1> %m, i32 zer
 define <16 x float> @vp_roundeven_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_v16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -521,14 +475,12 @@ define <16 x float> @vp_roundeven_v16f32_unmasked(<16 x float> %va, i32 zeroext
 define <2 x double> @vp_roundeven_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_v2f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI16_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v9, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -538,15 +490,13 @@ define <2 x double> @vp_roundeven_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_v2f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -556,14 +506,12 @@ define <2 x double> @vp_roundeven_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroe
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_v2f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI16_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -573,15 +521,13 @@ define <2 x double> @vp_roundeven_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_v2f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -595,9 +541,9 @@ define <2 x double> @vp_roundeven_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroe
 define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_v2f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v9, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
@@ -610,7 +556,7 @@ define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext %
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_v2f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -626,9 +572,9 @@ define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext %
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_v2f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
@@ -641,7 +587,7 @@ define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext %
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_v2f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -661,16 +607,12 @@ define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext %
 define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_v4f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI18_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v10, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v12
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -680,17 +622,13 @@ define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_v4f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v12, v0
-; RV64ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v12
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -700,16 +638,12 @@ define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroe
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_v4f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI18_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -719,17 +653,13 @@ define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_v4f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -743,9 +673,9 @@ define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroe
 define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_v4f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v10, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
@@ -758,7 +688,7 @@ define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext %
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_v4f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -774,9 +704,9 @@ define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext %
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_v4f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
@@ -789,7 +719,7 @@ define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext %
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_v4f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -809,16 +739,12 @@ define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext %
 define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_v8f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI20_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI20_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v12, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v16
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -828,17 +754,13 @@ define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_v8f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v16, v0
-; RV64ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v16
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -848,16 +770,12 @@ define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroe
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_v8f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI20_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI20_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -867,17 +785,13 @@ define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroe
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_v8f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -891,9 +805,9 @@ define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroe
 define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_v8f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI21_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI21_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v12, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
@@ -906,7 +820,7 @@ define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext %
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_v8f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -922,9 +836,9 @@ define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext %
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_v8f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI21_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI21_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
@@ -937,7 +851,7 @@ define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext %
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_v8f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -957,16 +871,12 @@ define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext %
 define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_v15f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI22_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI22_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -976,17 +886,13 @@ define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 z
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_v15f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -996,16 +902,12 @@ define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 z
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_v15f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI22_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI22_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1015,17 +917,13 @@ define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 z
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_v15f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1039,9 +937,9 @@ define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 z
 define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_v15f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI23_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
@@ -1054,7 +952,7 @@ define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroex
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_v15f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1070,9 +968,9 @@ define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroex
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_v15f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI23_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
@@ -1085,7 +983,7 @@ define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroex
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_v15f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1105,16 +1003,12 @@ define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroex
 define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_v16f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI24_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1124,17 +1018,13 @@ define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 z
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_v16f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1144,16 +1034,12 @@ define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 z
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_v16f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI24_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1163,17 +1049,13 @@ define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 z
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_v16f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1187,9 +1069,9 @@ define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 z
 define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_v16f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI25_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI25_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
@@ -1202,7 +1084,7 @@ define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroex
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_v16f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1218,9 +1100,9 @@ define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroex
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_v16f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI25_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI25_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
@@ -1233,7 +1115,7 @@ define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroex
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_v16f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1253,41 +1135,21 @@ define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroex
 define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_v32f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v6, v0
-; RV32ZVFH-NEXT:    li a2, 16
-; RV32ZVFH-NEXT:    vslidedown.vi v7, v0, 2
-; RV32ZVFH-NEXT:    mv a1, a0
-; RV32ZVFH-NEXT:    bltu a0, a2, .LBB26_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    li a1, 16
-; RV32ZVFH-NEXT:  .LBB26_2:
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
-; RV32ZVFH-NEXT:    addi a1, a0, -16
-; RV32ZVFH-NEXT:    sltu a0, a0, a1
-; RV32ZVFH-NEXT:    addi a0, a0, -1
-; RV32ZVFH-NEXT:    and a0, a0, a1
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a1, 0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v24, v8
+; RV32ZVFH-NEXT:    fsrmi a0, 0
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a1
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT:    fsrmi a0, 0
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
@@ -1298,42 +1160,22 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_v32f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v6, v0
-; RV64ZVFH-NEXT:    li a2, 16
-; RV64ZVFH-NEXT:    vslidedown.vi v7, v0, 2
-; RV64ZVFH-NEXT:    mv a1, a0
-; RV64ZVFH-NEXT:    bltu a0, a2, .LBB26_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    li a1, 16
-; RV64ZVFH-NEXT:  .LBB26_2:
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    li a1, 1075
-; RV64ZVFH-NEXT:    slli a1, a1, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a1
-; RV64ZVFH-NEXT:    addi a1, a0, -16
-; RV64ZVFH-NEXT:    sltu a0, a0, a1
-; RV64ZVFH-NEXT:    addi a0, a0, -1
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    and a0, a0, a1
-; RV64ZVFH-NEXT:    fsrmi a1, 0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v24, v8
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    fsrmi a0, 0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a1
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT:    fsrmi a0, 0
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
@@ -1344,41 +1186,21 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_v32f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v6, v0
-; RV32ZVFHMIN-NEXT:    li a2, 16
-; RV32ZVFHMIN-NEXT:    vslidedown.vi v7, v0, 2
-; RV32ZVFHMIN-NEXT:    mv a1, a0
-; RV32ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    li a1, 16
-; RV32ZVFHMIN-NEXT:  .LBB26_2:
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
-; RV32ZVFHMIN-NEXT:    addi a1, a0, -16
-; RV32ZVFHMIN-NEXT:    sltu a0, a0, a1
-; RV32ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV32ZVFHMIN-NEXT:    and a0, a0, a1
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a1, 0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a1
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
@@ -1389,42 +1211,22 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_v32f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v6, v0
-; RV64ZVFHMIN-NEXT:    li a2, 16
-; RV64ZVFHMIN-NEXT:    vslidedown.vi v7, v0, 2
-; RV64ZVFHMIN-NEXT:    mv a1, a0
-; RV64ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    li a1, 16
-; RV64ZVFHMIN-NEXT:  .LBB26_2:
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    li a1, 1075
-; RV64ZVFHMIN-NEXT:    slli a1, a1, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a1
-; RV64ZVFHMIN-NEXT:    addi a1, a0, -16
-; RV64ZVFHMIN-NEXT:    sltu a0, a0, a1
-; RV64ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    and a0, a0, a1
-; RV64ZVFHMIN-NEXT:    fsrmi a1, 0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a1
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
@@ -1439,36 +1241,24 @@ define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
 define <32 x double> @vp_roundeven_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_v32f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    li a2, 16
-; RV32ZVFH-NEXT:    mv a1, a0
-; RV32ZVFH-NEXT:    bltu a0, a2, .LBB27_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    li a1, 16
-; RV32ZVFH-NEXT:  .LBB27_2:
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI27_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v8
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI27_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; RV32ZVFH-NEXT:    addi a2, a0, -16
-; RV32ZVFH-NEXT:    sltu a0, a0, a2
-; RV32ZVFH-NEXT:    addi a0, a0, -1
-; RV32ZVFH-NEXT:    and a0, a0, a2
-; RV32ZVFH-NEXT:    fsrmi a2, 0
+; RV32ZVFH-NEXT:    fsrmi a0, 0
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v16
 ; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a2
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    fsrmi a1, 0
+; RV32ZVFH-NEXT:    fsrmi a0, 0
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    fsrm a1
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1476,37 +1266,25 @@ define <32 x double> @vp_roundeven_v32f64_unmasked(<32 x double> %va, i32 zeroex
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_v32f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    li a2, 16
-; RV64ZVFH-NEXT:    mv a1, a0
-; RV64ZVFH-NEXT:    bltu a0, a2, .LBB27_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    li a1, 16
-; RV64ZVFH-NEXT:  .LBB27_2:
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v8
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    addi a2, a0, -16
-; RV64ZVFH-NEXT:    sltu a0, a0, a2
-; RV64ZVFH-NEXT:    addi a0, a0, -1
-; RV64ZVFH-NEXT:    and a0, a0, a2
-; RV64ZVFH-NEXT:    fsrmi a2, 0
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    fsrmi a0, 0
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v16
 ; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a2
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    fsrmi a1, 0
+; RV64ZVFH-NEXT:    fsrmi a0, 0
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    fsrm a1
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1514,36 +1292,24 @@ define <32 x double> @vp_roundeven_v32f64_unmasked(<32 x double> %va, i32 zeroex
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_v32f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    li a2, 16
-; RV32ZVFHMIN-NEXT:    mv a1, a0
-; RV32ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    li a1, 16
-; RV32ZVFHMIN-NEXT:  .LBB27_2:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI27_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI27_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; RV32ZVFHMIN-NEXT:    addi a2, a0, -16
-; RV32ZVFHMIN-NEXT:    sltu a0, a0, a2
-; RV32ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV32ZVFHMIN-NEXT:    and a0, a0, a2
-; RV32ZVFHMIN-NEXT:    fsrmi a2, 0
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a2
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a1, 0
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a1
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1551,37 +1317,25 @@ define <32 x double> @vp_roundeven_v32f64_unmasked(<32 x double> %va, i32 zeroex
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_v32f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    li a2, 16
-; RV64ZVFHMIN-NEXT:    mv a1, a0
-; RV64ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    li a1, 16
-; RV64ZVFHMIN-NEXT:  .LBB27_2:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    addi a2, a0, -16
-; RV64ZVFHMIN-NEXT:    sltu a0, a0, a2
-; RV64ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV64ZVFHMIN-NEXT:    and a0, a0, a2
-; RV64ZVFHMIN-NEXT:    fsrmi a2, 0
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a2
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a1, 0
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a1
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
index 7cae17e8e1ec7..ae48950650fde 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -11,17 +11,13 @@
 define <2 x half> @vp_roundtozero_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_v2f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -29,27 +25,19 @@ define <2 x half> @vp_roundtozero_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v2f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half> %va, <2 x i1> %m, i32 %evl)
   ret <2 x half> %v
@@ -58,15 +46,13 @@ define <2 x half> @vp_roundtozero_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext
 define <2 x half> @vp_roundtozero_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_v2f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
 ; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -74,16 +60,14 @@ define <2 x half> @vp_roundtozero_v2f16_unmasked(<2 x half> %va, i32 zeroext %ev
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
@@ -97,17 +81,13 @@ define <2 x half> @vp_roundtozero_v2f16_unmasked(<2 x half> %va, i32 zeroext %ev
 define <4 x half> @vp_roundtozero_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_v4f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -115,27 +95,19 @@ define <4 x half> @vp_roundtozero_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v4f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vmv.v.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half> %va, <4 x i1> %m, i32 %evl)
   ret <4 x half> %v
@@ -144,15 +116,13 @@ define <4 x half> @vp_roundtozero_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext
 define <4 x half> @vp_roundtozero_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_v4f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
 ; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -160,16 +130,14 @@ define <4 x half> @vp_roundtozero_v4f16_unmasked(<4 x half> %va, i32 zeroext %ev
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
@@ -183,17 +151,13 @@ define <4 x half> @vp_roundtozero_v4f16_unmasked(<4 x half> %va, i32 zeroext %ev
 define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_v8f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -201,27 +165,19 @@ define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v13, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v13, v8, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vmv1r.v v0, v13
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
   %v = call <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half> %va, <8 x i1> %m, i32 %evl)
   ret <8 x half> %v
@@ -230,15 +186,13 @@ define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext
 define <8 x half> @vp_roundtozero_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_v8f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
 ; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -246,16 +200,14 @@ define <8 x half> @vp_roundtozero_v8f16_unmasked(<8 x half> %va, i32 zeroext %ev
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
@@ -269,19 +221,13 @@ define <8 x half> @vp_roundtozero_v8f16_unmasked(<8 x half> %va, i32 zeroext %ev
 define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_v16f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFH-NEXT:    vmv1r.v v12, v0
-; ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vmv1r.v v0, v12
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -289,27 +235,19 @@ define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zer
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v17, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v12
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v17, v8, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vmv1r.v v0, v17
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
   %v = call <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half> %va, <16 x i1> %m, i32 %evl)
   ret <16 x half> %v
@@ -318,15 +256,13 @@ define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zer
 define <16 x half> @vp_roundtozero_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_v16f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
 ; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -334,16 +270,14 @@ define <16 x half> @vp_roundtozero_v16f16_unmasked(<16 x half> %va, i32 zeroext
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_v16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v8, v12
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
@@ -357,16 +291,12 @@ define <16 x half> @vp_roundtozero_v16f16_unmasked(<16 x half> %va, i32 zeroext
 define <2 x float> @vp_roundtozero_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -378,14 +308,12 @@ define <2 x float> @vp_roundtozero_v2f32(<2 x float> %va, <2 x i1> %m, i32 zeroe
 define <2 x float> @vp_roundtozero_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_v2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -397,16 +325,12 @@ define <2 x float> @vp_roundtozero_v2f32_unmasked(<2 x float> %va, i32 zeroext %
 define <4 x float> @vp_roundtozero_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -418,14 +342,12 @@ define <4 x float> @vp_roundtozero_v4f32(<4 x float> %va, <4 x i1> %m, i32 zeroe
 define <4 x float> @vp_roundtozero_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_v4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -437,18 +359,12 @@ define <4 x float> @vp_roundtozero_v4f32_unmasked(<4 x float> %va, i32 zeroext %
 define <8 x float> @vp_roundtozero_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_v8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -460,14 +376,12 @@ define <8 x float> @vp_roundtozero_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroe
 define <8 x float> @vp_roundtozero_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_v8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v10, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -479,18 +393,12 @@ define <8 x float> @vp_roundtozero_v8f32_unmasked(<8 x float> %va, i32 zeroext %
 define <16 x float> @vp_roundtozero_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_v16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -502,14 +410,12 @@ define <16 x float> @vp_roundtozero_v16f32(<16 x float> %va, <16 x i1> %m, i32 z
 define <16 x float> @vp_roundtozero_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_v16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v12, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -521,16 +427,12 @@ define <16 x float> @vp_roundtozero_v16f32_unmasked(<16 x float> %va, i32 zeroex
 define <2 x double> @vp_roundtozero_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_v2f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI16_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v9, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -538,17 +440,13 @@ define <2 x double> @vp_roundtozero_v2f64(<2 x double> %va, <2 x i1> %m, i32 zer
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_v2f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -556,16 +454,12 @@ define <2 x double> @vp_roundtozero_v2f64(<2 x double> %va, <2 x i1> %m, i32 zer
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_v2f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI16_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI16_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -573,17 +467,13 @@ define <2 x double> @vp_roundtozero_v2f64(<2 x double> %va, <2 x i1> %m, i32 zer
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_v2f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -595,14 +485,12 @@ define <2 x double> @vp_roundtozero_v2f64(<2 x double> %va, <2 x i1> %m, i32 zer
 define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_v2f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v9, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -610,15 +498,13 @@ define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 zeroext
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_v2f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -626,14 +512,12 @@ define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 zeroext
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_v2f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI17_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -641,15 +525,13 @@ define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 zeroext
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_v2f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -661,18 +543,12 @@ define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 zeroext
 define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_v4f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI18_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vmv1r.v v0, v12
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v10, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -680,19 +556,13 @@ define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zer
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_v4f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v12, v0
-; RV64ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vmv1r.v v0, v12
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -700,18 +570,12 @@ define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zer
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_v4f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI18_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -719,19 +583,13 @@ define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zer
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_v4f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -743,14 +601,12 @@ define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zer
 define <4 x double> @vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_v4f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v10, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -758,15 +614,13 @@ define <4 x double> @vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_v4f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v10, fa5
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -774,14 +628,12 @@ define <4 x double> @vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_v4f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI19_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -789,15 +641,13 @@ define <4 x double> @vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_v4f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -809,18 +659,12 @@ define <4 x double> @vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext
 define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_v8f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI20_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI20_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vmv1r.v v0, v16
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v12, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -828,19 +672,13 @@ define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zer
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_v8f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v16, v0
-; RV64ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vmv1r.v v0, v16
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -848,18 +686,12 @@ define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zer
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_v8f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI20_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI20_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -867,19 +699,13 @@ define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zer
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_v8f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -891,14 +717,12 @@ define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zer
 define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_v8f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI21_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI21_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v12, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -906,15 +730,13 @@ define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_v8f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v12, fa5
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -922,14 +744,12 @@ define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_v8f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI21_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI21_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -937,15 +757,13 @@ define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_v8f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -957,18 +775,12 @@ define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext
 define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_v15f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI22_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI22_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -976,19 +788,13 @@ define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_v15f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -996,18 +802,12 @@ define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_v15f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI22_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI22_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1015,19 +815,13 @@ define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_v15f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1039,14 +833,12 @@ define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32
 define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_v15f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI23_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1054,15 +846,13 @@ define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zero
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_v15f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1070,14 +860,12 @@ define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zero
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_v15f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI23_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI23_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1085,15 +873,13 @@ define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zero
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_v15f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1105,18 +891,12 @@ define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zero
 define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_v16f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI24_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1124,19 +904,13 @@ define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_v16f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1144,18 +918,12 @@ define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_v16f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI24_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1163,19 +931,13 @@ define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_v16f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1187,14 +949,12 @@ define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32
 define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_v16f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI25_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI25_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1202,15 +962,13 @@ define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zero
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_v16f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1218,14 +976,12 @@ define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zero
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_v16f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI25_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI25_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1233,15 +989,13 @@ define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zero
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_v16f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1253,44 +1007,20 @@ define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zero
 define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_v32f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v6, v0
-; RV32ZVFH-NEXT:    li a2, 16
-; RV32ZVFH-NEXT:    vslidedown.vi v7, v0, 2
-; RV32ZVFH-NEXT:    mv a1, a0
-; RV32ZVFH-NEXT:    bltu a0, a2, .LBB26_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    li a1, 16
-; RV32ZVFH-NEXT:  .LBB26_2:
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
-; RV32ZVFH-NEXT:    addi a1, a0, -16
-; RV32ZVFH-NEXT:    sltu a0, a0, a1
-; RV32ZVFH-NEXT:    addi a0, a0, -1
-; RV32ZVFH-NEXT:    and a0, a0, a1
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a1, 1
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a1
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v24, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1298,45 +1028,21 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_v32f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v6, v0
-; RV64ZVFH-NEXT:    li a2, 16
-; RV64ZVFH-NEXT:    vslidedown.vi v7, v0, 2
-; RV64ZVFH-NEXT:    mv a1, a0
-; RV64ZVFH-NEXT:    bltu a0, a2, .LBB26_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    li a1, 16
-; RV64ZVFH-NEXT:  .LBB26_2:
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    li a1, 1075
-; RV64ZVFH-NEXT:    slli a1, a1, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a1
-; RV64ZVFH-NEXT:    addi a1, a0, -16
-; RV64ZVFH-NEXT:    sltu a0, a0, a1
-; RV64ZVFH-NEXT:    addi a0, a0, -1
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    and a0, a0, a1
-; RV64ZVFH-NEXT:    fsrmi a1, 1
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a1
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v24, v8
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1344,44 +1050,20 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_v32f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v6, v0
-; RV32ZVFHMIN-NEXT:    li a2, 16
-; RV32ZVFHMIN-NEXT:    vslidedown.vi v7, v0, 2
-; RV32ZVFHMIN-NEXT:    mv a1, a0
-; RV32ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    li a1, 16
-; RV32ZVFHMIN-NEXT:  .LBB26_2:
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI26_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a1)
-; RV32ZVFHMIN-NEXT:    addi a1, a0, -16
-; RV32ZVFHMIN-NEXT:    sltu a0, a0, a1
-; RV32ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV32ZVFHMIN-NEXT:    and a0, a0, a1
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a1, 1
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a1
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI26_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1389,45 +1071,21 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_v32f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v6, v0
-; RV64ZVFHMIN-NEXT:    li a2, 16
-; RV64ZVFHMIN-NEXT:    vslidedown.vi v7, v0, 2
-; RV64ZVFHMIN-NEXT:    mv a1, a0
-; RV64ZVFHMIN-NEXT:    bltu a0, a2, .LBB26_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    li a1, 16
-; RV64ZVFHMIN-NEXT:  .LBB26_2:
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    li a1, 1075
-; RV64ZVFHMIN-NEXT:    slli a1, a1, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a1
-; RV64ZVFHMIN-NEXT:    addi a1, a0, -16
-; RV64ZVFHMIN-NEXT:    sltu a0, a0, a1
-; RV64ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    and a0, a0, a1
-; RV64ZVFHMIN-NEXT:    fsrmi a1, 1
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a1
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1439,36 +1097,20 @@ define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32
 define <32 x double> @vp_roundtozero_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_v32f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    li a2, 16
-; RV32ZVFH-NEXT:    mv a1, a0
-; RV32ZVFH-NEXT:    bltu a0, a2, .LBB27_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    li a1, 16
-; RV32ZVFH-NEXT:  .LBB27_2:
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI27_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
+; RV32ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v8
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI27_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; RV32ZVFH-NEXT:    addi a2, a0, -16
-; RV32ZVFH-NEXT:    sltu a0, a0, a2
-; RV32ZVFH-NEXT:    addi a0, a0, -1
-; RV32ZVFH-NEXT:    and a0, a0, a2
-; RV32ZVFH-NEXT:    fsrmi a2, 1
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v16
 ; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
-; RV32ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a2
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    fsrmi a1, 1
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    fsrm a1
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1476,37 +1118,21 @@ define <32 x double> @vp_roundtozero_v32f64_unmasked(<32 x double> %va, i32 zero
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_v32f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    li a2, 16
-; RV64ZVFH-NEXT:    mv a1, a0
-; RV64ZVFH-NEXT:    bltu a0, a2, .LBB27_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    li a1, 16
-; RV64ZVFH-NEXT:  .LBB27_2:
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v8
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    addi a2, a0, -16
-; RV64ZVFH-NEXT:    sltu a0, a0, a2
-; RV64ZVFH-NEXT:    addi a0, a0, -1
-; RV64ZVFH-NEXT:    and a0, a0, a2
-; RV64ZVFH-NEXT:    fsrmi a2, 1
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v16
 ; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
-; RV64ZVFH-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a2
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    fsrmi a1, 1
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    fsrm a1
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1514,36 +1140,20 @@ define <32 x double> @vp_roundtozero_v32f64_unmasked(<32 x double> %va, i32 zero
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_v32f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    li a2, 16
-; RV32ZVFHMIN-NEXT:    mv a1, a0
-; RV32ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    li a1, 16
-; RV32ZVFHMIN-NEXT:  .LBB27_2:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI27_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI27_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI27_0)(a2)
-; RV32ZVFHMIN-NEXT:    addi a2, a0, -16
-; RV32ZVFHMIN-NEXT:    sltu a0, a0, a2
-; RV32ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV32ZVFHMIN-NEXT:    and a0, a0, a2
-; RV32ZVFHMIN-NEXT:    fsrmi a2, 1
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a2
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a1, 1
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a1
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
@@ -1551,37 +1161,21 @@ define <32 x double> @vp_roundtozero_v32f64_unmasked(<32 x double> %va, i32 zero
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_v32f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    li a2, 16
-; RV64ZVFHMIN-NEXT:    mv a1, a0
-; RV64ZVFHMIN-NEXT:    bltu a0, a2, .LBB27_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    li a1, 16
-; RV64ZVFHMIN-NEXT:  .LBB27_2:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    addi a2, a0, -16
-; RV64ZVFHMIN-NEXT:    sltu a0, a0, a2
-; RV64ZVFHMIN-NEXT:    addi a0, a0, -1
-; RV64ZVFHMIN-NEXT:    and a0, a0, a2
-; RV64ZVFHMIN-NEXT:    fsrmi a2, 1
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a2
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a1, 1
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a1
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t

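Each function in these test files is a thin wrapper around a single VP intrinsic call. A minimal sketch of the roundtozero pattern exercised above (the function name is hypothetical; the signature matches the tests):

define <8 x double> @roundtozero_example(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
  ; Rounding mode 1 (RTZ) was previously installed dynamically with an
  ; fsrmi/fsrm pair around vfcvt.x.f.v; codegen now selects
  ; vfcvt.rtz.x.f.v, which encodes round-toward-zero statically.
  %v = call <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
  ret <8 x double> %v
}
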
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
index a107cb82964eb..f401b2a432963 100644
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -15,27 +15,21 @@
 define <vscale x 1 x bfloat> @vp_floor_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv1bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v8, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v11, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x bfloat> @llvm.vp.floor.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x bfloat> %v
@@ -44,7 +38,7 @@ define <vscale x 1 x bfloat> @vp_floor_nxv1bf16(<vscale x 1 x bfloat> %va, <vsca
 define <vscale x 1 x bfloat> @vp_floor_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv1bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -67,27 +61,21 @@ define <vscale x 1 x bfloat> @vp_floor_nxv1bf16_unmasked(<vscale x 1 x bfloat> %
 define <vscale x 2 x bfloat> @vp_floor_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv2bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v8, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v11, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.vp.floor.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x bfloat> %v
@@ -96,7 +84,7 @@ define <vscale x 2 x bfloat> @vp_floor_nxv2bf16(<vscale x 2 x bfloat> %va, <vsca
 define <vscale x 2 x bfloat> @vp_floor_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv2bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -119,27 +107,21 @@ define <vscale x 2 x bfloat> @vp_floor_nxv2bf16_unmasked(<vscale x 2 x bfloat> %
 define <vscale x 4 x bfloat> @vp_floor_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv4bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v13, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v10
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v13, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
 ; CHECK-NEXT:    ret
   %v = call <vscale x 4 x bfloat> @llvm.vp.floor.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x bfloat> %v
@@ -148,7 +130,7 @@ define <vscale x 4 x bfloat> @vp_floor_nxv4bf16(<vscale x 4 x bfloat> %va, <vsca
 define <vscale x 4 x bfloat> @vp_floor_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv4bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -171,27 +153,21 @@ define <vscale x 4 x bfloat> @vp_floor_nxv4bf16_unmasked(<vscale x 4 x bfloat> %
 define <vscale x 8 x bfloat> @vp_floor_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv8bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v17, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v12, v0.t
+; CHECK-NEXT:    vfabs.v v8, v12
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v17, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vmv1r.v v0, v17
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x bfloat> @llvm.vp.floor.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x bfloat> %v
@@ -200,7 +176,7 @@ define <vscale x 8 x bfloat> @vp_floor_nxv8bf16(<vscale x 8 x bfloat> %va, <vsca
 define <vscale x 8 x bfloat> @vp_floor_nxv8bf16_unmasked(<vscale x 8 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv8bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -223,27 +199,21 @@ define <vscale x 8 x bfloat> @vp_floor_nxv8bf16_unmasked(<vscale x 8 x bfloat> %
 define <vscale x 16 x bfloat> @vp_floor_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv16bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v16, v0.t
+; CHECK-NEXT:    vfabs.v v8, v16
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x bfloat> @llvm.vp.floor.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x bfloat> %v
@@ -252,7 +222,7 @@ define <vscale x 16 x bfloat> @vp_floor_nxv16bf16(<vscale x 16 x bfloat> %va, <v
 define <vscale x 16 x bfloat> @vp_floor_nxv16bf16_unmasked(<vscale x 16 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv16bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
@@ -275,60 +245,35 @@ define <vscale x 16 x bfloat> @vp_floor_nxv16bf16_unmasked(<vscale x 16 x bfloat
 define <vscale x 32 x bfloat> @vp_floor_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv32bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    lui a3, 307200
-; CHECK-NEXT:    slli a1, a2, 1
-; CHECK-NEXT:    srli a2, a2, 2
-; CHECK-NEXT:    fmv.w.x fa5, a3
-; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vslidedown.vx v6, v0, a2
-; CHECK-NEXT:    sltu a2, a0, a3
-; CHECK-NEXT:    vmv1r.v v5, v6
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a3
-; CHECK-NEXT:    vmv1r.v v0, v6
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v5, v16, fa5, v0.t
-; CHECK-NEXT:    fsrmi a2, 2
-; CHECK-NEXT:    vmv1r.v v0, v5
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    fsrm a2
-; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vfabs.v v24, v16
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
+; CHECK-NEXT:    fsrmi a0, 2
+; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v6
+; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB10_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB10_2:
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v7
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v16, fa5, v0.t
+; CHECK-NEXT:    vfabs.v v8, v24
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v7
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v24, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.floor.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -337,46 +282,12 @@ define <vscale x 32 x bfloat> @vp_floor_nxv32bf16(<vscale x 32 x bfloat> %va, <v
 define <vscale x 32 x bfloat> @vp_floor_nxv32bf16_unmasked(<vscale x 32 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv32bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vmset.m v16
-; CHECK-NEXT:    lui a3, 307200
-; CHECK-NEXT:    slli a1, a2, 1
-; CHECK-NEXT:    srli a2, a2, 2
-; CHECK-NEXT:    fmv.w.x fa5, a3
-; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v7, v16, a2
-; CHECK-NEXT:    sltu a2, a0, a3
-; CHECK-NEXT:    vmv1r.v v6, v7
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a3
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v6, v16, fa5, v0.t
-; CHECK-NEXT:    fsrmi a2, 2
-; CHECK-NEXT:    vmv1r.v v0, v6
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    fsrm a2
-; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB11_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB11_2:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v16
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
@@ -385,7 +296,21 @@ define <vscale x 32 x bfloat> @vp_floor_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfabs.v v8, v24
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    fsrmi a0, 2
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.floor.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -394,15 +319,13 @@ define <vscale x 32 x bfloat> @vp_floor_nxv32bf16_unmasked(<vscale x 32 x bfloat
 define <vscale x 1 x half> @vp_floor_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_nxv1f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 2
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -412,27 +335,21 @@ define <vscale x 1 x half> @vp_floor_nxv1f16(<vscale x 1 x half> %va, <vscale x
 ;
 ; ZVFHMIN-LABEL: vp_floor_nxv1f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 1 x half> @llvm.vp.floor.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
@@ -441,7 +358,7 @@ define <vscale x 1 x half> @vp_floor_nxv1f16(<vscale x 1 x half> %va, <vscale x
 define <vscale x 1 x half> @vp_floor_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_nxv1f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -457,7 +374,7 @@ define <vscale x 1 x half> @vp_floor_nxv1f16_unmasked(<vscale x 1 x half> %va, i
 ;
 ; ZVFHMIN-LABEL: vp_floor_nxv1f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -480,15 +397,13 @@ define <vscale x 1 x half> @vp_floor_nxv1f16_unmasked(<vscale x 1 x half> %va, i
 define <vscale x 2 x half> @vp_floor_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_nxv2f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 2
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -498,27 +413,21 @@ define <vscale x 2 x half> @vp_floor_nxv2f16(<vscale x 2 x half> %va, <vscale x
 ;
 ; ZVFHMIN-LABEL: vp_floor_nxv2f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vmv.v.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x half> @llvm.vp.floor.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
@@ -527,7 +436,7 @@ define <vscale x 2 x half> @vp_floor_nxv2f16(<vscale x 2 x half> %va, <vscale x
 define <vscale x 2 x half> @vp_floor_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_nxv2f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -543,7 +452,7 @@ define <vscale x 2 x half> @vp_floor_nxv2f16_unmasked(<vscale x 2 x half> %va, i
 ;
 ; ZVFHMIN-LABEL: vp_floor_nxv2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -566,15 +475,13 @@ define <vscale x 2 x half> @vp_floor_nxv2f16_unmasked(<vscale x 2 x half> %va, i
 define <vscale x 4 x half> @vp_floor_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_nxv4f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 2
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -584,27 +491,21 @@ define <vscale x 4 x half> @vp_floor_nxv4f16(<vscale x 4 x half> %va, <vscale x
 ;
 ; ZVFHMIN-LABEL: vp_floor_nxv4f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v13, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v13, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vmv1r.v v0, v13
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 4 x half> @llvm.vp.floor.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
@@ -613,7 +514,7 @@ define <vscale x 4 x half> @vp_floor_nxv4f16(<vscale x 4 x half> %va, <vscale x
 define <vscale x 4 x half> @vp_floor_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_nxv4f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -629,7 +530,7 @@ define <vscale x 4 x half> @vp_floor_nxv4f16_unmasked(<vscale x 4 x half> %va, i
 ;
 ; ZVFHMIN-LABEL: vp_floor_nxv4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -652,17 +553,13 @@ define <vscale x 4 x half> @vp_floor_nxv4f16_unmasked(<vscale x 4 x half> %va, i
 define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_nxv8f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFH-NEXT:    vmv1r.v v12, v0
-; ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; ZVFH-NEXT:    fsrmi a0, 2
-; ZVFH-NEXT:    vmv1r.v v0, v12
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -672,27 +569,21 @@ define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ;
 ; ZVFHMIN-LABEL: vp_floor_nxv8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v17, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v12
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v17, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vmv1r.v v0, v17
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 8 x half> @llvm.vp.floor.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
@@ -701,7 +592,7 @@ define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x
 define <vscale x 8 x half> @vp_floor_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_nxv8f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -717,7 +608,7 @@ define <vscale x 8 x half> @vp_floor_nxv8f16_unmasked(<vscale x 8 x half> %va, i
 ;
 ; ZVFHMIN-LABEL: vp_floor_nxv8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -740,17 +631,13 @@ define <vscale x 8 x half> @vp_floor_nxv8f16_unmasked(<vscale x 8 x half> %va, i
 define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_nxv16f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFH-NEXT:    vmv1r.v v16, v0
-; ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfabs.v v12, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; ZVFH-NEXT:    fsrmi a0, 2
-; ZVFH-NEXT:    vmv1r.v v0, v16
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -760,27 +647,21 @@ define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale
 ;
 ; ZVFHMIN-LABEL: vp_floor_nxv16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v25, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v16
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v25, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v16, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v24
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x half> @llvm.vp.floor.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
@@ -789,7 +670,7 @@ define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale
 define <vscale x 16 x half> @vp_floor_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_nxv16f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v12, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -805,7 +686,7 @@ define <vscale x 16 x half> @vp_floor_nxv16f16_unmasked(<vscale x 16 x half> %va
 ;
 ; ZVFHMIN-LABEL: vp_floor_nxv16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
@@ -828,17 +709,13 @@ define <vscale x 16 x half> @vp_floor_nxv16f16_unmasked(<vscale x 16 x half> %va
 define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_nxv32f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT:    vmv1r.v v24, v0
-; ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT:    vfabs.v v16, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; ZVFH-NEXT:    fsrmi a0, 2
-; ZVFH-NEXT:    vmv1r.v v0, v24
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -848,60 +725,35 @@ define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale
 ;
 ; ZVFHMIN-LABEL: vp_floor_nxv32f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    lui a3, 307200
-; ZVFHMIN-NEXT:    slli a1, a2, 1
-; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a3
-; ZVFHMIN-NEXT:    sub a3, a0, a1
-; ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a2
-; ZVFHMIN-NEXT:    sltu a2, a0, a3
-; ZVFHMIN-NEXT:    vmv1r.v v5, v6
-; ZVFHMIN-NEXT:    addi a2, a2, -1
-; ZVFHMIN-NEXT:    and a2, a2, a3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v5, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a2, 2
-; ZVFHMIN-NEXT:    vmv1r.v v0, v5
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    fsrm a2
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v24, v16
+; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT:    fsrmi a0, 2
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB22_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB22_2:
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v8, v7
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v16, fa5, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v24
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    fsrmi a0, 2
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.floor.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -910,7 +762,7 @@ define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale
 define <vscale x 32 x half> @vp_floor_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_floor_nxv32f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfabs.v v16, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -926,46 +778,12 @@ define <vscale x 32 x half> @vp_floor_nxv32f16_unmasked(<vscale x 32 x half> %va
 ;
 ; ZVFHMIN-LABEL: vp_floor_nxv32f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
-; ZVFHMIN-NEXT:    lui a3, 307200
-; ZVFHMIN-NEXT:    slli a1, a2, 1
-; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a3
-; ZVFHMIN-NEXT:    sub a3, a0, a1
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v7, v16, a2
-; ZVFHMIN-NEXT:    sltu a2, a0, a3
-; ZVFHMIN-NEXT:    vmv1r.v v6, v7
-; ZVFHMIN-NEXT:    addi a2, a2, -1
-; ZVFHMIN-NEXT:    and a2, a2, a3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v6, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a2, 2
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    fsrm a2
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB23_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB23_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v24, v16
+; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 2
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
@@ -974,7 +792,21 @@ define <vscale x 32 x half> @vp_floor_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfabs.v v8, v24
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT:    fsrmi a0, 2
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.floor.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -983,14 +815,12 @@ define <vscale x 32 x half> @vp_floor_nxv32f16_unmasked(<vscale x 32 x half> %va
 define <vscale x 1 x float> @vp_floor_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1004,7 +834,7 @@ define <vscale x 1 x float> @vp_floor_nxv1f32(<vscale x 1 x float> %va, <vscale
 define <vscale x 1 x float> @vp_floor_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv1f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1023,14 +853,12 @@ define <vscale x 1 x float> @vp_floor_nxv1f32_unmasked(<vscale x 1 x float> %va,
 define <vscale x 2 x float> @vp_floor_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1044,7 +872,7 @@ define <vscale x 2 x float> @vp_floor_nxv2f32(<vscale x 2 x float> %va, <vscale
 define <vscale x 2 x float> @vp_floor_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1063,16 +891,12 @@ define <vscale x 2 x float> @vp_floor_nxv2f32_unmasked(<vscale x 2 x float> %va,
 define <vscale x 4 x float> @vp_floor_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1086,7 +910,7 @@ define <vscale x 4 x float> @vp_floor_nxv4f32(<vscale x 4 x float> %va, <vscale
 define <vscale x 4 x float> @vp_floor_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1105,16 +929,12 @@ define <vscale x 4 x float> @vp_floor_nxv4f32_unmasked(<vscale x 4 x float> %va,
 define <vscale x 8 x float> @vp_floor_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1128,7 +948,7 @@ define <vscale x 8 x float> @vp_floor_nxv8f32(<vscale x 8 x float> %va, <vscale
 define <vscale x 8 x float> @vp_floor_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1147,16 +967,12 @@ define <vscale x 8 x float> @vp_floor_nxv8f32_unmasked(<vscale x 8 x float> %va,
 define <vscale x 16 x float> @vp_floor_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    vfabs.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 2
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1170,7 +986,7 @@ define <vscale x 16 x float> @vp_floor_nxv16f32(<vscale x 16 x float> %va, <vsca
 define <vscale x 16 x float> @vp_floor_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_floor_nxv16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1189,14 +1005,12 @@ define <vscale x 16 x float> @vp_floor_nxv16f32_unmasked(<vscale x 16 x float> %
 define <vscale x 1 x double> @vp_floor_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_nxv1f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI34_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v9, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1206,15 +1020,13 @@ define <vscale x 1 x double> @vp_floor_nxv1f64(<vscale x 1 x double> %va, <vscal
 ;
 ; RV64ZVFH-LABEL: vp_floor_nxv1f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 2
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1224,14 +1036,12 @@ define <vscale x 1 x double> @vp_floor_nxv1f64(<vscale x 1 x double> %va, <vscal
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_nxv1f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI34_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1241,15 +1051,13 @@ define <vscale x 1 x double> @vp_floor_nxv1f64(<vscale x 1 x double> %va, <vscal
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_nxv1f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1263,9 +1071,9 @@ define <vscale x 1 x double> @vp_floor_nxv1f64(<vscale x 1 x double> %va, <vscal
 define <vscale x 1 x double> @vp_floor_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_nxv1f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI35_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI35_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI35_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v9, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
@@ -1278,7 +1086,7 @@ define <vscale x 1 x double> @vp_floor_nxv1f64_unmasked(<vscale x 1 x double> %v
 ;
 ; RV64ZVFH-LABEL: vp_floor_nxv1f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1294,9 +1102,9 @@ define <vscale x 1 x double> @vp_floor_nxv1f64_unmasked(<vscale x 1 x double> %v
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_nxv1f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI35_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI35_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI35_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
@@ -1309,7 +1117,7 @@ define <vscale x 1 x double> @vp_floor_nxv1f64_unmasked(<vscale x 1 x double> %v
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_nxv1f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1329,16 +1137,12 @@ define <vscale x 1 x double> @vp_floor_nxv1f64_unmasked(<vscale x 1 x double> %v
 define <vscale x 2 x double> @vp_floor_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_nxv2f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI36_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI36_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v10, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v12
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1348,17 +1152,13 @@ define <vscale x 2 x double> @vp_floor_nxv2f64(<vscale x 2 x double> %va, <vscal
 ;
 ; RV64ZVFH-LABEL: vp_floor_nxv2f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v12, v0
-; RV64ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 2
-; RV64ZVFH-NEXT:    vmv1r.v v0, v12
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1368,16 +1168,12 @@ define <vscale x 2 x double> @vp_floor_nxv2f64(<vscale x 2 x double> %va, <vscal
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_nxv2f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI36_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI36_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1387,17 +1183,13 @@ define <vscale x 2 x double> @vp_floor_nxv2f64(<vscale x 2 x double> %va, <vscal
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_nxv2f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1411,9 +1203,9 @@ define <vscale x 2 x double> @vp_floor_nxv2f64(<vscale x 2 x double> %va, <vscal
 define <vscale x 2 x double> @vp_floor_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_nxv2f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI37_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI37_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI37_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI37_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v10, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
@@ -1426,7 +1218,7 @@ define <vscale x 2 x double> @vp_floor_nxv2f64_unmasked(<vscale x 2 x double> %v
 ;
 ; RV64ZVFH-LABEL: vp_floor_nxv2f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1442,9 +1234,9 @@ define <vscale x 2 x double> @vp_floor_nxv2f64_unmasked(<vscale x 2 x double> %v
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_nxv2f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI37_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI37_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI37_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI37_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
@@ -1457,7 +1249,7 @@ define <vscale x 2 x double> @vp_floor_nxv2f64_unmasked(<vscale x 2 x double> %v
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_nxv2f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1477,16 +1269,12 @@ define <vscale x 2 x double> @vp_floor_nxv2f64_unmasked(<vscale x 2 x double> %v
 define <vscale x 4 x double> @vp_floor_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_nxv4f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI38_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI38_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v12, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v16
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1496,17 +1284,13 @@ define <vscale x 4 x double> @vp_floor_nxv4f64(<vscale x 4 x double> %va, <vscal
 ;
 ; RV64ZVFH-LABEL: vp_floor_nxv4f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v16, v0
-; RV64ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 2
-; RV64ZVFH-NEXT:    vmv1r.v v0, v16
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1516,16 +1300,12 @@ define <vscale x 4 x double> @vp_floor_nxv4f64(<vscale x 4 x double> %va, <vscal
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_nxv4f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI38_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI38_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1535,17 +1315,13 @@ define <vscale x 4 x double> @vp_floor_nxv4f64(<vscale x 4 x double> %va, <vscal
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_nxv4f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1559,9 +1335,9 @@ define <vscale x 4 x double> @vp_floor_nxv4f64(<vscale x 4 x double> %va, <vscal
 define <vscale x 4 x double> @vp_floor_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_nxv4f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI39_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI39_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI39_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v12, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
@@ -1574,7 +1350,7 @@ define <vscale x 4 x double> @vp_floor_nxv4f64_unmasked(<vscale x 4 x double> %v
 ;
 ; RV64ZVFH-LABEL: vp_floor_nxv4f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1590,9 +1366,9 @@ define <vscale x 4 x double> @vp_floor_nxv4f64_unmasked(<vscale x 4 x double> %v
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_nxv4f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI39_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI39_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI39_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
@@ -1605,7 +1381,7 @@ define <vscale x 4 x double> @vp_floor_nxv4f64_unmasked(<vscale x 4 x double> %v
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_nxv4f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1625,16 +1401,12 @@ define <vscale x 4 x double> @vp_floor_nxv4f64_unmasked(<vscale x 4 x double> %v
 define <vscale x 7 x double> @vp_floor_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_nxv7f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1644,17 +1416,13 @@ define <vscale x 7 x double> @vp_floor_nxv7f64(<vscale x 7 x double> %va, <vscal
 ;
 ; RV64ZVFH-LABEL: vp_floor_nxv7f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 2
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1664,16 +1432,12 @@ define <vscale x 7 x double> @vp_floor_nxv7f64(<vscale x 7 x double> %va, <vscal
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_nxv7f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1683,17 +1447,13 @@ define <vscale x 7 x double> @vp_floor_nxv7f64(<vscale x 7 x double> %va, <vscal
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_nxv7f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1707,9 +1467,9 @@ define <vscale x 7 x double> @vp_floor_nxv7f64(<vscale x 7 x double> %va, <vscal
 define <vscale x 7 x double> @vp_floor_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_nxv7f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI41_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI41_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI41_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
@@ -1722,7 +1482,7 @@ define <vscale x 7 x double> @vp_floor_nxv7f64_unmasked(<vscale x 7 x double> %v
 ;
 ; RV64ZVFH-LABEL: vp_floor_nxv7f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1738,9 +1498,9 @@ define <vscale x 7 x double> @vp_floor_nxv7f64_unmasked(<vscale x 7 x double> %v
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_nxv7f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI41_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI41_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI41_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
@@ -1753,7 +1513,7 @@ define <vscale x 7 x double> @vp_floor_nxv7f64_unmasked(<vscale x 7 x double> %v
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_nxv7f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1773,16 +1533,12 @@ define <vscale x 7 x double> @vp_floor_nxv7f64_unmasked(<vscale x 7 x double> %v
 define <vscale x 8 x double> @vp_floor_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_nxv8f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1792,17 +1548,13 @@ define <vscale x 8 x double> @vp_floor_nxv8f64(<vscale x 8 x double> %va, <vscal
 ;
 ; RV64ZVFH-LABEL: vp_floor_nxv8f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 2
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1812,16 +1564,12 @@ define <vscale x 8 x double> @vp_floor_nxv8f64(<vscale x 8 x double> %va, <vscal
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_nxv8f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1831,17 +1579,13 @@ define <vscale x 8 x double> @vp_floor_nxv8f64(<vscale x 8 x double> %va, <vscal
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_nxv8f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1855,9 +1599,9 @@ define <vscale x 8 x double> @vp_floor_nxv8f64(<vscale x 8 x double> %va, <vscal
 define <vscale x 8 x double> @vp_floor_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_nxv8f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI43_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI43_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI43_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
@@ -1870,7 +1614,7 @@ define <vscale x 8 x double> @vp_floor_nxv8f64_unmasked(<vscale x 8 x double> %v
 ;
 ; RV64ZVFH-LABEL: vp_floor_nxv8f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1886,9 +1630,9 @@ define <vscale x 8 x double> @vp_floor_nxv8f64_unmasked(<vscale x 8 x double> %v
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_nxv8f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI43_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI43_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI43_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
@@ -1901,7 +1645,7 @@ define <vscale x 8 x double> @vp_floor_nxv8f64_unmasked(<vscale x 8 x double> %v
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_nxv8f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1923,184 +1667,104 @@ define <vscale x 8 x double> @vp_floor_nxv8f64_unmasked(<vscale x 8 x double> %v
 define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_nxv16f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v7, v0
-; RV32ZVFH-NEXT:    csrr a1, vlenb
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI44_0)
-; RV32ZVFH-NEXT:    srli a3, a1, 3
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI44_0)(a2)
-; RV32ZVFH-NEXT:    sub a2, a0, a1
-; RV32ZVFH-NEXT:    vslidedown.vx v6, v0, a3
-; RV32ZVFH-NEXT:    sltu a3, a0, a2
-; RV32ZVFH-NEXT:    addi a3, a3, -1
-; RV32ZVFH-NEXT:    and a2, a3, a2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a2, 2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    fsrm a2
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI44_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v24, v8
+; RV32ZVFH-NEXT:    fsrmi a0, 2
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFH-NEXT:    bltu a0, a1, .LBB44_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    mv a0, a1
-; RV32ZVFH-NEXT:  .LBB44_2:
-; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFH-NEXT:    ret
 ;
 ; RV64ZVFH-LABEL: vp_floor_nxv16f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v7, v0
-; RV64ZVFH-NEXT:    csrr a1, vlenb
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    srli a3, a1, 3
-; RV64ZVFH-NEXT:    vslidedown.vx v6, v0, a3
-; RV64ZVFH-NEXT:    sub a3, a0, a1
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    sltu a2, a0, a3
-; RV64ZVFH-NEXT:    addi a2, a2, -1
-; RV64ZVFH-NEXT:    and a2, a2, a3
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a2, 2
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    fsrm a2
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v24, v8
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    fsrmi a0, 2
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFH-NEXT:    bltu a0, a1, .LBB44_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    mv a0, a1
-; RV64ZVFH-NEXT:  .LBB44_2:
-; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV64ZVFH-NEXT:    fsrmi a0, 2
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFH-NEXT:    ret
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_nxv16f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; RV32ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI44_0)
-; RV32ZVFHMIN-NEXT:    srli a3, a1, 3
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI44_0)(a2)
-; RV32ZVFHMIN-NEXT:    sub a2, a0, a1
-; RV32ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; RV32ZVFHMIN-NEXT:    sltu a3, a0, a2
-; RV32ZVFHMIN-NEXT:    addi a3, a3, -1
-; RV32ZVFHMIN-NEXT:    and a2, a3, a2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a2, 2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a2
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI44_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    bltu a0, a1, .LBB44_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    mv a0, a1
-; RV32ZVFHMIN-NEXT:  .LBB44_2:
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_nxv16f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; RV64ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    srli a3, a1, 3
-; RV64ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; RV64ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV64ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV64ZVFHMIN-NEXT:    and a2, a2, a3
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a2, 2
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a2
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    bltu a0, a1, .LBB44_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    mv a0, a1
-; RV64ZVFHMIN-NEXT:  .LBB44_2:
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.floor.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
@@ -2109,136 +1773,104 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
 define <vscale x 16 x double> @vp_floor_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_floor_nxv16f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    csrr a1, vlenb
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI45_0)
-; RV32ZVFH-NEXT:    sub a3, a0, a1
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI45_0)(a2)
-; RV32ZVFH-NEXT:    sltu a2, a0, a3
-; RV32ZVFH-NEXT:    addi a2, a2, -1
-; RV32ZVFH-NEXT:    and a2, a2, a3
-; RV32ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16
-; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFH-NEXT:    fsrmi a2, 2
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    fsrm a2
-; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFH-NEXT:    bltu a0, a1, .LBB45_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    mv a0, a1
-; RV32ZVFH-NEXT:  .LBB45_2:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI45_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI45_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v8
-; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 2
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT:    fsrmi a0, 2
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT:    vmv1r.v v0, v7
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFH-NEXT:    ret
 ;
 ; RV64ZVFH-LABEL: vp_floor_nxv16f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    csrr a1, vlenb
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    sub a3, a0, a1
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    sltu a2, a0, a3
-; RV64ZVFH-NEXT:    addi a2, a2, -1
-; RV64ZVFH-NEXT:    and a2, a2, a3
-; RV64ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16
-; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFH-NEXT:    fsrmi a2, 2
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    fsrm a2
-; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFH-NEXT:    bltu a0, a1, .LBB45_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    mv a0, a1
-; RV64ZVFH-NEXT:  .LBB45_2:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v8
-; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    fsrmi a0, 2
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT:    fsrmi a0, 2
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT:    vmv1r.v v0, v7
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFH-NEXT:    ret
 ;
 ; RV32ZVFHMIN-LABEL: vp_floor_nxv16f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI45_0)
-; RV32ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI45_0)(a2)
-; RV32ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV32ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV32ZVFHMIN-NEXT:    and a2, a2, a3
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a2, 2
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a2
-; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    bltu a0, a1, .LBB45_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    mv a0, a1
-; RV32ZVFHMIN-NEXT:  .LBB45_2:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI45_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI45_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 2
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vp_floor_nxv16f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV64ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV64ZVFHMIN-NEXT:    and a2, a2, a3
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a2, 2
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a2
-; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    bltu a0, a1, .LBB45_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    mv a0, a1
-; RV64ZVFHMIN-NEXT:  .LBB45_2:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 2
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.floor.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v

diff  --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
index 3a4591d47e5e4..33969342bd781 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -15,26 +15,20 @@
 define <vscale x 1 x bfloat> @vp_nearbyint_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv1bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v8, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v11, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v11, v10, v0.t
-; CHECK-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
 ; CHECK-NEXT:    fsflags a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x bfloat> @llvm.vp.nearbyint.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 %evl)
@@ -44,7 +38,7 @@ define <vscale x 1 x bfloat> @vp_nearbyint_nxv1bf16(<vscale x 1 x bfloat> %va, <
 define <vscale x 1 x bfloat> @vp_nearbyint_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv1bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -67,26 +61,20 @@ define <vscale x 1 x bfloat> @vp_nearbyint_nxv1bf16_unmasked(<vscale x 1 x bfloa
 define <vscale x 2 x bfloat> @vp_nearbyint_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv2bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v8, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v11, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v11, v10, v0.t
-; CHECK-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
 ; CHECK-NEXT:    fsflags a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.vp.nearbyint.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
@@ -96,7 +84,7 @@ define <vscale x 2 x bfloat> @vp_nearbyint_nxv2bf16(<vscale x 2 x bfloat> %va, <
 define <vscale x 2 x bfloat> @vp_nearbyint_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv2bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -119,26 +107,20 @@ define <vscale x 2 x bfloat> @vp_nearbyint_nxv2bf16_unmasked(<vscale x 2 x bfloa
 define <vscale x 4 x bfloat> @vp_nearbyint_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv4bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v13, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v10
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v13, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
 ; CHECK-NEXT:    fsflags a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 4 x bfloat> @llvm.vp.nearbyint.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 %evl)
@@ -148,7 +130,7 @@ define <vscale x 4 x bfloat> @vp_nearbyint_nxv4bf16(<vscale x 4 x bfloat> %va, <
 define <vscale x 4 x bfloat> @vp_nearbyint_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv4bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -171,26 +153,20 @@ define <vscale x 4 x bfloat> @vp_nearbyint_nxv4bf16_unmasked(<vscale x 4 x bfloa
 define <vscale x 8 x bfloat> @vp_nearbyint_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv8bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v17, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v12, v0.t
+; CHECK-NEXT:    vfabs.v v8, v12
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v17, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vmv1r.v v0, v17
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
 ; CHECK-NEXT:    fsflags a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x bfloat> @llvm.vp.nearbyint.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 %evl)
@@ -200,7 +176,7 @@ define <vscale x 8 x bfloat> @vp_nearbyint_nxv8bf16(<vscale x 8 x bfloat> %va, <
 define <vscale x 8 x bfloat> @vp_nearbyint_nxv8bf16_unmasked(<vscale x 8 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv8bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -223,26 +199,20 @@ define <vscale x 8 x bfloat> @vp_nearbyint_nxv8bf16_unmasked(<vscale x 8 x bfloa
 define <vscale x 16 x bfloat> @vp_nearbyint_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv16bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v16, v0.t
+; CHECK-NEXT:    vfabs.v v8, v16
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
 ; CHECK-NEXT:    fsflags a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x bfloat> @llvm.vp.nearbyint.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 %evl)
@@ -252,7 +222,7 @@ define <vscale x 16 x bfloat> @vp_nearbyint_nxv16bf16(<vscale x 16 x bfloat> %va
 define <vscale x 16 x bfloat> @vp_nearbyint_nxv16bf16_unmasked(<vscale x 16 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv16bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
@@ -275,59 +245,34 @@ define <vscale x 16 x bfloat> @vp_nearbyint_nxv16bf16_unmasked(<vscale x 16 x bf
 define <vscale x 32 x bfloat> @vp_nearbyint_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv32bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    lui a3, 307200
-; CHECK-NEXT:    slli a1, a2, 1
-; CHECK-NEXT:    srli a2, a2, 2
-; CHECK-NEXT:    fmv.w.x fa5, a3
-; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vslidedown.vx v6, v0, a2
-; CHECK-NEXT:    sltu a2, a0, a3
-; CHECK-NEXT:    vmv1r.v v5, v6
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a3
-; CHECK-NEXT:    vmv1r.v v0, v6
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v5, v16, fa5, v0.t
-; CHECK-NEXT:    frflags a2
-; CHECK-NEXT:    vmv1r.v v0, v5
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT:    fsflags a2
+; CHECK-NEXT:    vfabs.v v24, v16
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
+; CHECK-NEXT:    frflags a0
+; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v6
+; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB10_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB10_2:
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v7
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v16, fa5, v0.t
+; CHECK-NEXT:    vfabs.v v8, v24
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    fsflags a0
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v7
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v24, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
 ; CHECK-NEXT:    fsflags a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.nearbyint.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 %evl)
@@ -337,46 +282,12 @@ define <vscale x 32 x bfloat> @vp_nearbyint_nxv32bf16(<vscale x 32 x bfloat> %va
 define <vscale x 32 x bfloat> @vp_nearbyint_nxv32bf16_unmasked(<vscale x 32 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv32bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vmset.m v16
-; CHECK-NEXT:    lui a3, 307200
-; CHECK-NEXT:    slli a1, a2, 1
-; CHECK-NEXT:    srli a2, a2, 2
-; CHECK-NEXT:    fmv.w.x fa5, a3
-; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v7, v16, a2
-; CHECK-NEXT:    sltu a2, a0, a3
-; CHECK-NEXT:    vmv1r.v v6, v7
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a3
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v6, v16, fa5, v0.t
-; CHECK-NEXT:    frflags a2
-; CHECK-NEXT:    vmv1r.v v0, v6
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT:    fsflags a2
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB11_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB11_2:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v16
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    frflags a0
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
@@ -384,7 +295,21 @@ define <vscale x 32 x bfloat> @vp_nearbyint_nxv32bf16_unmasked(<vscale x 32 x bf
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfabs.v v8, v24
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    fsflags a0
+; CHECK-NEXT:    frflags a0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
 ; CHECK-NEXT:    fsflags a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.nearbyint.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
@@ -394,15 +319,13 @@ define <vscale x 32 x bfloat> @vp_nearbyint_nxv32bf16_unmasked(<vscale x 32 x bf
 define <vscale x 1 x half> @vp_nearbyint_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_nearbyint_nxv1f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    frflags a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
@@ -412,26 +335,20 @@ define <vscale x 1 x half> @vp_nearbyint_nxv1f16(<vscale x 1 x half> %va, <vscal
 ;
 ; ZVFHMIN-LABEL: vp_nearbyint_nxv1f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    frflags a0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    fsflags a0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 1 x half> @llvm.vp.nearbyint.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
@@ -441,7 +358,7 @@ define <vscale x 1 x half> @vp_nearbyint_nxv1f16(<vscale x 1 x half> %va, <vscal
 define <vscale x 1 x half> @vp_nearbyint_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_nearbyint_nxv1f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -457,7 +374,7 @@ define <vscale x 1 x half> @vp_nearbyint_nxv1f16_unmasked(<vscale x 1 x half> %v
 ;
 ; ZVFHMIN-LABEL: vp_nearbyint_nxv1f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -480,15 +397,13 @@ define <vscale x 1 x half> @vp_nearbyint_nxv1f16_unmasked(<vscale x 1 x half> %v
 define <vscale x 2 x half> @vp_nearbyint_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_nearbyint_nxv2f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    frflags a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
@@ -498,26 +413,20 @@ define <vscale x 2 x half> @vp_nearbyint_nxv2f16(<vscale x 2 x half> %va, <vscal
 ;
 ; ZVFHMIN-LABEL: vp_nearbyint_nxv2f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    frflags a0
-; ZVFHMIN-NEXT:    vmv.v.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    fsflags a0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x half> @llvm.vp.nearbyint.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
@@ -527,7 +436,7 @@ define <vscale x 2 x half> @vp_nearbyint_nxv2f16(<vscale x 2 x half> %va, <vscal
 define <vscale x 2 x half> @vp_nearbyint_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_nearbyint_nxv2f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -543,7 +452,7 @@ define <vscale x 2 x half> @vp_nearbyint_nxv2f16_unmasked(<vscale x 2 x half> %v
 ;
 ; ZVFHMIN-LABEL: vp_nearbyint_nxv2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -566,15 +475,13 @@ define <vscale x 2 x half> @vp_nearbyint_nxv2f16_unmasked(<vscale x 2 x half> %v
 define <vscale x 4 x half> @vp_nearbyint_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_nearbyint_nxv4f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    frflags a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
@@ -584,26 +491,20 @@ define <vscale x 4 x half> @vp_nearbyint_nxv4f16(<vscale x 4 x half> %va, <vscal
 ;
 ; ZVFHMIN-LABEL: vp_nearbyint_nxv4f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v13, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v13, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    frflags a0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v13
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    fsflags a0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 4 x half> @llvm.vp.nearbyint.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
@@ -613,7 +514,7 @@ define <vscale x 4 x half> @vp_nearbyint_nxv4f16(<vscale x 4 x half> %va, <vscal
 define <vscale x 4 x half> @vp_nearbyint_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_nearbyint_nxv4f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -629,7 +530,7 @@ define <vscale x 4 x half> @vp_nearbyint_nxv4f16_unmasked(<vscale x 4 x half> %v
 ;
 ; ZVFHMIN-LABEL: vp_nearbyint_nxv4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -652,17 +553,13 @@ define <vscale x 4 x half> @vp_nearbyint_nxv4f16_unmasked(<vscale x 4 x half> %v
 define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_nearbyint_nxv8f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFH-NEXT:    vmv1r.v v12, v0
-; ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; ZVFH-NEXT:    frflags a0
-; ZVFH-NEXT:    vmv1r.v v0, v12
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
@@ -672,26 +569,20 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscal
 ;
 ; ZVFHMIN-LABEL: vp_nearbyint_nxv8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v17, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v12
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v17, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    frflags a0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v17
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    fsflags a0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 8 x half> @llvm.vp.nearbyint.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
@@ -701,7 +592,7 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscal
 define <vscale x 8 x half> @vp_nearbyint_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_nearbyint_nxv8f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -717,7 +608,7 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16_unmasked(<vscale x 8 x half> %v
 ;
 ; ZVFHMIN-LABEL: vp_nearbyint_nxv8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -740,17 +631,13 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16_unmasked(<vscale x 8 x half> %v
 define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_nearbyint_nxv16f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFH-NEXT:    vmv1r.v v16, v0
-; ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfabs.v v12, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; ZVFH-NEXT:    frflags a0
-; ZVFH-NEXT:    vmv1r.v v0, v16
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
@@ -760,26 +647,20 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vs
 ;
 ; ZVFHMIN-LABEL: vp_nearbyint_nxv16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v25, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v16
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v25, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    frflags a0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v16, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v24
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    fsflags a0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x half> @llvm.vp.nearbyint.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
@@ -789,7 +670,7 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vs
 define <vscale x 16 x half> @vp_nearbyint_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_nearbyint_nxv16f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v12, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -805,7 +686,7 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16_unmasked(<vscale x 16 x half>
 ;
 ; ZVFHMIN-LABEL: vp_nearbyint_nxv16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
@@ -828,17 +709,13 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16_unmasked(<vscale x 16 x half>
 define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_nearbyint_nxv32f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT:    vmv1r.v v24, v0
-; ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT:    vfabs.v v16, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; ZVFH-NEXT:    frflags a0
-; ZVFH-NEXT:    vmv1r.v v0, v24
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
@@ -848,59 +725,34 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
 ;
 ; ZVFHMIN-LABEL: vp_nearbyint_nxv32f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    lui a3, 307200
-; ZVFHMIN-NEXT:    slli a1, a2, 1
-; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a3
-; ZVFHMIN-NEXT:    sub a3, a0, a1
-; ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a2
-; ZVFHMIN-NEXT:    sltu a2, a0, a3
-; ZVFHMIN-NEXT:    vmv1r.v v5, v6
-; ZVFHMIN-NEXT:    addi a2, a2, -1
-; ZVFHMIN-NEXT:    and a2, a2, a3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v5, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    frflags a2
-; ZVFHMIN-NEXT:    vmv1r.v v0, v5
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    fsflags a2
+; ZVFHMIN-NEXT:    vfabs.v v24, v16
+; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT:    frflags a0
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB22_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB22_2:
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v8, v7
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v16, fa5, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v24
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    fsflags a0
 ; ZVFHMIN-NEXT:    frflags a0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
 ; ZVFHMIN-NEXT:    fsflags a0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.nearbyint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
@@ -910,7 +762,7 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
 define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_nearbyint_nxv32f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfabs.v v16, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -926,46 +778,12 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half>
 ;
 ; ZVFHMIN-LABEL: vp_nearbyint_nxv32f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
-; ZVFHMIN-NEXT:    lui a3, 307200
-; ZVFHMIN-NEXT:    slli a1, a2, 1
-; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a3
-; ZVFHMIN-NEXT:    sub a3, a0, a1
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v7, v16, a2
-; ZVFHMIN-NEXT:    sltu a2, a0, a3
-; ZVFHMIN-NEXT:    vmv1r.v v6, v7
-; ZVFHMIN-NEXT:    addi a2, a2, -1
-; ZVFHMIN-NEXT:    and a2, a2, a3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v6, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    frflags a2
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    fsflags a2
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB23_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB23_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v24, v16
+; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
 ; ZVFHMIN-NEXT:    frflags a0
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
@@ -973,7 +791,21 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfabs.v v8, v24
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    fsflags a0
+; ZVFHMIN-NEXT:    frflags a0
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
 ; ZVFHMIN-NEXT:    fsflags a0
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.nearbyint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
@@ -983,14 +815,12 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half>
 define <vscale x 1 x float> @vp_nearbyint_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
@@ -1004,7 +834,7 @@ define <vscale x 1 x float> @vp_nearbyint_nxv1f32(<vscale x 1 x float> %va, <vsc
 define <vscale x 1 x float> @vp_nearbyint_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv1f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1023,14 +853,12 @@ define <vscale x 1 x float> @vp_nearbyint_nxv1f32_unmasked(<vscale x 1 x float>
 define <vscale x 2 x float> @vp_nearbyint_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
@@ -1044,7 +872,7 @@ define <vscale x 2 x float> @vp_nearbyint_nxv2f32(<vscale x 2 x float> %va, <vsc
 define <vscale x 2 x float> @vp_nearbyint_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1063,16 +891,12 @@ define <vscale x 2 x float> @vp_nearbyint_nxv2f32_unmasked(<vscale x 2 x float>
 define <vscale x 4 x float> @vp_nearbyint_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
@@ -1086,7 +910,7 @@ define <vscale x 4 x float> @vp_nearbyint_nxv4f32(<vscale x 4 x float> %va, <vsc
 define <vscale x 4 x float> @vp_nearbyint_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1105,16 +929,12 @@ define <vscale x 4 x float> @vp_nearbyint_nxv4f32_unmasked(<vscale x 4 x float>
 define <vscale x 8 x float> @vp_nearbyint_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
@@ -1128,7 +948,7 @@ define <vscale x 8 x float> @vp_nearbyint_nxv8f32(<vscale x 8 x float> %va, <vsc
 define <vscale x 8 x float> @vp_nearbyint_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1147,16 +967,12 @@ define <vscale x 8 x float> @vp_nearbyint_nxv8f32_unmasked(<vscale x 8 x float>
 define <vscale x 16 x float> @vp_nearbyint_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    vfabs.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    frflags a0
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
@@ -1170,7 +986,7 @@ define <vscale x 16 x float> @vp_nearbyint_nxv16f32(<vscale x 16 x float> %va, <
 define <vscale x 16 x float> @vp_nearbyint_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_nearbyint_nxv16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1189,14 +1005,12 @@ define <vscale x 16 x float> @vp_nearbyint_nxv16f32_unmasked(<vscale x 16 x floa
 define <vscale x 1 x double> @vp_nearbyint_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_nearbyint_nxv1f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI34_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v9, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    frflags a0
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -1206,15 +1020,13 @@ define <vscale x 1 x double> @vp_nearbyint_nxv1f64(<vscale x 1 x double> %va, <v
 ;
 ; RV64ZVFH-LABEL: vp_nearbyint_nxv1f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFH-NEXT:    frflags a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -1224,14 +1036,12 @@ define <vscale x 1 x double> @vp_nearbyint_nxv1f64(<vscale x 1 x double> %va, <v
 ;
 ; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv1f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI34_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    frflags a0
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -1241,15 +1051,13 @@ define <vscale x 1 x double> @vp_nearbyint_nxv1f64(<vscale x 1 x double> %va, <v
 ;
 ; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv1f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFHMIN-NEXT:    frflags a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -1263,9 +1071,9 @@ define <vscale x 1 x double> @vp_nearbyint_nxv1f64(<vscale x 1 x double> %va, <v
 define <vscale x 1 x double> @vp_nearbyint_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_nearbyint_nxv1f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI35_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI35_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI35_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v9, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    frflags a0
@@ -1278,7 +1086,7 @@ define <vscale x 1 x double> @vp_nearbyint_nxv1f64_unmasked(<vscale x 1 x double
 ;
 ; RV64ZVFH-LABEL: vp_nearbyint_nxv1f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1294,9 +1102,9 @@ define <vscale x 1 x double> @vp_nearbyint_nxv1f64_unmasked(<vscale x 1 x double
 ;
 ; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv1f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI35_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI35_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI35_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    frflags a0
@@ -1309,7 +1117,7 @@ define <vscale x 1 x double> @vp_nearbyint_nxv1f64_unmasked(<vscale x 1 x double
 ;
 ; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv1f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1329,16 +1137,12 @@ define <vscale x 1 x double> @vp_nearbyint_nxv1f64_unmasked(<vscale x 1 x double
 define <vscale x 2 x double> @vp_nearbyint_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_nearbyint_nxv2f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI36_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI36_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v10, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    frflags a0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v12
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
@@ -1348,17 +1152,13 @@ define <vscale x 2 x double> @vp_nearbyint_nxv2f64(<vscale x 2 x double> %va, <v
 ;
 ; RV64ZVFH-LABEL: vp_nearbyint_nxv2f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v12, v0
-; RV64ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFH-NEXT:    frflags a0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v12
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
@@ -1368,16 +1168,12 @@ define <vscale x 2 x double> @vp_nearbyint_nxv2f64(<vscale x 2 x double> %va, <v
 ;
 ; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv2f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI36_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI36_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    frflags a0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
@@ -1387,17 +1183,13 @@ define <vscale x 2 x double> @vp_nearbyint_nxv2f64(<vscale x 2 x double> %va, <v
 ;
 ; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv2f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFHMIN-NEXT:    frflags a0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
@@ -1411,9 +1203,9 @@ define <vscale x 2 x double> @vp_nearbyint_nxv2f64(<vscale x 2 x double> %va, <v
 define <vscale x 2 x double> @vp_nearbyint_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_nearbyint_nxv2f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI37_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI37_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI37_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI37_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v10, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    frflags a0
@@ -1426,7 +1218,7 @@ define <vscale x 2 x double> @vp_nearbyint_nxv2f64_unmasked(<vscale x 2 x double
 ;
 ; RV64ZVFH-LABEL: vp_nearbyint_nxv2f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1442,9 +1234,9 @@ define <vscale x 2 x double> @vp_nearbyint_nxv2f64_unmasked(<vscale x 2 x double
 ;
 ; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv2f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI37_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI37_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI37_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI37_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    frflags a0
@@ -1457,7 +1249,7 @@ define <vscale x 2 x double> @vp_nearbyint_nxv2f64_unmasked(<vscale x 2 x double
 ;
 ; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv2f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1477,16 +1269,12 @@ define <vscale x 2 x double> @vp_nearbyint_nxv2f64_unmasked(<vscale x 2 x double
 define <vscale x 4 x double> @vp_nearbyint_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_nearbyint_nxv4f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI38_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI38_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v12, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    frflags a0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v16
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -1496,17 +1284,13 @@ define <vscale x 4 x double> @vp_nearbyint_nxv4f64(<vscale x 4 x double> %va, <v
 ;
 ; RV64ZVFH-LABEL: vp_nearbyint_nxv4f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v16, v0
-; RV64ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFH-NEXT:    frflags a0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v16
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -1516,16 +1300,12 @@ define <vscale x 4 x double> @vp_nearbyint_nxv4f64(<vscale x 4 x double> %va, <v
 ;
 ; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv4f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI38_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI38_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    frflags a0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -1535,17 +1315,13 @@ define <vscale x 4 x double> @vp_nearbyint_nxv4f64(<vscale x 4 x double> %va, <v
 ;
 ; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv4f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFHMIN-NEXT:    frflags a0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -1559,9 +1335,9 @@ define <vscale x 4 x double> @vp_nearbyint_nxv4f64(<vscale x 4 x double> %va, <v
 define <vscale x 4 x double> @vp_nearbyint_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_nearbyint_nxv4f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI39_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI39_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI39_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v12, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    frflags a0
@@ -1574,7 +1350,7 @@ define <vscale x 4 x double> @vp_nearbyint_nxv4f64_unmasked(<vscale x 4 x double
 ;
 ; RV64ZVFH-LABEL: vp_nearbyint_nxv4f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1590,9 +1366,9 @@ define <vscale x 4 x double> @vp_nearbyint_nxv4f64_unmasked(<vscale x 4 x double
 ;
 ; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv4f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI39_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI39_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI39_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    frflags a0
@@ -1605,7 +1381,7 @@ define <vscale x 4 x double> @vp_nearbyint_nxv4f64_unmasked(<vscale x 4 x double
 ;
 ; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv4f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1625,16 +1401,12 @@ define <vscale x 4 x double> @vp_nearbyint_nxv4f64_unmasked(<vscale x 4 x double
 define <vscale x 7 x double> @vp_nearbyint_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_nearbyint_nxv7f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    frflags a0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1644,17 +1416,13 @@ define <vscale x 7 x double> @vp_nearbyint_nxv7f64(<vscale x 7 x double> %va, <v
 ;
 ; RV64ZVFH-LABEL: vp_nearbyint_nxv7f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    frflags a0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1664,16 +1432,12 @@ define <vscale x 7 x double> @vp_nearbyint_nxv7f64(<vscale x 7 x double> %va, <v
 ;
 ; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv7f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    frflags a0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1683,17 +1447,13 @@ define <vscale x 7 x double> @vp_nearbyint_nxv7f64(<vscale x 7 x double> %va, <v
 ;
 ; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv7f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    frflags a0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1707,9 +1467,9 @@ define <vscale x 7 x double> @vp_nearbyint_nxv7f64(<vscale x 7 x double> %va, <v
 define <vscale x 7 x double> @vp_nearbyint_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_nearbyint_nxv7f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI41_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI41_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI41_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    frflags a0
@@ -1722,7 +1482,7 @@ define <vscale x 7 x double> @vp_nearbyint_nxv7f64_unmasked(<vscale x 7 x double
 ;
 ; RV64ZVFH-LABEL: vp_nearbyint_nxv7f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1738,9 +1498,9 @@ define <vscale x 7 x double> @vp_nearbyint_nxv7f64_unmasked(<vscale x 7 x double
 ;
 ; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv7f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI41_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI41_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI41_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    frflags a0
@@ -1753,7 +1513,7 @@ define <vscale x 7 x double> @vp_nearbyint_nxv7f64_unmasked(<vscale x 7 x double
 ;
 ; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv7f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1773,16 +1533,12 @@ define <vscale x 7 x double> @vp_nearbyint_nxv7f64_unmasked(<vscale x 7 x double
 define <vscale x 8 x double> @vp_nearbyint_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_nearbyint_nxv8f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    frflags a0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1792,17 +1548,13 @@ define <vscale x 8 x double> @vp_nearbyint_nxv8f64(<vscale x 8 x double> %va, <v
 ;
 ; RV64ZVFH-LABEL: vp_nearbyint_nxv8f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    frflags a0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1812,16 +1564,12 @@ define <vscale x 8 x double> @vp_nearbyint_nxv8f64(<vscale x 8 x double> %va, <v
 ;
 ; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv8f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    frflags a0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1831,17 +1579,13 @@ define <vscale x 8 x double> @vp_nearbyint_nxv8f64(<vscale x 8 x double> %va, <v
 ;
 ; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv8f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    frflags a0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1855,9 +1599,9 @@ define <vscale x 8 x double> @vp_nearbyint_nxv8f64(<vscale x 8 x double> %va, <v
 define <vscale x 8 x double> @vp_nearbyint_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_nearbyint_nxv8f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI43_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI43_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI43_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    frflags a0
@@ -1870,7 +1614,7 @@ define <vscale x 8 x double> @vp_nearbyint_nxv8f64_unmasked(<vscale x 8 x double
 ;
 ; RV64ZVFH-LABEL: vp_nearbyint_nxv8f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1886,9 +1630,9 @@ define <vscale x 8 x double> @vp_nearbyint_nxv8f64_unmasked(<vscale x 8 x double
 ;
 ; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv8f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI43_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI43_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI43_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    frflags a0
@@ -1901,7 +1645,7 @@ define <vscale x 8 x double> @vp_nearbyint_nxv8f64_unmasked(<vscale x 8 x double
 ;
 ; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv8f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1923,183 +1667,103 @@ define <vscale x 8 x double> @vp_nearbyint_nxv8f64_unmasked(<vscale x 8 x double
 define <vscale x 16 x double> @vp_nearbyint_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_nearbyint_nxv16f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v7, v0
-; RV32ZVFH-NEXT:    csrr a1, vlenb
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI44_0)
-; RV32ZVFH-NEXT:    srli a3, a1, 3
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI44_0)(a2)
-; RV32ZVFH-NEXT:    sub a2, a0, a1
-; RV32ZVFH-NEXT:    vslidedown.vx v6, v0, a3
-; RV32ZVFH-NEXT:    sltu a3, a0, a2
-; RV32ZVFH-NEXT:    addi a3, a3, -1
-; RV32ZVFH-NEXT:    and a2, a3, a2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    frflags a2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI44_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v24, v8
+; RV32ZVFH-NEXT:    frflags a0
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    fsflags a2
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFH-NEXT:    bltu a0, a1, .LBB44_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    mv a0, a1
-; RV32ZVFH-NEXT:  .LBB44_2:
-; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT:    fsflags a0
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    frflags a0
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFH-NEXT:    fsflags a0
 ; RV32ZVFH-NEXT:    ret
 ;
 ; RV64ZVFH-LABEL: vp_nearbyint_nxv16f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v7, v0
-; RV64ZVFH-NEXT:    csrr a1, vlenb
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    srli a3, a1, 3
-; RV64ZVFH-NEXT:    vslidedown.vx v6, v0, a3
-; RV64ZVFH-NEXT:    sub a3, a0, a1
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    sltu a2, a0, a3
-; RV64ZVFH-NEXT:    addi a2, a2, -1
-; RV64ZVFH-NEXT:    and a2, a2, a3
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    frflags a2
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v24, v8
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    frflags a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    fsflags a2
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFH-NEXT:    bltu a0, a1, .LBB44_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    mv a0, a1
-; RV64ZVFH-NEXT:  .LBB44_2:
-; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT:    fsflags a0
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    frflags a0
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFH-NEXT:    fsflags a0
 ; RV64ZVFH-NEXT:    ret
 ;
 ; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv16f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; RV32ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI44_0)
-; RV32ZVFHMIN-NEXT:    srli a3, a1, 3
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI44_0)(a2)
-; RV32ZVFHMIN-NEXT:    sub a2, a0, a1
-; RV32ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; RV32ZVFHMIN-NEXT:    sltu a3, a0, a2
-; RV32ZVFHMIN-NEXT:    addi a3, a3, -1
-; RV32ZVFHMIN-NEXT:    and a2, a3, a2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    frflags a2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI44_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT:    frflags a0
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFHMIN-NEXT:    fsflags a2
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    bltu a0, a1, .LBB44_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    mv a0, a1
-; RV32ZVFHMIN-NEXT:  .LBB44_2:
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    fsflags a0
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    frflags a0
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    fsflags a0
 ; RV32ZVFHMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv16f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; RV64ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    srli a3, a1, 3
-; RV64ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; RV64ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV64ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV64ZVFHMIN-NEXT:    and a2, a2, a3
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    frflags a2
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    frflags a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    fsflags a2
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    bltu a0, a1, .LBB44_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    mv a0, a1
-; RV64ZVFHMIN-NEXT:  .LBB44_2:
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    fsflags a0
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    frflags a0
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    fsflags a0
 ; RV64ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
@@ -2109,135 +1773,103 @@ define <vscale x 16 x double> @vp_nearbyint_nxv16f64(<vscale x 16 x double> %va,
 define <vscale x 16 x double> @vp_nearbyint_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_nearbyint_nxv16f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    csrr a1, vlenb
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI45_0)
-; RV32ZVFH-NEXT:    sub a3, a0, a1
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI45_0)(a2)
-; RV32ZVFH-NEXT:    sltu a2, a0, a3
-; RV32ZVFH-NEXT:    addi a2, a2, -1
-; RV32ZVFH-NEXT:    and a2, a2, a3
-; RV32ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16
-; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFH-NEXT:    frflags a2
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    fsflags a2
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFH-NEXT:    bltu a0, a1, .LBB45_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    mv a0, a1
-; RV32ZVFH-NEXT:  .LBB45_2:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI45_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI45_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v8
-; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
 ; RV32ZVFH-NEXT:    frflags a0
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT:    fsflags a0
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT:    frflags a0
+; RV32ZVFH-NEXT:    vmv1r.v v0, v7
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFH-NEXT:    fsflags a0
 ; RV32ZVFH-NEXT:    ret
 ;
 ; RV64ZVFH-LABEL: vp_nearbyint_nxv16f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    csrr a1, vlenb
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    sub a3, a0, a1
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    sltu a2, a0, a3
-; RV64ZVFH-NEXT:    addi a2, a2, -1
-; RV64ZVFH-NEXT:    and a2, a2, a3
-; RV64ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16
-; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFH-NEXT:    frflags a2
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    fsflags a2
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFH-NEXT:    bltu a0, a1, .LBB45_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    mv a0, a1
-; RV64ZVFH-NEXT:  .LBB45_2:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v8
-; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    frflags a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT:    fsflags a0
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT:    frflags a0
+; RV64ZVFH-NEXT:    vmv1r.v v0, v7
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFH-NEXT:    fsflags a0
 ; RV64ZVFH-NEXT:    ret
 ;
 ; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv16f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI45_0)
-; RV32ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI45_0)(a2)
-; RV32ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV32ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV32ZVFHMIN-NEXT:    and a2, a2, a3
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFHMIN-NEXT:    frflags a2
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFHMIN-NEXT:    fsflags a2
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    bltu a0, a1, .LBB45_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    mv a0, a1
-; RV32ZVFHMIN-NEXT:  .LBB45_2:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI45_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI45_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
 ; RV32ZVFHMIN-NEXT:    frflags a0
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT:    fsflags a0
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    frflags a0
+; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    fsflags a0
 ; RV32ZVFHMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv16f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV64ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV64ZVFHMIN-NEXT:    and a2, a2, a3
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFHMIN-NEXT:    frflags a2
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    fsflags a2
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    bltu a0, a1, .LBB45_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    mv a0, a1
-; RV64ZVFHMIN-NEXT:  .LBB45_2:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    frflags a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT:    fsflags a0
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    frflags a0
+; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    fsflags a0
 ; RV64ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)

diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
index 82235f08b5226..538abb6191877 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -15,25 +15,19 @@
 define <vscale x 1 x bfloat> @vp_rint_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv1bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v8, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v11, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v11, fa5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v11, v10, v0.t
-; CHECK-NEXT:    vfcvt.f.x.v v11, v11, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x bfloat> @llvm.vp.rint.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x bfloat> %v
@@ -42,7 +36,7 @@ define <vscale x 1 x bfloat> @vp_rint_nxv1bf16(<vscale x 1 x bfloat> %va, <vscal
 define <vscale x 1 x bfloat> @vp_rint_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv1bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -63,25 +57,19 @@ define <vscale x 1 x bfloat> @vp_rint_nxv1bf16_unmasked(<vscale x 1 x bfloat> %v
 define <vscale x 2 x bfloat> @vp_rint_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv2bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v8, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v11, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v11, fa5, v0.t
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v11, v10, v0.t
-; CHECK-NEXT:    vfcvt.f.x.v v11, v11, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.vp.rint.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x bfloat> %v
@@ -90,7 +78,7 @@ define <vscale x 2 x bfloat> @vp_rint_nxv2bf16(<vscale x 2 x bfloat> %va, <vscal
 define <vscale x 2 x bfloat> @vp_rint_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv2bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -111,25 +99,19 @@ define <vscale x 2 x bfloat> @vp_rint_nxv2bf16_unmasked(<vscale x 2 x bfloat> %v
 define <vscale x 4 x bfloat> @vp_rint_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv4bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v13, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v10
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v13, v8, fa5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
 ; CHECK-NEXT:    ret
   %v = call <vscale x 4 x bfloat> @llvm.vp.rint.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x bfloat> %v
@@ -138,7 +120,7 @@ define <vscale x 4 x bfloat> @vp_rint_nxv4bf16(<vscale x 4 x bfloat> %va, <vscal
 define <vscale x 4 x bfloat> @vp_rint_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv4bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -159,25 +141,19 @@ define <vscale x 4 x bfloat> @vp_rint_nxv4bf16_unmasked(<vscale x 4 x bfloat> %v
 define <vscale x 8 x bfloat> @vp_rint_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv8bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v17, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v12, v0.t
+; CHECK-NEXT:    vfabs.v v8, v12
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v17, v8, fa5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v17
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x bfloat> @llvm.vp.rint.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x bfloat> %v
@@ -186,7 +162,7 @@ define <vscale x 8 x bfloat> @vp_rint_nxv8bf16(<vscale x 8 x bfloat> %va, <vscal
 define <vscale x 8 x bfloat> @vp_rint_nxv8bf16_unmasked(<vscale x 8 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv8bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -207,25 +183,19 @@ define <vscale x 8 x bfloat> @vp_rint_nxv8bf16_unmasked(<vscale x 8 x bfloat> %v
 define <vscale x 16 x bfloat> @vp_rint_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv16bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v16, v0.t
+; CHECK-NEXT:    vfabs.v v8, v16
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v25, v8, fa5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x bfloat> @llvm.vp.rint.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x bfloat> %v
@@ -234,7 +204,7 @@ define <vscale x 16 x bfloat> @vp_rint_nxv16bf16(<vscale x 16 x bfloat> %va, <vs
 define <vscale x 16 x bfloat> @vp_rint_nxv16bf16_unmasked(<vscale x 16 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv16bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
@@ -255,56 +225,31 @@ define <vscale x 16 x bfloat> @vp_rint_nxv16bf16_unmasked(<vscale x 16 x bfloat>
 define <vscale x 32 x bfloat> @vp_rint_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv32bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    lui a3, 307200
-; CHECK-NEXT:    slli a1, a2, 1
-; CHECK-NEXT:    srli a2, a2, 2
-; CHECK-NEXT:    fmv.w.x fa5, a3
-; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vslidedown.vx v6, v0, a2
-; CHECK-NEXT:    sltu a2, a0, a3
-; CHECK-NEXT:    vmv1r.v v5, v6
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a3
-; CHECK-NEXT:    vmv1r.v v0, v6
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v5, v16, fa5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v5
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vfabs.v v24, v16
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
+; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v6
+; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB10_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB10_2:
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v7
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v16, fa5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfabs.v v8, v24
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v7
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v24, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.rint.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -313,51 +258,31 @@ define <vscale x 32 x bfloat> @vp_rint_nxv32bf16(<vscale x 32 x bfloat> %va, <vs
 define <vscale x 32 x bfloat> @vp_rint_nxv32bf16_unmasked(<vscale x 32 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv32bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vmset.m v16
-; CHECK-NEXT:    lui a3, 307200
-; CHECK-NEXT:    slli a1, a2, 1
-; CHECK-NEXT:    srli a2, a2, 2
-; CHECK-NEXT:    fmv.w.x fa5, a3
-; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v7, v16, a2
-; CHECK-NEXT:    sltu a2, a0, a3
-; CHECK-NEXT:    vmv1r.v v6, v7
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a3
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v6, v16, fa5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v6
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB11_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB11_2:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v16
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfabs.v v8, v24
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.rint.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -366,14 +291,12 @@ define <vscale x 32 x bfloat> @vp_rint_nxv32bf16_unmasked(<vscale x 32 x bfloat>
 define <vscale x 1 x half> @vp_rint_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_rint_nxv1f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
@@ -382,25 +305,19 @@ define <vscale x 1 x half> @vp_rint_nxv1f16(<vscale x 1 x half> %va, <vscale x 1
 ;
 ; ZVFHMIN-LABEL: vp_rint_nxv1f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 1 x half> @llvm.vp.rint.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
@@ -409,7 +326,7 @@ define <vscale x 1 x half> @vp_rint_nxv1f16(<vscale x 1 x half> %va, <vscale x 1
 define <vscale x 1 x half> @vp_rint_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_rint_nxv1f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -423,7 +340,7 @@ define <vscale x 1 x half> @vp_rint_nxv1f16_unmasked(<vscale x 1 x half> %va, i3
 ;
 ; ZVFHMIN-LABEL: vp_rint_nxv1f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -444,14 +361,12 @@ define <vscale x 1 x half> @vp_rint_nxv1f16_unmasked(<vscale x 1 x half> %va, i3
 define <vscale x 2 x half> @vp_rint_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_rint_nxv2f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
@@ -460,25 +375,19 @@ define <vscale x 2 x half> @vp_rint_nxv2f16(<vscale x 2 x half> %va, <vscale x 2
 ;
 ; ZVFHMIN-LABEL: vp_rint_nxv2f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
-; ZVFHMIN-NEXT:    vmv.v.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x half> @llvm.vp.rint.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
@@ -487,7 +396,7 @@ define <vscale x 2 x half> @vp_rint_nxv2f16(<vscale x 2 x half> %va, <vscale x 2
 define <vscale x 2 x half> @vp_rint_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_rint_nxv2f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -501,7 +410,7 @@ define <vscale x 2 x half> @vp_rint_nxv2f16_unmasked(<vscale x 2 x half> %va, i3
 ;
 ; ZVFHMIN-LABEL: vp_rint_nxv2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -522,14 +431,12 @@ define <vscale x 2 x half> @vp_rint_nxv2f16_unmasked(<vscale x 2 x half> %va, i3
 define <vscale x 4 x half> @vp_rint_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_rint_nxv4f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
@@ -538,25 +445,19 @@ define <vscale x 4 x half> @vp_rint_nxv4f16(<vscale x 4 x half> %va, <vscale x 4
 ;
 ; ZVFHMIN-LABEL: vp_rint_nxv4f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v13, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v13, v8, fa5, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v13
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 4 x half> @llvm.vp.rint.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
@@ -565,7 +466,7 @@ define <vscale x 4 x half> @vp_rint_nxv4f16(<vscale x 4 x half> %va, <vscale x 4
 define <vscale x 4 x half> @vp_rint_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_rint_nxv4f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -579,7 +480,7 @@ define <vscale x 4 x half> @vp_rint_nxv4f16_unmasked(<vscale x 4 x half> %va, i3
 ;
 ; ZVFHMIN-LABEL: vp_rint_nxv4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -600,16 +501,12 @@ define <vscale x 4 x half> @vp_rint_nxv4f16_unmasked(<vscale x 4 x half> %va, i3
 define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_rint_nxv8f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFH-NEXT:    vmv1r.v v12, v0
-; ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; ZVFH-NEXT:    vmv1r.v v0, v12
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
@@ -618,25 +515,19 @@ define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8
 ;
 ; ZVFHMIN-LABEL: vp_rint_nxv8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v17, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v12
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v17, v8, fa5, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v17
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 8 x half> @llvm.vp.rint.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
@@ -645,7 +536,7 @@ define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8
 define <vscale x 8 x half> @vp_rint_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_rint_nxv8f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -659,7 +550,7 @@ define <vscale x 8 x half> @vp_rint_nxv8f16_unmasked(<vscale x 8 x half> %va, i3
 ;
 ; ZVFHMIN-LABEL: vp_rint_nxv8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -680,16 +571,12 @@ define <vscale x 8 x half> @vp_rint_nxv8f16_unmasked(<vscale x 8 x half> %va, i3
 define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_rint_nxv16f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFH-NEXT:    vmv1r.v v16, v0
-; ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfabs.v v12, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; ZVFH-NEXT:    vmv1r.v v0, v16
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
@@ -698,25 +585,19 @@ define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale
 ;
 ; ZVFHMIN-LABEL: vp_rint_nxv16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v25, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v16
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v25, v8, fa5, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v16, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v24
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x half> @llvm.vp.rint.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
@@ -725,7 +606,7 @@ define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale
 define <vscale x 16 x half> @vp_rint_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_rint_nxv16f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v12, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -739,7 +620,7 @@ define <vscale x 16 x half> @vp_rint_nxv16f16_unmasked(<vscale x 16 x half> %va,
 ;
 ; ZVFHMIN-LABEL: vp_rint_nxv16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
@@ -760,16 +641,12 @@ define <vscale x 16 x half> @vp_rint_nxv16f16_unmasked(<vscale x 16 x half> %va,
 define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_rint_nxv32f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT:    vmv1r.v v24, v0
-; ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT:    vfabs.v v16, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; ZVFH-NEXT:    vmv1r.v v0, v24
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
+; ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
@@ -778,56 +655,31 @@ define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale
 ;
 ; ZVFHMIN-LABEL: vp_rint_nxv32f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    lui a3, 307200
-; ZVFHMIN-NEXT:    slli a1, a2, 1
-; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a3
-; ZVFHMIN-NEXT:    sub a3, a0, a1
-; ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a2
-; ZVFHMIN-NEXT:    sltu a2, a0, a3
-; ZVFHMIN-NEXT:    vmv1r.v v5, v6
-; ZVFHMIN-NEXT:    addi a2, a2, -1
-; ZVFHMIN-NEXT:    and a2, a2, a3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v5, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v5
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v24, v16
+; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB22_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB22_2:
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v8, v7
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
+; ZVFHMIN-NEXT:    vfabs.v v8, v24
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.rint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -836,7 +688,7 @@ define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale
 define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_rint_nxv32f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfabs.v v16, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -850,51 +702,31 @@ define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va,
 ;
 ; ZVFHMIN-LABEL: vp_rint_nxv32f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
-; ZVFHMIN-NEXT:    lui a3, 307200
-; ZVFHMIN-NEXT:    slli a1, a2, 1
-; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a3
-; ZVFHMIN-NEXT:    sub a3, a0, a1
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v7, v16, a2
-; ZVFHMIN-NEXT:    sltu a2, a0, a3
-; ZVFHMIN-NEXT:    vmv1r.v v6, v7
-; ZVFHMIN-NEXT:    addi a2, a2, -1
-; ZVFHMIN-NEXT:    and a2, a2, a3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v6, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB23_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB23_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v24, v16
+; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfabs.v v8, v24
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.rint.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -903,13 +735,11 @@ define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va,
 define <vscale x 1 x float> @vp_rint_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
@@ -922,7 +752,7 @@ define <vscale x 1 x float> @vp_rint_nxv1f32(<vscale x 1 x float> %va, <vscale x
 define <vscale x 1 x float> @vp_rint_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv1f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -939,13 +769,11 @@ define <vscale x 1 x float> @vp_rint_nxv1f32_unmasked(<vscale x 1 x float> %va,
 define <vscale x 2 x float> @vp_rint_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
@@ -958,7 +786,7 @@ define <vscale x 2 x float> @vp_rint_nxv2f32(<vscale x 2 x float> %va, <vscale x
 define <vscale x 2 x float> @vp_rint_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -975,15 +803,11 @@ define <vscale x 2 x float> @vp_rint_nxv2f32_unmasked(<vscale x 2 x float> %va,
 define <vscale x 4 x float> @vp_rint_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
@@ -996,7 +820,7 @@ define <vscale x 4 x float> @vp_rint_nxv4f32(<vscale x 4 x float> %va, <vscale x
 define <vscale x 4 x float> @vp_rint_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1013,15 +837,11 @@ define <vscale x 4 x float> @vp_rint_nxv4f32_unmasked(<vscale x 4 x float> %va,
 define <vscale x 8 x float> @vp_rint_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
@@ -1034,7 +854,7 @@ define <vscale x 8 x float> @vp_rint_nxv8f32(<vscale x 8 x float> %va, <vscale x
 define <vscale x 8 x float> @vp_rint_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1051,15 +871,11 @@ define <vscale x 8 x float> @vp_rint_nxv8f32_unmasked(<vscale x 8 x float> %va,
 define <vscale x 16 x float> @vp_rint_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    vfabs.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
@@ -1072,7 +888,7 @@ define <vscale x 16 x float> @vp_rint_nxv16f32(<vscale x 16 x float> %va, <vscal
 define <vscale x 16 x float> @vp_rint_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_rint_nxv16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1089,13 +905,11 @@ define <vscale x 16 x float> @vp_rint_nxv16f32_unmasked(<vscale x 16 x float> %v
 define <vscale x 1 x double> @vp_rint_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_rint_nxv1f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI34_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v9, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -1104,14 +918,12 @@ define <vscale x 1 x double> @vp_rint_nxv1f64(<vscale x 1 x double> %va, <vscale
 ;
 ; RV64ZVFH-LABEL: vp_rint_nxv1f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -1120,13 +932,11 @@ define <vscale x 1 x double> @vp_rint_nxv1f64(<vscale x 1 x double> %va, <vscale
 ;
 ; RV32ZVFMIN-LABEL: vp_rint_nxv1f64:
 ; RV32ZVFMIN:       # %bb.0:
-; RV32ZVFMIN-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI34_0)(a1)
-; RV32ZVFMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFMIN-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFMIN-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
+; RV32ZVFMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFMIN-NEXT:    vfabs.v v9, v8
+; RV32ZVFMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -1135,14 +945,12 @@ define <vscale x 1 x double> @vp_rint_nxv1f64(<vscale x 1 x double> %va, <vscale
 ;
 ; RV64ZVFHMIN-LABEL: vp_rint_nxv1f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
@@ -1155,9 +963,9 @@ define <vscale x 1 x double> @vp_rint_nxv1f64(<vscale x 1 x double> %va, <vscale
 define <vscale x 1 x double> @vp_rint_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_rint_nxv1f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI35_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI35_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI35_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v9, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -1168,7 +976,7 @@ define <vscale x 1 x double> @vp_rint_nxv1f64_unmasked(<vscale x 1 x double> %va
 ;
 ; RV64ZVFH-LABEL: vp_rint_nxv1f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1182,9 +990,9 @@ define <vscale x 1 x double> @vp_rint_nxv1f64_unmasked(<vscale x 1 x double> %va
 ;
 ; RV32ZVFMIN-LABEL: vp_rint_nxv1f64_unmasked:
 ; RV32ZVFMIN:       # %bb.0:
-; RV32ZVFMIN-NEXT:    lui a1, %hi(.LCPI35_0)
-; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI35_0)(a1)
-; RV32ZVFMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFMIN-NEXT:    lui a0, %hi(.LCPI35_0)
+; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
+; RV32ZVFMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32ZVFMIN-NEXT:    vfabs.v v9, v8
 ; RV32ZVFMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
@@ -1195,7 +1003,7 @@ define <vscale x 1 x double> @vp_rint_nxv1f64_unmasked(<vscale x 1 x double> %va
 ;
 ; RV64ZVFHMIN-LABEL: vp_rint_nxv1f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1213,15 +1021,11 @@ define <vscale x 1 x double> @vp_rint_nxv1f64_unmasked(<vscale x 1 x double> %va
 define <vscale x 2 x double> @vp_rint_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_rint_nxv2f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI36_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI36_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; RV32ZVFH-NEXT:    vmv1r.v v0, v12
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v10, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
@@ -1230,16 +1034,12 @@ define <vscale x 2 x double> @vp_rint_nxv2f64(<vscale x 2 x double> %va, <vscale
 ;
 ; RV64ZVFH-LABEL: vp_rint_nxv2f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v12, v0
-; RV64ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; RV64ZVFH-NEXT:    vmv1r.v v0, v12
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
@@ -1248,15 +1048,11 @@ define <vscale x 2 x double> @vp_rint_nxv2f64(<vscale x 2 x double> %va, <vscale
 ;
 ; RV32ZVFMIN-LABEL: vp_rint_nxv2f64:
 ; RV32ZVFMIN:       # %bb.0:
-; RV32ZVFMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFMIN-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFMIN-NEXT:    lui a0, %hi(.LCPI36_0)
 ; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI36_0)(a0)
-; RV32ZVFMIN-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; RV32ZVFMIN-NEXT:    vmv1r.v v0, v12
-; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFMIN-NEXT:    vfabs.v v10, v8
+; RV32ZVFMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
@@ -1265,16 +1061,12 @@ define <vscale x 2 x double> @vp_rint_nxv2f64(<vscale x 2 x double> %va, <vscale
 ;
 ; RV64ZVFHMIN-LABEL: vp_rint_nxv2f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
@@ -1287,9 +1079,9 @@ define <vscale x 2 x double> @vp_rint_nxv2f64(<vscale x 2 x double> %va, <vscale
 define <vscale x 2 x double> @vp_rint_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_rint_nxv2f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI37_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI37_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI37_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI37_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v10, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
@@ -1300,7 +1092,7 @@ define <vscale x 2 x double> @vp_rint_nxv2f64_unmasked(<vscale x 2 x double> %va
 ;
 ; RV64ZVFH-LABEL: vp_rint_nxv2f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1314,9 +1106,9 @@ define <vscale x 2 x double> @vp_rint_nxv2f64_unmasked(<vscale x 2 x double> %va
 ;
 ; RV32ZVFMIN-LABEL: vp_rint_nxv2f64_unmasked:
 ; RV32ZVFMIN:       # %bb.0:
-; RV32ZVFMIN-NEXT:    lui a1, %hi(.LCPI37_0)
-; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI37_0)(a1)
-; RV32ZVFMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFMIN-NEXT:    lui a0, %hi(.LCPI37_0)
+; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI37_0)(a0)
+; RV32ZVFMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV32ZVFMIN-NEXT:    vfabs.v v10, v8
 ; RV32ZVFMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
@@ -1327,7 +1119,7 @@ define <vscale x 2 x double> @vp_rint_nxv2f64_unmasked(<vscale x 2 x double> %va
 ;
 ; RV64ZVFHMIN-LABEL: vp_rint_nxv2f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1345,15 +1137,11 @@ define <vscale x 2 x double> @vp_rint_nxv2f64_unmasked(<vscale x 2 x double> %va
 define <vscale x 4 x double> @vp_rint_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_rint_nxv4f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI38_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI38_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; RV32ZVFH-NEXT:    vmv1r.v v0, v16
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v12, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -1362,16 +1150,12 @@ define <vscale x 4 x double> @vp_rint_nxv4f64(<vscale x 4 x double> %va, <vscale
 ;
 ; RV64ZVFH-LABEL: vp_rint_nxv4f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v16, v0
-; RV64ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; RV64ZVFH-NEXT:    vmv1r.v v0, v16
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -1380,15 +1164,11 @@ define <vscale x 4 x double> @vp_rint_nxv4f64(<vscale x 4 x double> %va, <vscale
 ;
 ; RV32ZVFMIN-LABEL: vp_rint_nxv4f64:
 ; RV32ZVFMIN:       # %bb.0:
-; RV32ZVFMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFMIN-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFMIN-NEXT:    lui a0, %hi(.LCPI38_0)
 ; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI38_0)(a0)
-; RV32ZVFMIN-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; RV32ZVFMIN-NEXT:    vmv1r.v v0, v16
-; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFMIN-NEXT:    vfabs.v v12, v8
+; RV32ZVFMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -1397,16 +1177,12 @@ define <vscale x 4 x double> @vp_rint_nxv4f64(<vscale x 4 x double> %va, <vscale
 ;
 ; RV64ZVFHMIN-LABEL: vp_rint_nxv4f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
@@ -1419,9 +1195,9 @@ define <vscale x 4 x double> @vp_rint_nxv4f64(<vscale x 4 x double> %va, <vscale
 define <vscale x 4 x double> @vp_rint_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_rint_nxv4f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI39_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI39_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI39_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v12, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -1432,7 +1208,7 @@ define <vscale x 4 x double> @vp_rint_nxv4f64_unmasked(<vscale x 4 x double> %va
 ;
 ; RV64ZVFH-LABEL: vp_rint_nxv4f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1446,9 +1222,9 @@ define <vscale x 4 x double> @vp_rint_nxv4f64_unmasked(<vscale x 4 x double> %va
 ;
 ; RV32ZVFMIN-LABEL: vp_rint_nxv4f64_unmasked:
 ; RV32ZVFMIN:       # %bb.0:
-; RV32ZVFMIN-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI39_0)(a1)
-; RV32ZVFMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFMIN-NEXT:    lui a0, %hi(.LCPI39_0)
+; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI39_0)(a0)
+; RV32ZVFMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32ZVFMIN-NEXT:    vfabs.v v12, v8
 ; RV32ZVFMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
@@ -1459,7 +1235,7 @@ define <vscale x 4 x double> @vp_rint_nxv4f64_unmasked(<vscale x 4 x double> %va
 ;
 ; RV64ZVFHMIN-LABEL: vp_rint_nxv4f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1477,15 +1253,11 @@ define <vscale x 4 x double> @vp_rint_nxv4f64_unmasked(<vscale x 4 x double> %va
 define <vscale x 7 x double> @vp_rint_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_rint_nxv7f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1494,16 +1266,12 @@ define <vscale x 7 x double> @vp_rint_nxv7f64(<vscale x 7 x double> %va, <vscale
 ;
 ; RV64ZVFH-LABEL: vp_rint_nxv7f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1512,15 +1280,11 @@ define <vscale x 7 x double> @vp_rint_nxv7f64(<vscale x 7 x double> %va, <vscale
 ;
 ; RV32ZVFMIN-LABEL: vp_rint_nxv7f64:
 ; RV32ZVFMIN:       # %bb.0:
-; RV32ZVFMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFMIN-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
-; RV32ZVFMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV32ZVFMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1529,16 +1293,12 @@ define <vscale x 7 x double> @vp_rint_nxv7f64(<vscale x 7 x double> %va, <vscale
 ;
 ; RV64ZVFHMIN-LABEL: vp_rint_nxv7f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1551,9 +1311,9 @@ define <vscale x 7 x double> @vp_rint_nxv7f64(<vscale x 7 x double> %va, <vscale
 define <vscale x 7 x double> @vp_rint_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_rint_nxv7f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI41_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI41_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI41_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
@@ -1564,7 +1324,7 @@ define <vscale x 7 x double> @vp_rint_nxv7f64_unmasked(<vscale x 7 x double> %va
 ;
 ; RV64ZVFH-LABEL: vp_rint_nxv7f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1578,9 +1338,9 @@ define <vscale x 7 x double> @vp_rint_nxv7f64_unmasked(<vscale x 7 x double> %va
 ;
 ; RV32ZVFMIN-LABEL: vp_rint_nxv7f64_unmasked:
 ; RV32ZVFMIN:       # %bb.0:
-; RV32ZVFMIN-NEXT:    lui a1, %hi(.LCPI41_0)
-; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI41_0)(a1)
-; RV32ZVFMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT:    lui a0, %hi(.LCPI41_0)
+; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
+; RV32ZVFMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
@@ -1591,7 +1351,7 @@ define <vscale x 7 x double> @vp_rint_nxv7f64_unmasked(<vscale x 7 x double> %va
 ;
 ; RV64ZVFHMIN-LABEL: vp_rint_nxv7f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1609,15 +1369,11 @@ define <vscale x 7 x double> @vp_rint_nxv7f64_unmasked(<vscale x 7 x double> %va
 define <vscale x 8 x double> @vp_rint_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_rint_nxv8f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1626,16 +1382,12 @@ define <vscale x 8 x double> @vp_rint_nxv8f64(<vscale x 8 x double> %va, <vscale
 ;
 ; RV64ZVFH-LABEL: vp_rint_nxv8f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1644,15 +1396,11 @@ define <vscale x 8 x double> @vp_rint_nxv8f64(<vscale x 8 x double> %va, <vscale
 ;
 ; RV32ZVFMIN-LABEL: vp_rint_nxv8f64:
 ; RV32ZVFMIN:       # %bb.0:
-; RV32ZVFMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFMIN-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
-; RV32ZVFMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV32ZVFMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1661,16 +1409,12 @@ define <vscale x 8 x double> @vp_rint_nxv8f64(<vscale x 8 x double> %va, <vscale
 ;
 ; RV64ZVFHMIN-LABEL: vp_rint_nxv8f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
@@ -1683,9 +1427,9 @@ define <vscale x 8 x double> @vp_rint_nxv8f64(<vscale x 8 x double> %va, <vscale
 define <vscale x 8 x double> @vp_rint_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_rint_nxv8f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI43_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI43_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI43_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
@@ -1696,7 +1440,7 @@ define <vscale x 8 x double> @vp_rint_nxv8f64_unmasked(<vscale x 8 x double> %va
 ;
 ; RV64ZVFH-LABEL: vp_rint_nxv8f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1710,9 +1454,9 @@ define <vscale x 8 x double> @vp_rint_nxv8f64_unmasked(<vscale x 8 x double> %va
 ;
 ; RV32ZVFMIN-LABEL: vp_rint_nxv8f64_unmasked:
 ; RV32ZVFMIN:       # %bb.0:
-; RV32ZVFMIN-NEXT:    lui a1, %hi(.LCPI43_0)
-; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI43_0)(a1)
-; RV32ZVFMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT:    lui a0, %hi(.LCPI43_0)
+; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
+; RV32ZVFMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
@@ -1723,7 +1467,7 @@ define <vscale x 8 x double> @vp_rint_nxv8f64_unmasked(<vscale x 8 x double> %va
 ;
 ; RV64ZVFHMIN-LABEL: vp_rint_nxv8f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1743,168 +1487,88 @@ define <vscale x 8 x double> @vp_rint_nxv8f64_unmasked(<vscale x 8 x double> %va
 define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_rint_nxv16f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v7, v0
-; RV32ZVFH-NEXT:    csrr a1, vlenb
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI44_0)
-; RV32ZVFH-NEXT:    srli a3, a1, 3
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI44_0)(a2)
-; RV32ZVFH-NEXT:    sub a2, a0, a1
-; RV32ZVFH-NEXT:    vslidedown.vx v6, v0, a3
-; RV32ZVFH-NEXT:    sltu a3, a0, a2
-; RV32ZVFH-NEXT:    addi a3, a3, -1
-; RV32ZVFH-NEXT:    and a2, a3, a2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI44_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v24, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFH-NEXT:    bltu a0, a1, .LBB44_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    mv a0, a1
-; RV32ZVFH-NEXT:  .LBB44_2:
-; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFH-NEXT:    ret
 ;
 ; RV64ZVFH-LABEL: vp_rint_nxv16f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v7, v0
-; RV64ZVFH-NEXT:    csrr a1, vlenb
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    srli a3, a1, 3
-; RV64ZVFH-NEXT:    vslidedown.vx v6, v0, a3
-; RV64ZVFH-NEXT:    sub a3, a0, a1
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    sltu a2, a0, a3
-; RV64ZVFH-NEXT:    addi a2, a2, -1
-; RV64ZVFH-NEXT:    and a2, a2, a3
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v24, v8
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFH-NEXT:    bltu a0, a1, .LBB44_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    mv a0, a1
-; RV64ZVFH-NEXT:  .LBB44_2:
-; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFH-NEXT:    ret
 ;
 ; RV32ZVFMIN-LABEL: vp_rint_nxv16f64:
 ; RV32ZVFMIN:       # %bb.0:
-; RV32ZVFMIN-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV32ZVFMIN-NEXT:    vmv1r.v v7, v0
-; RV32ZVFMIN-NEXT:    csrr a1, vlenb
-; RV32ZVFMIN-NEXT:    lui a2, %hi(.LCPI44_0)
-; RV32ZVFMIN-NEXT:    srli a3, a1, 3
-; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI44_0)(a2)
-; RV32ZVFMIN-NEXT:    sub a2, a0, a1
-; RV32ZVFMIN-NEXT:    vslidedown.vx v6, v0, a3
-; RV32ZVFMIN-NEXT:    sltu a3, a0, a2
-; RV32ZVFMIN-NEXT:    addi a3, a3, -1
-; RV32ZVFMIN-NEXT:    and a2, a3, a2
-; RV32ZVFMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFMIN-NEXT:    lui a0, %hi(.LCPI44_0)
+; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
+; RV32ZVFMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT:    vfabs.v v24, v8
+; RV32ZVFMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZVFMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFMIN-NEXT:    bltu a0, a1, .LBB44_2
-; RV32ZVFMIN-NEXT:  # %bb.1:
-; RV32ZVFMIN-NEXT:    mv a0, a1
-; RV32ZVFMIN-NEXT:  .LBB44_2:
-; RV32ZVFMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFMIN-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vp_rint_nxv16f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; RV64ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    srli a3, a1, 3
-; RV64ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; RV64ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV64ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV64ZVFHMIN-NEXT:    and a2, a2, a3
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    bltu a0, a1, .LBB44_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    mv a0, a1
-; RV64ZVFHMIN-NEXT:  .LBB44_2:
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.rint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
@@ -1913,120 +1577,88 @@ define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vsc
 define <vscale x 16 x double> @vp_rint_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_rint_nxv16f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    csrr a1, vlenb
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI45_0)
-; RV32ZVFH-NEXT:    sub a3, a0, a1
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI45_0)(a2)
-; RV32ZVFH-NEXT:    sltu a2, a0, a3
-; RV32ZVFH-NEXT:    addi a2, a2, -1
-; RV32ZVFH-NEXT:    and a2, a2, a3
-; RV32ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16
-; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFH-NEXT:    bltu a0, a1, .LBB45_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    mv a0, a1
-; RV32ZVFH-NEXT:  .LBB45_2:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI45_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI45_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT:    vmv1r.v v0, v7
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFH-NEXT:    ret
 ;
 ; RV64ZVFH-LABEL: vp_rint_nxv16f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    csrr a1, vlenb
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    sub a3, a0, a1
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    sltu a2, a0, a3
-; RV64ZVFH-NEXT:    addi a2, a2, -1
-; RV64ZVFH-NEXT:    and a2, a2, a3
-; RV64ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16
-; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFH-NEXT:    bltu a0, a1, .LBB45_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    mv a0, a1
-; RV64ZVFH-NEXT:  .LBB45_2:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v8
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT:    vmv1r.v v0, v7
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFH-NEXT:    ret
 ;
 ; RV32ZVFMIN-LABEL: vp_rint_nxv16f64_unmasked:
 ; RV32ZVFMIN:       # %bb.0:
-; RV32ZVFMIN-NEXT:    csrr a1, vlenb
-; RV32ZVFMIN-NEXT:    lui a2, %hi(.LCPI45_0)
-; RV32ZVFMIN-NEXT:    sub a3, a0, a1
-; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI45_0)(a2)
-; RV32ZVFMIN-NEXT:    sltu a2, a0, a3
-; RV32ZVFMIN-NEXT:    addi a2, a2, -1
-; RV32ZVFMIN-NEXT:    and a2, a2, a3
-; RV32ZVFMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFMIN-NEXT:    vfabs.v v24, v16
-; RV32ZVFMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFMIN-NEXT:    bltu a0, a1, .LBB45_2
-; RV32ZVFMIN-NEXT:  # %bb.1:
-; RV32ZVFMIN-NEXT:    mv a0, a1
-; RV32ZVFMIN-NEXT:  .LBB45_2:
-; RV32ZVFMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT:    lui a0, %hi(.LCPI45_0)
+; RV32ZVFMIN-NEXT:    fld fa5, %lo(.LCPI45_0)(a0)
+; RV32ZVFMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFMIN-NEXT:    vfabs.v v24, v8
 ; RV32ZVFMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZVFMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFMIN-NEXT:    vmv1r.v v0, v7
+; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vp_rint_nxv16f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV64ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV64ZVFHMIN-NEXT:    and a2, a2, a3
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    bltu a0, a1, .LBB45_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    mv a0, a1
-; RV64ZVFHMIN-NEXT:  .LBB45_2:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.rint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v

diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
index eae28fafb4300..91dd175d3cc2c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -15,27 +15,21 @@
 define <vscale x 1 x bfloat> @vp_round_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv1bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v8, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v11, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x bfloat> @llvm.vp.round.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x bfloat> %v
@@ -44,7 +38,7 @@ define <vscale x 1 x bfloat> @vp_round_nxv1bf16(<vscale x 1 x bfloat> %va, <vsca
 define <vscale x 1 x bfloat> @vp_round_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv1bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -67,27 +61,21 @@ define <vscale x 1 x bfloat> @vp_round_nxv1bf16_unmasked(<vscale x 1 x bfloat> %
 define <vscale x 2 x bfloat> @vp_round_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv2bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v8, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v11, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.vp.round.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x bfloat> %v
@@ -96,7 +84,7 @@ define <vscale x 2 x bfloat> @vp_round_nxv2bf16(<vscale x 2 x bfloat> %va, <vsca
 define <vscale x 2 x bfloat> @vp_round_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv2bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -119,27 +107,21 @@ define <vscale x 2 x bfloat> @vp_round_nxv2bf16_unmasked(<vscale x 2 x bfloat> %
 define <vscale x 4 x bfloat> @vp_round_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv4bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v13, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v10
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v13, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
 ; CHECK-NEXT:    ret
   %v = call <vscale x 4 x bfloat> @llvm.vp.round.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x bfloat> %v
@@ -148,7 +130,7 @@ define <vscale x 4 x bfloat> @vp_round_nxv4bf16(<vscale x 4 x bfloat> %va, <vsca
 define <vscale x 4 x bfloat> @vp_round_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv4bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -171,27 +153,21 @@ define <vscale x 4 x bfloat> @vp_round_nxv4bf16_unmasked(<vscale x 4 x bfloat> %
 define <vscale x 8 x bfloat> @vp_round_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv8bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v17, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v12, v0.t
+; CHECK-NEXT:    vfabs.v v8, v12
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v17, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vmv1r.v v0, v17
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x bfloat> @llvm.vp.round.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x bfloat> %v
@@ -200,7 +176,7 @@ define <vscale x 8 x bfloat> @vp_round_nxv8bf16(<vscale x 8 x bfloat> %va, <vsca
 define <vscale x 8 x bfloat> @vp_round_nxv8bf16_unmasked(<vscale x 8 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv8bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -223,27 +199,21 @@ define <vscale x 8 x bfloat> @vp_round_nxv8bf16_unmasked(<vscale x 8 x bfloat> %
 define <vscale x 16 x bfloat> @vp_round_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv16bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v16, v0.t
+; CHECK-NEXT:    vfabs.v v8, v16
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x bfloat> @llvm.vp.round.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x bfloat> %v
@@ -252,7 +222,7 @@ define <vscale x 16 x bfloat> @vp_round_nxv16bf16(<vscale x 16 x bfloat> %va, <v
 define <vscale x 16 x bfloat> @vp_round_nxv16bf16_unmasked(<vscale x 16 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv16bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
@@ -275,60 +245,35 @@ define <vscale x 16 x bfloat> @vp_round_nxv16bf16_unmasked(<vscale x 16 x bfloat
 define <vscale x 32 x bfloat> @vp_round_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv32bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    lui a3, 307200
-; CHECK-NEXT:    slli a1, a2, 1
-; CHECK-NEXT:    srli a2, a2, 2
-; CHECK-NEXT:    fmv.w.x fa5, a3
-; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vslidedown.vx v6, v0, a2
-; CHECK-NEXT:    sltu a2, a0, a3
-; CHECK-NEXT:    vmv1r.v v5, v6
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a3
-; CHECK-NEXT:    vmv1r.v v0, v6
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v5, v16, fa5, v0.t
-; CHECK-NEXT:    fsrmi a2, 4
-; CHECK-NEXT:    vmv1r.v v0, v5
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    fsrm a2
-; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vfabs.v v24, v16
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v6
+; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB10_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB10_2:
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v7
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v16, fa5, v0.t
+; CHECK-NEXT:    vfabs.v v8, v24
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v7
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v24, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.round.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -337,46 +282,12 @@ define <vscale x 32 x bfloat> @vp_round_nxv32bf16(<vscale x 32 x bfloat> %va, <v
 define <vscale x 32 x bfloat> @vp_round_nxv32bf16_unmasked(<vscale x 32 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv32bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vmset.m v16
-; CHECK-NEXT:    lui a3, 307200
-; CHECK-NEXT:    slli a1, a2, 1
-; CHECK-NEXT:    srli a2, a2, 2
-; CHECK-NEXT:    fmv.w.x fa5, a3
-; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v7, v16, a2
-; CHECK-NEXT:    sltu a2, a0, a3
-; CHECK-NEXT:    vmv1r.v v6, v7
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a3
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v6, v16, fa5, v0.t
-; CHECK-NEXT:    fsrmi a2, 4
-; CHECK-NEXT:    vmv1r.v v0, v6
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    fsrm a2
-; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB11_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB11_2:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v16
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
@@ -385,7 +296,21 @@ define <vscale x 32 x bfloat> @vp_round_nxv32bf16_unmasked(<vscale x 32 x bfloat
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfabs.v v8, v24
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    fsrmi a0, 4
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.round.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -394,15 +319,13 @@ define <vscale x 32 x bfloat> @vp_round_nxv32bf16_unmasked(<vscale x 32 x bfloat
 define <vscale x 1 x half> @vp_round_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_nxv1f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 4
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -412,27 +335,21 @@ define <vscale x 1 x half> @vp_round_nxv1f16(<vscale x 1 x half> %va, <vscale x
 ;
 ; ZVFHMIN-LABEL: vp_round_nxv1f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 1 x half> @llvm.vp.round.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
@@ -441,7 +358,7 @@ define <vscale x 1 x half> @vp_round_nxv1f16(<vscale x 1 x half> %va, <vscale x
 define <vscale x 1 x half> @vp_round_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_nxv1f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -457,7 +374,7 @@ define <vscale x 1 x half> @vp_round_nxv1f16_unmasked(<vscale x 1 x half> %va, i
 ;
 ; ZVFHMIN-LABEL: vp_round_nxv1f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -480,15 +397,13 @@ define <vscale x 1 x half> @vp_round_nxv1f16_unmasked(<vscale x 1 x half> %va, i
 define <vscale x 2 x half> @vp_round_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_nxv2f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 4
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -498,27 +413,21 @@ define <vscale x 2 x half> @vp_round_nxv2f16(<vscale x 2 x half> %va, <vscale x
 ;
 ; ZVFHMIN-LABEL: vp_round_nxv2f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vmv.v.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x half> @llvm.vp.round.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
@@ -527,7 +436,7 @@ define <vscale x 2 x half> @vp_round_nxv2f16(<vscale x 2 x half> %va, <vscale x
 define <vscale x 2 x half> @vp_round_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_nxv2f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -543,7 +452,7 @@ define <vscale x 2 x half> @vp_round_nxv2f16_unmasked(<vscale x 2 x half> %va, i
 ;
 ; ZVFHMIN-LABEL: vp_round_nxv2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -566,15 +475,13 @@ define <vscale x 2 x half> @vp_round_nxv2f16_unmasked(<vscale x 2 x half> %va, i
 define <vscale x 4 x half> @vp_round_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_nxv4f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 4
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -584,27 +491,21 @@ define <vscale x 4 x half> @vp_round_nxv4f16(<vscale x 4 x half> %va, <vscale x
 ;
 ; ZVFHMIN-LABEL: vp_round_nxv4f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v13, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v13, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vmv1r.v v0, v13
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 4 x half> @llvm.vp.round.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
@@ -613,7 +514,7 @@ define <vscale x 4 x half> @vp_round_nxv4f16(<vscale x 4 x half> %va, <vscale x
 define <vscale x 4 x half> @vp_round_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_nxv4f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -629,7 +530,7 @@ define <vscale x 4 x half> @vp_round_nxv4f16_unmasked(<vscale x 4 x half> %va, i
 ;
 ; ZVFHMIN-LABEL: vp_round_nxv4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -652,17 +553,13 @@ define <vscale x 4 x half> @vp_round_nxv4f16_unmasked(<vscale x 4 x half> %va, i
 define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_nxv8f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFH-NEXT:    vmv1r.v v12, v0
-; ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; ZVFH-NEXT:    fsrmi a0, 4
-; ZVFH-NEXT:    vmv1r.v v0, v12
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -672,27 +569,21 @@ define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ;
 ; ZVFHMIN-LABEL: vp_round_nxv8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v17, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v12
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v17, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vmv1r.v v0, v17
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 8 x half> @llvm.vp.round.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
@@ -701,7 +592,7 @@ define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x
 define <vscale x 8 x half> @vp_round_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_nxv8f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -717,7 +608,7 @@ define <vscale x 8 x half> @vp_round_nxv8f16_unmasked(<vscale x 8 x half> %va, i
 ;
 ; ZVFHMIN-LABEL: vp_round_nxv8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -740,17 +631,13 @@ define <vscale x 8 x half> @vp_round_nxv8f16_unmasked(<vscale x 8 x half> %va, i
 define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_nxv16f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFH-NEXT:    vmv1r.v v16, v0
-; ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfabs.v v12, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; ZVFH-NEXT:    fsrmi a0, 4
-; ZVFH-NEXT:    vmv1r.v v0, v16
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -760,27 +647,21 @@ define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale
 ;
 ; ZVFHMIN-LABEL: vp_round_nxv16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v25, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v16
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v25, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v16, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v24
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x half> @llvm.vp.round.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
@@ -789,7 +670,7 @@ define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale
 define <vscale x 16 x half> @vp_round_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_nxv16f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v12, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -805,7 +686,7 @@ define <vscale x 16 x half> @vp_round_nxv16f16_unmasked(<vscale x 16 x half> %va
 ;
 ; ZVFHMIN-LABEL: vp_round_nxv16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
@@ -828,17 +709,13 @@ define <vscale x 16 x half> @vp_round_nxv16f16_unmasked(<vscale x 16 x half> %va
 define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_nxv32f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT:    vmv1r.v v24, v0
-; ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT:    vfabs.v v16, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; ZVFH-NEXT:    fsrmi a0, 4
-; ZVFH-NEXT:    vmv1r.v v0, v24
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -848,60 +725,35 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
 ;
 ; ZVFHMIN-LABEL: vp_round_nxv32f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    lui a3, 307200
-; ZVFHMIN-NEXT:    slli a1, a2, 1
-; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a3
-; ZVFHMIN-NEXT:    sub a3, a0, a1
-; ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a2
-; ZVFHMIN-NEXT:    sltu a2, a0, a3
-; ZVFHMIN-NEXT:    vmv1r.v v5, v6
-; ZVFHMIN-NEXT:    addi a2, a2, -1
-; ZVFHMIN-NEXT:    and a2, a2, a3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v5, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a2, 4
-; ZVFHMIN-NEXT:    vmv1r.v v0, v5
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    fsrm a2
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v24, v16
+; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT:    fsrmi a0, 4
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB22_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB22_2:
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v8, v7
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v16, fa5, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v24
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    fsrmi a0, 4
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.round.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -910,7 +762,7 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
 define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_round_nxv32f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfabs.v v16, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -926,46 +778,12 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
 ;
 ; ZVFHMIN-LABEL: vp_round_nxv32f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
-; ZVFHMIN-NEXT:    lui a3, 307200
-; ZVFHMIN-NEXT:    slli a1, a2, 1
-; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a3
-; ZVFHMIN-NEXT:    sub a3, a0, a1
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v7, v16, a2
-; ZVFHMIN-NEXT:    sltu a2, a0, a3
-; ZVFHMIN-NEXT:    vmv1r.v v6, v7
-; ZVFHMIN-NEXT:    addi a2, a2, -1
-; ZVFHMIN-NEXT:    and a2, a2, a3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v6, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a2, 4
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    fsrm a2
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB23_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB23_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v24, v16
+; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 4
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
@@ -974,7 +792,21 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfabs.v v8, v24
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT:    fsrmi a0, 4
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.round.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -983,14 +815,12 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
 define <vscale x 1 x float> @vp_round_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1004,7 +834,7 @@ define <vscale x 1 x float> @vp_round_nxv1f32(<vscale x 1 x float> %va, <vscale
 define <vscale x 1 x float> @vp_round_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv1f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1023,14 +853,12 @@ define <vscale x 1 x float> @vp_round_nxv1f32_unmasked(<vscale x 1 x float> %va,
 define <vscale x 2 x float> @vp_round_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1044,7 +872,7 @@ define <vscale x 2 x float> @vp_round_nxv2f32(<vscale x 2 x float> %va, <vscale
 define <vscale x 2 x float> @vp_round_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1063,16 +891,12 @@ define <vscale x 2 x float> @vp_round_nxv2f32_unmasked(<vscale x 2 x float> %va,
 define <vscale x 4 x float> @vp_round_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1086,7 +910,7 @@ define <vscale x 4 x float> @vp_round_nxv4f32(<vscale x 4 x float> %va, <vscale
 define <vscale x 4 x float> @vp_round_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1105,16 +929,12 @@ define <vscale x 4 x float> @vp_round_nxv4f32_unmasked(<vscale x 4 x float> %va,
 define <vscale x 8 x float> @vp_round_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1128,7 +948,7 @@ define <vscale x 8 x float> @vp_round_nxv8f32(<vscale x 8 x float> %va, <vscale
 define <vscale x 8 x float> @vp_round_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1147,16 +967,12 @@ define <vscale x 8 x float> @vp_round_nxv8f32_unmasked(<vscale x 8 x float> %va,
 define <vscale x 16 x float> @vp_round_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    vfabs.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 4
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1170,7 +986,7 @@ define <vscale x 16 x float> @vp_round_nxv16f32(<vscale x 16 x float> %va, <vsca
 define <vscale x 16 x float> @vp_round_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_round_nxv16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1189,14 +1005,12 @@ define <vscale x 16 x float> @vp_round_nxv16f32_unmasked(<vscale x 16 x float> %
 define <vscale x 1 x double> @vp_round_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_nxv1f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI34_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v9, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1206,15 +1020,13 @@ define <vscale x 1 x double> @vp_round_nxv1f64(<vscale x 1 x double> %va, <vscal
 ;
 ; RV64ZVFH-LABEL: vp_round_nxv1f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 4
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1224,14 +1036,12 @@ define <vscale x 1 x double> @vp_round_nxv1f64(<vscale x 1 x double> %va, <vscal
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_nxv1f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI34_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1241,15 +1051,13 @@ define <vscale x 1 x double> @vp_round_nxv1f64(<vscale x 1 x double> %va, <vscal
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_nxv1f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1263,9 +1071,9 @@ define <vscale x 1 x double> @vp_round_nxv1f64(<vscale x 1 x double> %va, <vscal
 define <vscale x 1 x double> @vp_round_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_nxv1f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI35_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI35_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI35_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v9, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
@@ -1278,7 +1086,7 @@ define <vscale x 1 x double> @vp_round_nxv1f64_unmasked(<vscale x 1 x double> %v
 ;
 ; RV64ZVFH-LABEL: vp_round_nxv1f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1294,9 +1102,9 @@ define <vscale x 1 x double> @vp_round_nxv1f64_unmasked(<vscale x 1 x double> %v
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_nxv1f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI35_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI35_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI35_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
@@ -1309,7 +1117,7 @@ define <vscale x 1 x double> @vp_round_nxv1f64_unmasked(<vscale x 1 x double> %v
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_nxv1f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1329,16 +1137,12 @@ define <vscale x 1 x double> @vp_round_nxv1f64_unmasked(<vscale x 1 x double> %v
 define <vscale x 2 x double> @vp_round_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_nxv2f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI36_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI36_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v10, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
-; RV32ZVFH-NEXT:    vmv1r.v v0, v12
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1348,17 +1152,13 @@ define <vscale x 2 x double> @vp_round_nxv2f64(<vscale x 2 x double> %va, <vscal
 ;
 ; RV64ZVFH-LABEL: vp_round_nxv2f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v12, v0
-; RV64ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 4
-; RV64ZVFH-NEXT:    vmv1r.v v0, v12
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1368,16 +1168,12 @@ define <vscale x 2 x double> @vp_round_nxv2f64(<vscale x 2 x double> %va, <vscal
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_nxv2f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI36_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI36_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1387,17 +1183,13 @@ define <vscale x 2 x double> @vp_round_nxv2f64(<vscale x 2 x double> %va, <vscal
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_nxv2f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1411,9 +1203,9 @@ define <vscale x 2 x double> @vp_round_nxv2f64(<vscale x 2 x double> %va, <vscal
 define <vscale x 2 x double> @vp_round_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_nxv2f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI37_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI37_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI37_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI37_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v10, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
@@ -1426,7 +1218,7 @@ define <vscale x 2 x double> @vp_round_nxv2f64_unmasked(<vscale x 2 x double> %v
 ;
 ; RV64ZVFH-LABEL: vp_round_nxv2f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1442,9 +1234,9 @@ define <vscale x 2 x double> @vp_round_nxv2f64_unmasked(<vscale x 2 x double> %v
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_nxv2f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI37_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI37_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI37_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI37_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
@@ -1457,7 +1249,7 @@ define <vscale x 2 x double> @vp_round_nxv2f64_unmasked(<vscale x 2 x double> %v
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_nxv2f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1477,16 +1269,12 @@ define <vscale x 2 x double> @vp_round_nxv2f64_unmasked(<vscale x 2 x double> %v
 define <vscale x 4 x double> @vp_round_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_nxv4f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI38_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI38_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v12, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
-; RV32ZVFH-NEXT:    vmv1r.v v0, v16
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1496,17 +1284,13 @@ define <vscale x 4 x double> @vp_round_nxv4f64(<vscale x 4 x double> %va, <vscal
 ;
 ; RV64ZVFH-LABEL: vp_round_nxv4f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v16, v0
-; RV64ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 4
-; RV64ZVFH-NEXT:    vmv1r.v v0, v16
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1516,16 +1300,12 @@ define <vscale x 4 x double> @vp_round_nxv4f64(<vscale x 4 x double> %va, <vscal
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_nxv4f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI38_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI38_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1535,17 +1315,13 @@ define <vscale x 4 x double> @vp_round_nxv4f64(<vscale x 4 x double> %va, <vscal
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_nxv4f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1559,9 +1335,9 @@ define <vscale x 4 x double> @vp_round_nxv4f64(<vscale x 4 x double> %va, <vscal
 define <vscale x 4 x double> @vp_round_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_nxv4f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI39_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI39_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI39_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v12, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
@@ -1574,7 +1350,7 @@ define <vscale x 4 x double> @vp_round_nxv4f64_unmasked(<vscale x 4 x double> %v
 ;
 ; RV64ZVFH-LABEL: vp_round_nxv4f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1590,9 +1366,9 @@ define <vscale x 4 x double> @vp_round_nxv4f64_unmasked(<vscale x 4 x double> %v
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_nxv4f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI39_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI39_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI39_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
@@ -1605,7 +1381,7 @@ define <vscale x 4 x double> @vp_round_nxv4f64_unmasked(<vscale x 4 x double> %v
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_nxv4f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1625,16 +1401,12 @@ define <vscale x 4 x double> @vp_round_nxv4f64_unmasked(<vscale x 4 x double> %v
 define <vscale x 7 x double> @vp_round_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_nxv7f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1644,17 +1416,13 @@ define <vscale x 7 x double> @vp_round_nxv7f64(<vscale x 7 x double> %va, <vscal
 ;
 ; RV64ZVFH-LABEL: vp_round_nxv7f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 4
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1664,16 +1432,12 @@ define <vscale x 7 x double> @vp_round_nxv7f64(<vscale x 7 x double> %va, <vscal
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_nxv7f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1683,17 +1447,13 @@ define <vscale x 7 x double> @vp_round_nxv7f64(<vscale x 7 x double> %va, <vscal
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_nxv7f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1707,9 +1467,9 @@ define <vscale x 7 x double> @vp_round_nxv7f64(<vscale x 7 x double> %va, <vscal
 define <vscale x 7 x double> @vp_round_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_nxv7f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI41_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI41_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI41_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
@@ -1722,7 +1482,7 @@ define <vscale x 7 x double> @vp_round_nxv7f64_unmasked(<vscale x 7 x double> %v
 ;
 ; RV64ZVFH-LABEL: vp_round_nxv7f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1738,9 +1498,9 @@ define <vscale x 7 x double> @vp_round_nxv7f64_unmasked(<vscale x 7 x double> %v
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_nxv7f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI41_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI41_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI41_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
@@ -1753,7 +1513,7 @@ define <vscale x 7 x double> @vp_round_nxv7f64_unmasked(<vscale x 7 x double> %v
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_nxv7f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1773,16 +1533,12 @@ define <vscale x 7 x double> @vp_round_nxv7f64_unmasked(<vscale x 7 x double> %v
 define <vscale x 8 x double> @vp_round_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_nxv8f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1792,17 +1548,13 @@ define <vscale x 8 x double> @vp_round_nxv8f64(<vscale x 8 x double> %va, <vscal
 ;
 ; RV64ZVFH-LABEL: vp_round_nxv8f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 4
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1812,16 +1564,12 @@ define <vscale x 8 x double> @vp_round_nxv8f64(<vscale x 8 x double> %va, <vscal
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_nxv8f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1831,17 +1579,13 @@ define <vscale x 8 x double> @vp_round_nxv8f64(<vscale x 8 x double> %va, <vscal
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_nxv8f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1855,9 +1599,9 @@ define <vscale x 8 x double> @vp_round_nxv8f64(<vscale x 8 x double> %va, <vscal
 define <vscale x 8 x double> @vp_round_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_nxv8f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI43_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI43_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI43_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
@@ -1870,7 +1614,7 @@ define <vscale x 8 x double> @vp_round_nxv8f64_unmasked(<vscale x 8 x double> %v
 ;
 ; RV64ZVFH-LABEL: vp_round_nxv8f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1886,9 +1630,9 @@ define <vscale x 8 x double> @vp_round_nxv8f64_unmasked(<vscale x 8 x double> %v
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_nxv8f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI43_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI43_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI43_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
@@ -1901,7 +1645,7 @@ define <vscale x 8 x double> @vp_round_nxv8f64_unmasked(<vscale x 8 x double> %v
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_nxv8f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1923,184 +1667,104 @@ define <vscale x 8 x double> @vp_round_nxv8f64_unmasked(<vscale x 8 x double> %v
 define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_nxv16f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v7, v0
-; RV32ZVFH-NEXT:    csrr a1, vlenb
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI44_0)
-; RV32ZVFH-NEXT:    srli a3, a1, 3
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI44_0)(a2)
-; RV32ZVFH-NEXT:    sub a2, a0, a1
-; RV32ZVFH-NEXT:    vslidedown.vx v6, v0, a3
-; RV32ZVFH-NEXT:    sltu a3, a0, a2
-; RV32ZVFH-NEXT:    addi a3, a3, -1
-; RV32ZVFH-NEXT:    and a2, a3, a2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a2, 4
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    fsrm a2
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI44_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v24, v8
+; RV32ZVFH-NEXT:    fsrmi a0, 4
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFH-NEXT:    bltu a0, a1, .LBB44_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    mv a0, a1
-; RV32ZVFH-NEXT:  .LBB44_2:
-; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFH-NEXT:    ret
 ;
 ; RV64ZVFH-LABEL: vp_round_nxv16f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v7, v0
-; RV64ZVFH-NEXT:    csrr a1, vlenb
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    srli a3, a1, 3
-; RV64ZVFH-NEXT:    vslidedown.vx v6, v0, a3
-; RV64ZVFH-NEXT:    sub a3, a0, a1
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    sltu a2, a0, a3
-; RV64ZVFH-NEXT:    addi a2, a2, -1
-; RV64ZVFH-NEXT:    and a2, a2, a3
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a2, 4
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    fsrm a2
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v24, v8
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    fsrmi a0, 4
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFH-NEXT:    bltu a0, a1, .LBB44_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    mv a0, a1
-; RV64ZVFH-NEXT:  .LBB44_2:
-; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV64ZVFH-NEXT:    fsrmi a0, 4
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFH-NEXT:    ret
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_nxv16f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; RV32ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI44_0)
-; RV32ZVFHMIN-NEXT:    srli a3, a1, 3
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI44_0)(a2)
-; RV32ZVFHMIN-NEXT:    sub a2, a0, a1
-; RV32ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; RV32ZVFHMIN-NEXT:    sltu a3, a0, a2
-; RV32ZVFHMIN-NEXT:    addi a3, a3, -1
-; RV32ZVFHMIN-NEXT:    and a2, a3, a2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a2, 4
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a2
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI44_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    bltu a0, a1, .LBB44_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    mv a0, a1
-; RV32ZVFHMIN-NEXT:  .LBB44_2:
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_nxv16f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; RV64ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    srli a3, a1, 3
-; RV64ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; RV64ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV64ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV64ZVFHMIN-NEXT:    and a2, a2, a3
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a2, 4
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a2
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    bltu a0, a1, .LBB44_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    mv a0, a1
-; RV64ZVFHMIN-NEXT:  .LBB44_2:
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.round.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
@@ -2109,136 +1773,104 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
 define <vscale x 16 x double> @vp_round_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_round_nxv16f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    csrr a1, vlenb
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI45_0)
-; RV32ZVFH-NEXT:    sub a3, a0, a1
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI45_0)(a2)
-; RV32ZVFH-NEXT:    sltu a2, a0, a3
-; RV32ZVFH-NEXT:    addi a2, a2, -1
-; RV32ZVFH-NEXT:    and a2, a2, a3
-; RV32ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16
-; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFH-NEXT:    fsrmi a2, 4
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    fsrm a2
-; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFH-NEXT:    bltu a0, a1, .LBB45_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    mv a0, a1
-; RV32ZVFH-NEXT:  .LBB45_2:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI45_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI45_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v8
-; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 4
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT:    fsrmi a0, 4
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT:    vmv1r.v v0, v7
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFH-NEXT:    ret
 ;
 ; RV64ZVFH-LABEL: vp_round_nxv16f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    csrr a1, vlenb
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    sub a3, a0, a1
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    sltu a2, a0, a3
-; RV64ZVFH-NEXT:    addi a2, a2, -1
-; RV64ZVFH-NEXT:    and a2, a2, a3
-; RV64ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16
-; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFH-NEXT:    fsrmi a2, 4
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    fsrm a2
-; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFH-NEXT:    bltu a0, a1, .LBB45_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    mv a0, a1
-; RV64ZVFH-NEXT:  .LBB45_2:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v8
-; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    fsrmi a0, 4
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT:    fsrmi a0, 4
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT:    vmv1r.v v0, v7
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFH-NEXT:    ret
 ;
 ; RV32ZVFHMIN-LABEL: vp_round_nxv16f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI45_0)
-; RV32ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI45_0)(a2)
-; RV32ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV32ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV32ZVFHMIN-NEXT:    and a2, a2, a3
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a2, 4
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a2
-; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    bltu a0, a1, .LBB45_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    mv a0, a1
-; RV32ZVFHMIN-NEXT:  .LBB45_2:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI45_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI45_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 4
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vp_round_nxv16f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV64ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV64ZVFHMIN-NEXT:    and a2, a2, a3
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a2, 4
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a2
-; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    bltu a0, a1, .LBB45_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    mv a0, a1
-; RV64ZVFHMIN-NEXT:  .LBB45_2:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 4
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.round.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v

diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
index b9c435b7fb8d8..b610f8b93efa2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -15,27 +15,21 @@
 define <vscale x 1 x bfloat> @vp_roundeven_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv1bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v8, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v11, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x bfloat> @llvm.vp.roundeven.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x bfloat> %v
@@ -44,7 +38,7 @@ define <vscale x 1 x bfloat> @vp_roundeven_nxv1bf16(<vscale x 1 x bfloat> %va, <
 define <vscale x 1 x bfloat> @vp_roundeven_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv1bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -67,27 +61,21 @@ define <vscale x 1 x bfloat> @vp_roundeven_nxv1bf16_unmasked(<vscale x 1 x bfloa
 define <vscale x 2 x bfloat> @vp_roundeven_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv2bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v8, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v11, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.vp.roundeven.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x bfloat> %v
@@ -96,7 +84,7 @@ define <vscale x 2 x bfloat> @vp_roundeven_nxv2bf16(<vscale x 2 x bfloat> %va, <
 define <vscale x 2 x bfloat> @vp_roundeven_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv2bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -119,27 +107,21 @@ define <vscale x 2 x bfloat> @vp_roundeven_nxv2bf16_unmasked(<vscale x 2 x bfloa
 define <vscale x 4 x bfloat> @vp_roundeven_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv4bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v13, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v10
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v13, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
 ; CHECK-NEXT:    ret
   %v = call <vscale x 4 x bfloat> @llvm.vp.roundeven.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x bfloat> %v
@@ -148,7 +130,7 @@ define <vscale x 4 x bfloat> @vp_roundeven_nxv4bf16(<vscale x 4 x bfloat> %va, <
 define <vscale x 4 x bfloat> @vp_roundeven_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv4bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -171,27 +153,21 @@ define <vscale x 4 x bfloat> @vp_roundeven_nxv4bf16_unmasked(<vscale x 4 x bfloa
 define <vscale x 8 x bfloat> @vp_roundeven_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv8bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v17, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v12, v0.t
+; CHECK-NEXT:    vfabs.v v8, v12
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v17, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vmv1r.v v0, v17
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x bfloat> @llvm.vp.roundeven.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x bfloat> %v
@@ -200,7 +176,7 @@ define <vscale x 8 x bfloat> @vp_roundeven_nxv8bf16(<vscale x 8 x bfloat> %va, <
 define <vscale x 8 x bfloat> @vp_roundeven_nxv8bf16_unmasked(<vscale x 8 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv8bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -223,27 +199,21 @@ define <vscale x 8 x bfloat> @vp_roundeven_nxv8bf16_unmasked(<vscale x 8 x bfloa
 define <vscale x 16 x bfloat> @vp_roundeven_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv16bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v16, v0.t
+; CHECK-NEXT:    vfabs.v v8, v16
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x bfloat> @llvm.vp.roundeven.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x bfloat> %v
@@ -252,7 +222,7 @@ define <vscale x 16 x bfloat> @vp_roundeven_nxv16bf16(<vscale x 16 x bfloat> %va
 define <vscale x 16 x bfloat> @vp_roundeven_nxv16bf16_unmasked(<vscale x 16 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv16bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
@@ -275,60 +245,35 @@ define <vscale x 16 x bfloat> @vp_roundeven_nxv16bf16_unmasked(<vscale x 16 x bf
 define <vscale x 32 x bfloat> @vp_roundeven_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv32bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    lui a3, 307200
-; CHECK-NEXT:    slli a1, a2, 1
-; CHECK-NEXT:    srli a2, a2, 2
-; CHECK-NEXT:    fmv.w.x fa5, a3
-; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vslidedown.vx v6, v0, a2
-; CHECK-NEXT:    sltu a2, a0, a3
-; CHECK-NEXT:    vmv1r.v v5, v6
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a3
-; CHECK-NEXT:    vmv1r.v v0, v6
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v5, v16, fa5, v0.t
-; CHECK-NEXT:    fsrmi a2, 0
-; CHECK-NEXT:    vmv1r.v v0, v5
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    fsrm a2
-; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vfabs.v v24, v16
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v6
+; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB10_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB10_2:
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v7
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v16, fa5, v0.t
+; CHECK-NEXT:    vfabs.v v8, v24
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v7
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v24, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.roundeven.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -337,46 +282,12 @@ define <vscale x 32 x bfloat> @vp_roundeven_nxv32bf16(<vscale x 32 x bfloat> %va
 define <vscale x 32 x bfloat> @vp_roundeven_nxv32bf16_unmasked(<vscale x 32 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv32bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vmset.m v16
-; CHECK-NEXT:    lui a3, 307200
-; CHECK-NEXT:    slli a1, a2, 1
-; CHECK-NEXT:    srli a2, a2, 2
-; CHECK-NEXT:    fmv.w.x fa5, a3
-; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v7, v16, a2
-; CHECK-NEXT:    sltu a2, a0, a3
-; CHECK-NEXT:    vmv1r.v v6, v7
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a3
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v6, v16, fa5, v0.t
-; CHECK-NEXT:    fsrmi a2, 0
-; CHECK-NEXT:    vmv1r.v v0, v6
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    fsrm a2
-; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB11_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB11_2:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v16
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v24, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
 ; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
@@ -385,7 +296,21 @@ define <vscale x 32 x bfloat> @vp_roundeven_nxv32bf16_unmasked(<vscale x 32 x bf
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfabs.v v8, v24
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    fsrmi a0, 0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
+; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.roundeven.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -394,15 +319,13 @@ define <vscale x 32 x bfloat> @vp_roundeven_nxv32bf16_unmasked(<vscale x 32 x bf
 define <vscale x 1 x half> @vp_roundeven_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_nxv1f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -412,27 +335,21 @@ define <vscale x 1 x half> @vp_roundeven_nxv1f16(<vscale x 1 x half> %va, <vscal
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_nxv1f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 1 x half> @llvm.vp.roundeven.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
@@ -441,7 +358,7 @@ define <vscale x 1 x half> @vp_roundeven_nxv1f16(<vscale x 1 x half> %va, <vscal
 define <vscale x 1 x half> @vp_roundeven_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_nxv1f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -457,7 +374,7 @@ define <vscale x 1 x half> @vp_roundeven_nxv1f16_unmasked(<vscale x 1 x half> %v
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_nxv1f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
@@ -480,15 +397,13 @@ define <vscale x 1 x half> @vp_roundeven_nxv1f16_unmasked(<vscale x 1 x half> %v
 define <vscale x 2 x half> @vp_roundeven_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_nxv2f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -498,27 +413,21 @@ define <vscale x 2 x half> @vp_roundeven_nxv2f16(<vscale x 2 x half> %va, <vscal
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_nxv2f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vmv.v.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x half> @llvm.vp.roundeven.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
@@ -527,7 +436,7 @@ define <vscale x 2 x half> @vp_roundeven_nxv2f16(<vscale x 2 x half> %va, <vscal
 define <vscale x 2 x half> @vp_roundeven_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_nxv2f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -543,7 +452,7 @@ define <vscale x 2 x half> @vp_roundeven_nxv2f16_unmasked(<vscale x 2 x half> %v
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_nxv2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
@@ -566,15 +475,13 @@ define <vscale x 2 x half> @vp_roundeven_nxv2f16_unmasked(<vscale x 2 x half> %v
 define <vscale x 4 x half> @vp_roundeven_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_nxv4f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; ZVFH-NEXT:    fsrmi a0, 0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -584,27 +491,21 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16(<vscale x 4 x half> %va, <vscal
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_nxv4f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v13, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v13, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v13
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 4 x half> @llvm.vp.roundeven.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
@@ -613,7 +514,7 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16(<vscale x 4 x half> %va, <vscal
 define <vscale x 4 x half> @vp_roundeven_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_nxv4f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -629,7 +530,7 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16_unmasked(<vscale x 4 x half> %v
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_nxv4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -652,17 +553,13 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16_unmasked(<vscale x 4 x half> %v
 define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_nxv8f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFH-NEXT:    vmv1r.v v12, v0
-; ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; ZVFH-NEXT:    fsrmi a0, 0
-; ZVFH-NEXT:    vmv1r.v v0, v12
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -672,27 +569,21 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscal
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_nxv8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v17, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v12
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v17, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v17
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
@@ -701,7 +592,7 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscal
 define <vscale x 8 x half> @vp_roundeven_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_nxv8f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -717,7 +608,7 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16_unmasked(<vscale x 8 x half> %v
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_nxv8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
@@ -740,17 +631,13 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16_unmasked(<vscale x 8 x half> %v
 define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_nxv16f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFH-NEXT:    vmv1r.v v16, v0
-; ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfabs.v v12, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; ZVFH-NEXT:    fsrmi a0, 0
-; ZVFH-NEXT:    vmv1r.v v0, v16
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -760,27 +647,21 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vs
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_nxv16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v25, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v16
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v25, v8, fa5, v0.t
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v16, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v24
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
@@ -789,7 +670,7 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vs
 define <vscale x 16 x half> @vp_roundeven_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_nxv16f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v12, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -805,7 +686,7 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16_unmasked(<vscale x 16 x half>
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_nxv16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
@@ -828,17 +709,13 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16_unmasked(<vscale x 16 x half>
 define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_nxv32f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT:    vmv1r.v v24, v0
-; ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT:    vfabs.v v16, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; ZVFH-NEXT:    fsrmi a0, 0
-; ZVFH-NEXT:    vmv1r.v v0, v24
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; ZVFH-NEXT:    fsrm a0
 ; ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -848,60 +725,35 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_nxv32f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    lui a3, 307200
-; ZVFHMIN-NEXT:    slli a1, a2, 1
-; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a3
-; ZVFHMIN-NEXT:    sub a3, a0, a1
-; ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a2
-; ZVFHMIN-NEXT:    sltu a2, a0, a3
-; ZVFHMIN-NEXT:    vmv1r.v v5, v6
-; ZVFHMIN-NEXT:    addi a2, a2, -1
-; ZVFHMIN-NEXT:    and a2, a2, a3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v5, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a2, 0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v5
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    fsrm a2
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v24, v16
+; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT:    fsrmi a0, 0
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB22_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB22_2:
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v8, v7
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v16, fa5, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v24
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    fsrmi a0, 0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
 ; ZVFHMIN-NEXT:    fsrm a0
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -910,7 +762,7 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
 define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundeven_nxv32f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfabs.v v16, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
@@ -926,46 +778,12 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
 ;
 ; ZVFHMIN-LABEL: vp_roundeven_nxv32f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
-; ZVFHMIN-NEXT:    lui a3, 307200
-; ZVFHMIN-NEXT:    slli a1, a2, 1
-; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a3
-; ZVFHMIN-NEXT:    sub a3, a0, a1
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v7, v16, a2
-; ZVFHMIN-NEXT:    sltu a2, a0, a3
-; ZVFHMIN-NEXT:    vmv1r.v v6, v7
-; ZVFHMIN-NEXT:    addi a2, a2, -1
-; ZVFHMIN-NEXT:    and a2, a2, a3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v6, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a2, 0
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    fsrm a2
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB23_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB23_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v24, v16
+; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
 ; ZVFHMIN-NEXT:    fsrmi a0, 0
 ; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
@@ -974,7 +792,21 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfabs.v v8, v24
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT:    fsrmi a0, 0
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -983,14 +815,12 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
 define <vscale x 1 x float> @vp_roundeven_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1004,7 +834,7 @@ define <vscale x 1 x float> @vp_roundeven_nxv1f32(<vscale x 1 x float> %va, <vsc
 define <vscale x 1 x float> @vp_roundeven_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv1f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1023,14 +853,12 @@ define <vscale x 1 x float> @vp_roundeven_nxv1f32_unmasked(<vscale x 1 x float>
 define <vscale x 2 x float> @vp_roundeven_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1044,7 +872,7 @@ define <vscale x 2 x float> @vp_roundeven_nxv2f32(<vscale x 2 x float> %va, <vsc
 define <vscale x 2 x float> @vp_roundeven_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1063,16 +891,12 @@ define <vscale x 2 x float> @vp_roundeven_nxv2f32_unmasked(<vscale x 2 x float>
 define <vscale x 4 x float> @vp_roundeven_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1086,7 +910,7 @@ define <vscale x 4 x float> @vp_roundeven_nxv4f32(<vscale x 4 x float> %va, <vsc
 define <vscale x 4 x float> @vp_roundeven_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1105,16 +929,12 @@ define <vscale x 4 x float> @vp_roundeven_nxv4f32_unmasked(<vscale x 4 x float>
 define <vscale x 8 x float> @vp_roundeven_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1128,7 +948,7 @@ define <vscale x 8 x float> @vp_roundeven_nxv8f32(<vscale x 8 x float> %va, <vsc
 define <vscale x 8 x float> @vp_roundeven_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1147,16 +967,12 @@ define <vscale x 8 x float> @vp_roundeven_nxv8f32_unmasked(<vscale x 8 x float>
 define <vscale x 16 x float> @vp_roundeven_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    vfabs.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
 ; CHECK-NEXT:    fsrmi a0, 0
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    fsrm a0
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1170,7 +986,7 @@ define <vscale x 16 x float> @vp_roundeven_nxv16f32(<vscale x 16 x float> %va, <
 define <vscale x 16 x float> @vp_roundeven_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundeven_nxv16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
@@ -1189,14 +1005,12 @@ define <vscale x 16 x float> @vp_roundeven_nxv16f32_unmasked(<vscale x 16 x floa
 define <vscale x 1 x double> @vp_roundeven_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_nxv1f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI34_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v9, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1206,15 +1020,13 @@ define <vscale x 1 x double> @vp_roundeven_nxv1f64(<vscale x 1 x double> %va, <v
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_nxv1f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1224,14 +1036,12 @@ define <vscale x 1 x double> @vp_roundeven_nxv1f64(<vscale x 1 x double> %va, <v
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_nxv1f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI34_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1241,15 +1051,13 @@ define <vscale x 1 x double> @vp_roundeven_nxv1f64(<vscale x 1 x double> %va, <v
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_nxv1f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
@@ -1263,9 +1071,9 @@ define <vscale x 1 x double> @vp_roundeven_nxv1f64(<vscale x 1 x double> %va, <v
 define <vscale x 1 x double> @vp_roundeven_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_nxv1f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI35_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI35_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI35_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v9, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
@@ -1278,7 +1086,7 @@ define <vscale x 1 x double> @vp_roundeven_nxv1f64_unmasked(<vscale x 1 x double
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_nxv1f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1294,9 +1102,9 @@ define <vscale x 1 x double> @vp_roundeven_nxv1f64_unmasked(<vscale x 1 x double
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_nxv1f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI35_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI35_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI35_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
@@ -1309,7 +1117,7 @@ define <vscale x 1 x double> @vp_roundeven_nxv1f64_unmasked(<vscale x 1 x double
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_nxv1f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1329,16 +1137,12 @@ define <vscale x 1 x double> @vp_roundeven_nxv1f64_unmasked(<vscale x 1 x double
 define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_nxv2f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI36_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI36_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v10, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v12
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1348,17 +1152,13 @@ define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <v
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_nxv2f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v12, v0
-; RV64ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v12
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1368,16 +1168,12 @@ define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <v
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_nxv2f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI36_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI36_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1387,17 +1183,13 @@ define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <v
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_nxv2f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
@@ -1411,9 +1203,9 @@ define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <v
 define <vscale x 2 x double> @vp_roundeven_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_nxv2f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI37_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI37_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI37_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI37_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v10, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
@@ -1426,7 +1218,7 @@ define <vscale x 2 x double> @vp_roundeven_nxv2f64_unmasked(<vscale x 2 x double
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_nxv2f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1442,9 +1234,9 @@ define <vscale x 2 x double> @vp_roundeven_nxv2f64_unmasked(<vscale x 2 x double
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_nxv2f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI37_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI37_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI37_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI37_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
@@ -1457,7 +1249,7 @@ define <vscale x 2 x double> @vp_roundeven_nxv2f64_unmasked(<vscale x 2 x double
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_nxv2f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1477,16 +1269,12 @@ define <vscale x 2 x double> @vp_roundeven_nxv2f64_unmasked(<vscale x 2 x double
 define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_nxv4f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI38_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI38_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v12, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v16
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1496,17 +1284,13 @@ define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <v
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_nxv4f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v16, v0
-; RV64ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v16
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1516,16 +1300,12 @@ define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <v
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_nxv4f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI38_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI38_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1535,17 +1315,13 @@ define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <v
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_nxv4f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
@@ -1559,9 +1335,9 @@ define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <v
 define <vscale x 4 x double> @vp_roundeven_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_nxv4f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI39_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI39_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI39_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v12, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
@@ -1574,7 +1350,7 @@ define <vscale x 4 x double> @vp_roundeven_nxv4f64_unmasked(<vscale x 4 x double
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_nxv4f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1590,9 +1366,9 @@ define <vscale x 4 x double> @vp_roundeven_nxv4f64_unmasked(<vscale x 4 x double
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_nxv4f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI39_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI39_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI39_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
@@ -1605,7 +1381,7 @@ define <vscale x 4 x double> @vp_roundeven_nxv4f64_unmasked(<vscale x 4 x double
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_nxv4f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1625,16 +1401,12 @@ define <vscale x 4 x double> @vp_roundeven_nxv4f64_unmasked(<vscale x 4 x double
 define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_nxv7f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1644,17 +1416,13 @@ define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <v
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_nxv7f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1664,16 +1432,12 @@ define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <v
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_nxv7f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1683,17 +1447,13 @@ define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <v
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_nxv7f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1707,9 +1467,9 @@ define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <v
 define <vscale x 7 x double> @vp_roundeven_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_nxv7f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI41_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI41_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI41_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
@@ -1722,7 +1482,7 @@ define <vscale x 7 x double> @vp_roundeven_nxv7f64_unmasked(<vscale x 7 x double
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_nxv7f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1738,9 +1498,9 @@ define <vscale x 7 x double> @vp_roundeven_nxv7f64_unmasked(<vscale x 7 x double
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_nxv7f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI41_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI41_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI41_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
@@ -1753,7 +1513,7 @@ define <vscale x 7 x double> @vp_roundeven_nxv7f64_unmasked(<vscale x 7 x double
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_nxv7f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1773,16 +1533,12 @@ define <vscale x 7 x double> @vp_roundeven_nxv7f64_unmasked(<vscale x 7 x double
 define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_nxv8f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1792,17 +1548,13 @@ define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <v
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_nxv8f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFH-NEXT:    fsrmi a0, 0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1812,16 +1564,12 @@ define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <v
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_nxv8f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1831,17 +1579,13 @@ define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <v
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_nxv8f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
@@ -1855,9 +1599,9 @@ define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <v
 define <vscale x 8 x double> @vp_roundeven_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_nxv8f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI43_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI43_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI43_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
@@ -1870,7 +1614,7 @@ define <vscale x 8 x double> @vp_roundeven_nxv8f64_unmasked(<vscale x 8 x double
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_nxv8f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
@@ -1886,9 +1630,9 @@ define <vscale x 8 x double> @vp_roundeven_nxv8f64_unmasked(<vscale x 8 x double
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_nxv8f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI43_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI43_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI43_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
@@ -1901,7 +1645,7 @@ define <vscale x 8 x double> @vp_roundeven_nxv8f64_unmasked(<vscale x 8 x double
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_nxv8f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
@@ -1923,184 +1667,104 @@ define <vscale x 8 x double> @vp_roundeven_nxv8f64_unmasked(<vscale x 8 x double
 define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_nxv16f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v7, v0
-; RV32ZVFH-NEXT:    csrr a1, vlenb
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI44_0)
-; RV32ZVFH-NEXT:    srli a3, a1, 3
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI44_0)(a2)
-; RV32ZVFH-NEXT:    sub a2, a0, a1
-; RV32ZVFH-NEXT:    vslidedown.vx v6, v0, a3
-; RV32ZVFH-NEXT:    sltu a3, a0, a2
-; RV32ZVFH-NEXT:    addi a3, a3, -1
-; RV32ZVFH-NEXT:    and a2, a3, a2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a2, 0
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    fsrm a2
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI44_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v24, v8
+; RV32ZVFH-NEXT:    fsrmi a0, 0
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFH-NEXT:    bltu a0, a1, .LBB44_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    mv a0, a1
-; RV32ZVFH-NEXT:  .LBB44_2:
-; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFH-NEXT:    ret
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_nxv16f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v7, v0
-; RV64ZVFH-NEXT:    csrr a1, vlenb
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    srli a3, a1, 3
-; RV64ZVFH-NEXT:    vslidedown.vx v6, v0, a3
-; RV64ZVFH-NEXT:    sub a3, a0, a1
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    sltu a2, a0, a3
-; RV64ZVFH-NEXT:    addi a2, a2, -1
-; RV64ZVFH-NEXT:    and a2, a2, a3
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a2, 0
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    fsrm a2
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v24, v8
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    fsrmi a0, 0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFH-NEXT:    bltu a0, a1, .LBB44_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    mv a0, a1
-; RV64ZVFH-NEXT:  .LBB44_2:
-; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV64ZVFH-NEXT:    fsrmi a0, 0
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFH-NEXT:    ret
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_nxv16f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; RV32ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI44_0)
-; RV32ZVFHMIN-NEXT:    srli a3, a1, 3
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI44_0)(a2)
-; RV32ZVFHMIN-NEXT:    sub a2, a0, a1
-; RV32ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; RV32ZVFHMIN-NEXT:    sltu a3, a0, a2
-; RV32ZVFHMIN-NEXT:    addi a3, a3, -1
-; RV32ZVFHMIN-NEXT:    and a2, a3, a2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a2, 0
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a2
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI44_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    bltu a0, a1, .LBB44_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    mv a0, a1
-; RV32ZVFHMIN-NEXT:  .LBB44_2:
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_nxv16f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; RV64ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    srli a3, a1, 3
-; RV64ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; RV64ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV64ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV64ZVFHMIN-NEXT:    and a2, a2, a3
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a2, 0
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a2
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    bltu a0, a1, .LBB44_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    mv a0, a1
-; RV64ZVFHMIN-NEXT:  .LBB44_2:
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.roundeven.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
@@ -2109,136 +1773,104 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
 define <vscale x 16 x double> @vp_roundeven_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundeven_nxv16f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    csrr a1, vlenb
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI45_0)
-; RV32ZVFH-NEXT:    sub a3, a0, a1
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI45_0)(a2)
-; RV32ZVFH-NEXT:    sltu a2, a0, a3
-; RV32ZVFH-NEXT:    addi a2, a2, -1
-; RV32ZVFH-NEXT:    and a2, a2, a3
-; RV32ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16
-; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFH-NEXT:    fsrmi a2, 0
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    fsrm a2
-; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFH-NEXT:    bltu a0, a1, .LBB45_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    mv a0, a1
-; RV32ZVFH-NEXT:  .LBB45_2:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI45_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI45_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v8
-; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
 ; RV32ZVFH-NEXT:    fsrmi a0, 0
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZVFH-NEXT:    fsrm a0
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT:    fsrmi a0, 0
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT:    vmv1r.v v0, v7
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFH-NEXT:    ret
 ;
 ; RV64ZVFH-LABEL: vp_roundeven_nxv16f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    csrr a1, vlenb
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    sub a3, a0, a1
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    sltu a2, a0, a3
-; RV64ZVFH-NEXT:    addi a2, a2, -1
-; RV64ZVFH-NEXT:    and a2, a2, a3
-; RV64ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16
-; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFH-NEXT:    fsrmi a2, 0
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    fsrm a2
-; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFH-NEXT:    bltu a0, a1, .LBB45_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    mv a0, a1
-; RV64ZVFH-NEXT:  .LBB45_2:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v8
-; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    fsrmi a0, 0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZVFH-NEXT:    fsrm a0
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT:    fsrmi a0, 0
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT:    vmv1r.v v0, v7
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFH-NEXT:    ret
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundeven_nxv16f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI45_0)
-; RV32ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI45_0)(a2)
-; RV32ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV32ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV32ZVFHMIN-NEXT:    and a2, a2, a3
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a2, 0
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a2
-; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    bltu a0, a1, .LBB45_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    mv a0, a1
-; RV32ZVFHMIN-NEXT:  .LBB45_2:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI45_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI45_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
 ; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    fsrm a0
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT:    fsrmi a0, 0
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundeven_nxv16f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV64ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV64ZVFHMIN-NEXT:    and a2, a2, a3
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a2, 0
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a2
-; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    bltu a0, a1, .LBB45_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    mv a0, a1
-; RV64ZVFHMIN-NEXT:  .LBB45_2:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
 ; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    fsrm a0
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT:    fsrmi a0, 0
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.roundeven.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v

diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
index 7f4aaff7f8635..23b4920f2dce2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -15,27 +15,19 @@
 define <vscale x 1 x bfloat> @vp_roundtozero_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv1bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v8, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v11, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v11, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v11, v10, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v11, v11, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x bfloat> @llvm.vp.roundtozero.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x bfloat> %v
@@ -44,16 +36,14 @@ define <vscale x 1 x bfloat> @vp_roundtozero_nxv1bf16(<vscale x 1 x bfloat> %va,
 define <vscale x 1 x bfloat> @vp_roundtozero_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv1bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v8, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
@@ -67,27 +57,19 @@ define <vscale x 1 x bfloat> @vp_roundtozero_nxv1bf16_unmasked(<vscale x 1 x bfl
 define <vscale x 2 x bfloat> @vp_roundtozero_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv2bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v8, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v11, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v11, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vmv.v.v v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v11, v10, v0.t
-; CHECK-NEXT:    fsrm a0
-; CHECK-NEXT:    vfcvt.f.x.v v11, v11, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.vp.roundtozero.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x bfloat> %v
@@ -96,16 +78,14 @@ define <vscale x 2 x bfloat> @vp_roundtozero_nxv2bf16(<vscale x 2 x bfloat> %va,
 define <vscale x 2 x bfloat> @vp_roundtozero_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv2bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v9
 ; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v8, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
@@ -119,27 +99,19 @@ define <vscale x 2 x bfloat> @vp_roundtozero_nxv2bf16_unmasked(<vscale x 2 x bfl
 define <vscale x 4 x bfloat> @vp_roundtozero_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv4bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v13, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v10, v0.t
+; CHECK-NEXT:    vfabs.v v8, v10
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v13, v8, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vmv1r.v v0, v13
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
 ; CHECK-NEXT:    ret
   %v = call <vscale x 4 x bfloat> @llvm.vp.roundtozero.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x bfloat> %v
@@ -148,16 +120,14 @@ define <vscale x 4 x bfloat> @vp_roundtozero_nxv4bf16(<vscale x 4 x bfloat> %va,
 define <vscale x 4 x bfloat> @vp_roundtozero_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv4bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v10
 ; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v8, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
@@ -171,27 +141,19 @@ define <vscale x 4 x bfloat> @vp_roundtozero_nxv4bf16_unmasked(<vscale x 4 x bfl
 define <vscale x 8 x bfloat> @vp_roundtozero_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv8bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v17, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v12, v0.t
+; CHECK-NEXT:    vfabs.v v8, v12
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v17, v8, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vmv1r.v v0, v17
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x bfloat> @llvm.vp.roundtozero.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x bfloat> %v
@@ -200,16 +162,14 @@ define <vscale x 8 x bfloat> @vp_roundtozero_nxv8bf16(<vscale x 8 x bfloat> %va,
 define <vscale x 8 x bfloat> @vp_roundtozero_nxv8bf16_unmasked(<vscale x 8 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv8bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v12
 ; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v8, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
@@ -223,27 +183,19 @@ define <vscale x 8 x bfloat> @vp_roundtozero_nxv8bf16_unmasked(<vscale x 8 x bfl
 define <vscale x 16 x bfloat> @vp_roundtozero_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv16bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
-; CHECK-NEXT:    vmv1r.v v25, v0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v8, v16, v0.t
+; CHECK-NEXT:    vfabs.v v8, v16
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v25, v8, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v24
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x bfloat> @llvm.vp.roundtozero.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x bfloat> %v
@@ -252,16 +204,14 @@ define <vscale x 16 x bfloat> @vp_roundtozero_nxv16bf16(<vscale x 16 x bfloat> %
 define <vscale x 16 x bfloat> @vp_roundtozero_nxv16bf16_unmasked(<vscale x 16 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv16bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v8, v16
 ; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v8, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
@@ -275,60 +225,31 @@ define <vscale x 16 x bfloat> @vp_roundtozero_nxv16bf16_unmasked(<vscale x 16 x
 define <vscale x 32 x bfloat> @vp_roundtozero_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv32bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v7, v0
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    lui a3, 307200
-; CHECK-NEXT:    slli a1, a2, 1
-; CHECK-NEXT:    srli a2, a2, 2
-; CHECK-NEXT:    fmv.w.x fa5, a3
-; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vslidedown.vx v6, v0, a2
-; CHECK-NEXT:    sltu a2, a0, a3
-; CHECK-NEXT:    vmv1r.v v5, v6
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a3
-; CHECK-NEXT:    vmv1r.v v0, v6
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v5, v16, fa5, v0.t
-; CHECK-NEXT:    fsrmi a2, 1
-; CHECK-NEXT:    vmv1r.v v0, v5
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    fsrm a2
-; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vfabs.v v24, v16
+; CHECK-NEXT:    fmv.w.x fa5, a0
+; CHECK-NEXT:    vmflt.vf v0, v24, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v6
+; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB10_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB10_2:
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v8, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v7
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v8, v16, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vfabs.v v8, v24
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v24, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v7
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v24, v0.t
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.roundtozero.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -337,55 +258,31 @@ define <vscale x 32 x bfloat> @vp_roundtozero_nxv32bf16(<vscale x 32 x bfloat> %
 define <vscale x 32 x bfloat> @vp_roundtozero_nxv32bf16_unmasked(<vscale x 32 x bfloat> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv32bf16_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vmset.m v16
-; CHECK-NEXT:    lui a3, 307200
-; CHECK-NEXT:    slli a1, a2, 1
-; CHECK-NEXT:    srli a2, a2, 2
-; CHECK-NEXT:    fmv.w.x fa5, a3
-; CHECK-NEXT:    sub a3, a0, a1
-; CHECK-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v7, v16, a2
-; CHECK-NEXT:    sltu a2, a0, a3
-; CHECK-NEXT:    vmv1r.v v6, v7
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    and a2, a2, a3
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfabs.v v16, v24, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v6, v16, fa5, v0.t
-; CHECK-NEXT:    fsrmi a2, 1
-; CHECK-NEXT:    vmv1r.v v0, v6
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT:    fsrm a2
-; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v7
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24, v0.t
-; CHECK-NEXT:    bltu a0, a1, .LBB11_2
-; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    mv a0, a1
-; CHECK-NEXT:  .LBB11_2:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v24, v16
+; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v24, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfabs.v v8, v24
+; CHECK-NEXT:    vmflt.vf v0, v8, fa5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v24, v0.t
+; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x bfloat> @llvm.vp.roundtozero.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x bfloat> %v
@@ -394,17 +291,13 @@ define <vscale x 32 x bfloat> @vp_roundtozero_nxv32bf16_unmasked(<vscale x 32 x
 define <vscale x 1 x half> @vp_roundtozero_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_nxv1f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -412,27 +305,19 @@ define <vscale x 1 x half> @vp_roundtozero_nxv1f16(<vscale x 1 x half> %va, <vsc
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_nxv1f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 1 x half> @llvm.vp.roundtozero.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %v
@@ -441,15 +326,13 @@ define <vscale x 1 x half> @vp_roundtozero_nxv1f16(<vscale x 1 x half> %va, <vsc
 define <vscale x 1 x half> @vp_roundtozero_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_nxv1f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
 ; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -457,16 +340,14 @@ define <vscale x 1 x half> @vp_roundtozero_nxv1f16_unmasked(<vscale x 1 x half>
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_nxv1f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
@@ -480,17 +361,13 @@ define <vscale x 1 x half> @vp_roundtozero_nxv1f16_unmasked(<vscale x 1 x half>
 define <vscale x 2 x half> @vp_roundtozero_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_nxv2f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -498,27 +375,19 @@ define <vscale x 2 x half> @vp_roundtozero_nxv2f16(<vscale x 2 x half> %va, <vsc
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_nxv2f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v9, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v8, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v11, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v11, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vmv.v.v v0, v8
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v11, v10, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v11, v11, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v10, v11, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v9
+; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 2 x half> @llvm.vp.roundtozero.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %v
@@ -527,15 +396,13 @@ define <vscale x 2 x half> @vp_roundtozero_nxv2f16(<vscale x 2 x half> %va, <vsc
 define <vscale x 2 x half> @vp_roundtozero_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_nxv2f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
 ; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -543,16 +410,14 @@ define <vscale x 2 x half> @vp_roundtozero_nxv2f16_unmasked(<vscale x 2 x half>
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_nxv2f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v8, v9
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v9, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v9, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v9, v8, v9, v0.t
@@ -566,17 +431,13 @@ define <vscale x 2 x half> @vp_roundtozero_nxv2f16_unmasked(<vscale x 2 x half>
 define <vscale x 4 x half> @vp_roundtozero_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_nxv4f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -584,27 +445,19 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16(<vscale x 4 x half> %va, <vsc
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_nxv4f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v13, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v13, v8, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vmv1r.v v0, v13
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 4 x half> @llvm.vp.roundtozero.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %v
@@ -613,15 +466,13 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16(<vscale x 4 x half> %va, <vsc
 define <vscale x 4 x half> @vp_roundtozero_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_nxv4f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFH-NEXT:    vfabs.v v9, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
 ; ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -629,16 +480,14 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16_unmasked(<vscale x 4 x half>
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_nxv4f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v8, v10
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v10, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v10, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v10, v8, v10, v0.t
@@ -652,19 +501,13 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16_unmasked(<vscale x 4 x half>
 define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_nxv8f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFH-NEXT:    vmv1r.v v12, v0
-; ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vmv1r.v v0, v12
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -672,27 +515,19 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vsc
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_nxv8f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v17, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v12
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v17, v8, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vmv1r.v v0, v17
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 8 x half> @llvm.vp.roundtozero.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %v
@@ -701,15 +536,13 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vsc
 define <vscale x 8 x half> @vp_roundtozero_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_nxv8f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFH-NEXT:    vfabs.v v10, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
 ; ZVFH-NEXT:    vmflt.vf v0, v10, fa5
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -717,16 +550,14 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16_unmasked(<vscale x 8 x half>
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_nxv8f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v8, v12
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v12, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v12, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v12, v8, v12, v0.t
@@ -740,19 +571,13 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16_unmasked(<vscale x 8 x half>
 define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_nxv16f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFH-NEXT:    vmv1r.v v16, v0
-; ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfabs.v v12, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vmv1r.v v0, v16
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vmflt.vf v0, v12, fa5
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -760,27 +585,19 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_nxv16f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
-; ZVFHMIN-NEXT:    vmv1r.v v25, v0
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v8, v16
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v25, v8, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vmv1r.v v0, v25
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v24
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x half> @llvm.vp.roundtozero.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x half> %v
@@ -789,15 +606,13 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <
 define <vscale x 16 x half> @vp_roundtozero_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_nxv16f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFH-NEXT:    vfabs.v v12, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
 ; ZVFH-NEXT:    vmflt.vf v0, v12, fa5
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -805,16 +620,14 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16_unmasked(<vscale x 16 x hal
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_nxv16f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
 ; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v8, v16
 ; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v8, v16, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v8, v16, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v8, v8, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v8, v16, v0.t
@@ -828,19 +641,13 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16_unmasked(<vscale x 16 x hal
 define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_nxv32f16:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT:    vmv1r.v v24, v0
-; ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVFH-NEXT:    vfabs.v v16, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
-; ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vmv1r.v v0, v24
-; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
-; ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vmflt.vf v0, v16, fa5
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -848,60 +655,31 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_nxv32f16:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    lui a3, 307200
-; ZVFHMIN-NEXT:    slli a1, a2, 1
-; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a3
-; ZVFHMIN-NEXT:    sub a3, a0, a1
-; ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a2
-; ZVFHMIN-NEXT:    sltu a2, a0, a3
-; ZVFHMIN-NEXT:    vmv1r.v v5, v6
-; ZVFHMIN-NEXT:    addi a2, a2, -1
-; ZVFHMIN-NEXT:    and a2, a2, a3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v5, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a2, 1
-; ZVFHMIN-NEXT:    vmv1r.v v0, v5
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    fsrm a2
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT:    vfabs.v v24, v16
+; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
+; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
+; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB22_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB22_2:
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v8, v7
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v8, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vmv1r.v v0, v8
+; ZVFHMIN-NEXT:    vfabs.v v8, v24
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v24, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24, v0.t
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.roundtozero.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v
@@ -910,15 +688,13 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <
 define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
 ; ZVFH-LABEL: vp_roundtozero_nxv32f16_unmasked:
 ; ZVFH:       # %bb.0:
-; ZVFH-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; ZVFH-NEXT:    vfabs.v v16, v8
 ; ZVFH-NEXT:    li a0, 25
 ; ZVFH-NEXT:    slli a0, a0, 10
 ; ZVFH-NEXT:    fmv.h.x fa5, a0
 ; ZVFH-NEXT:    vmflt.vf v0, v16, fa5
-; ZVFH-NEXT:    fsrmi a0, 1
-; ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; ZVFH-NEXT:    fsrm a0
+; ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; ZVFH-NEXT:    vsetvli zero, zero, e16, m8, ta, mu
 ; ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -926,55 +702,31 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x hal
 ;
 ; ZVFHMIN-LABEL: vp_roundtozero_nxv32f16_unmasked:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    csrr a2, vlenb
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT:    vmset.m v16
-; ZVFHMIN-NEXT:    lui a3, 307200
-; ZVFHMIN-NEXT:    slli a1, a2, 1
-; ZVFHMIN-NEXT:    srli a2, a2, 2
-; ZVFHMIN-NEXT:    fmv.w.x fa5, a3
-; ZVFHMIN-NEXT:    sub a3, a0, a1
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT:    vslidedown.vx v7, v16, a2
-; ZVFHMIN-NEXT:    sltu a2, a0, a3
-; ZVFHMIN-NEXT:    vmv1r.v v6, v7
-; ZVFHMIN-NEXT:    addi a2, a2, -1
-; ZVFHMIN-NEXT:    and a2, a2, a3
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfabs.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vmflt.vf v6, v16, fa5, v0.t
-; ZVFHMIN-NEXT:    fsrmi a2, 1
-; ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v24, v0.t
-; ZVFHMIN-NEXT:    fsrm a2
-; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
-; ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24, v0.t
-; ZVFHMIN-NEXT:    bltu a0, a1, .LBB23_2
-; ZVFHMIN-NEXT:  # %bb.1:
-; ZVFHMIN-NEXT:    mv a0, a1
-; ZVFHMIN-NEXT:  .LBB23_2:
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT:    lui a0, 307200
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
 ; ZVFHMIN-NEXT:    vfabs.v v24, v16
+; ZVFHMIN-NEXT:    fmv.w.x fa5, a0
 ; ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; ZVFHMIN-NEXT:    fsrmi a0, 1
-; ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; ZVFHMIN-NEXT:    fsrm a0
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfabs.v v8, v24
+; ZVFHMIN-NEXT:    vmflt.vf v0, v8, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
+; ZVFHMIN-NEXT:    vfsgnj.vv v24, v16, v24, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v24
 ; ZVFHMIN-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.roundtozero.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x half> %v
@@ -983,16 +735,12 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x hal
 define <vscale x 1 x float> @vp_roundtozero_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -1004,14 +752,12 @@ define <vscale x 1 x float> @vp_roundtozero_nxv1f32(<vscale x 1 x float> %va, <v
 define <vscale x 1 x float> @vp_roundtozero_nxv1f32_unmasked(<vscale x 1 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv1f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -1023,16 +769,12 @@ define <vscale x 1 x float> @vp_roundtozero_nxv1f32_unmasked(<vscale x 1 x float
 define <vscale x 2 x float> @vp_roundtozero_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfabs.v v9, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmflt.vf v0, v9, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -1044,14 +786,12 @@ define <vscale x 2 x float> @vp_roundtozero_nxv2f32(<vscale x 2 x float> %va, <v
 define <vscale x 2 x float> @vp_roundtozero_nxv2f32_unmasked(<vscale x 2 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv2f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vfabs.v v9, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v9, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -1063,18 +803,12 @@ define <vscale x 2 x float> @vp_roundtozero_nxv2f32_unmasked(<vscale x 2 x float
 define <vscale x 4 x float> @vp_roundtozero_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v12, v0
-; CHECK-NEXT:    vfabs.v v10, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vmv1r.v v0, v12
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmflt.vf v0, v10, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -1086,14 +820,12 @@ define <vscale x 4 x float> @vp_roundtozero_nxv4f32(<vscale x 4 x float> %va, <v
 define <vscale x 4 x float> @vp_roundtozero_nxv4f32_unmasked(<vscale x 4 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv4f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vfabs.v v10, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v10, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -1105,18 +837,12 @@ define <vscale x 4 x float> @vp_roundtozero_nxv4f32_unmasked(<vscale x 4 x float
 define <vscale x 8 x float> @vp_roundtozero_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v16, v0
-; CHECK-NEXT:    vfabs.v v12, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vmv1r.v v0, v16
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmflt.vf v0, v12, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -1128,14 +854,12 @@ define <vscale x 8 x float> @vp_roundtozero_nxv8f32(<vscale x 8 x float> %va, <v
 define <vscale x 8 x float> @vp_roundtozero_nxv8f32_unmasked(<vscale x 8 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv8f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vfabs.v v12, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v12, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -1147,18 +871,12 @@ define <vscale x 8 x float> @vp_roundtozero_nxv8f32_unmasked(<vscale x 8 x float
 define <vscale x 16 x float> @vp_roundtozero_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    vfabs.v v16, v8, v0.t
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; CHECK-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vmflt.vf v0, v16, fa5
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1170,14 +888,12 @@ define <vscale x 16 x float> @vp_roundtozero_nxv16f32(<vscale x 16 x float> %va,
 define <vscale x 16 x float> @vp_roundtozero_nxv16f32_unmasked(<vscale x 16 x float> %va, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_roundtozero_nxv16f32_unmasked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vfabs.v v16, v8
 ; CHECK-NEXT:    lui a0, 307200
 ; CHECK-NEXT:    fmv.w.x fa5, a0
 ; CHECK-NEXT:    vmflt.vf v0, v16, fa5
-; CHECK-NEXT:    fsrmi a0, 1
-; CHECK-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT:    fsrm a0
+; CHECK-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; CHECK-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1189,16 +905,12 @@ define <vscale x 16 x float> @vp_roundtozero_nxv16f32_unmasked(<vscale x 16 x fl
 define <vscale x 1 x double> @vp_roundtozero_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_nxv1f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI34_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v9, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -1206,17 +918,13 @@ define <vscale x 1 x double> @vp_roundtozero_nxv1f64(<vscale x 1 x double> %va,
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_nxv1f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -1224,16 +932,12 @@ define <vscale x 1 x double> @vp_roundtozero_nxv1f64(<vscale x 1 x double> %va,
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv1f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI34_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI34_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI34_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -1241,17 +945,13 @@ define <vscale x 1 x double> @vp_roundtozero_nxv1f64(<vscale x 1 x double> %va,
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv1f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -1263,14 +963,12 @@ define <vscale x 1 x double> @vp_roundtozero_nxv1f64(<vscale x 1 x double> %va,
 define <vscale x 1 x double> @vp_roundtozero_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_nxv1f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI35_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI35_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI35_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v9, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -1278,15 +976,13 @@ define <vscale x 1 x double> @vp_roundtozero_nxv1f64_unmasked(<vscale x 1 x doub
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_nxv1f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v9, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v9, fa5
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -1294,14 +990,12 @@ define <vscale x 1 x double> @vp_roundtozero_nxv1f64_unmasked(<vscale x 1 x doub
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv1f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI35_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI35_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI35_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -1309,15 +1003,13 @@ define <vscale x 1 x double> @vp_roundtozero_nxv1f64_unmasked(<vscale x 1 x doub
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv1f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v9, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v9, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v9, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v9, v9, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
@@ -1329,18 +1021,12 @@ define <vscale x 1 x double> @vp_roundtozero_nxv1f64_unmasked(<vscale x 1 x doub
 define <vscale x 2 x double> @vp_roundtozero_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_nxv2f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI36_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI36_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vmv1r.v v0, v12
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v10, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -1348,19 +1034,13 @@ define <vscale x 2 x double> @vp_roundtozero_nxv2f64(<vscale x 2 x double> %va,
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_nxv2f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v12, v0
-; RV64ZVFH-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vmv1r.v v0, v12
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -1368,18 +1048,12 @@ define <vscale x 2 x double> @vp_roundtozero_nxv2f64(<vscale x 2 x double> %va,
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv2f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v12, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI36_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI36_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -1387,19 +1061,13 @@ define <vscale x 2 x double> @vp_roundtozero_nxv2f64(<vscale x 2 x double> %va,
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv2f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v12, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v12, v10, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v12
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -1411,14 +1079,12 @@ define <vscale x 2 x double> @vp_roundtozero_nxv2f64(<vscale x 2 x double> %va,
 define <vscale x 2 x double> @vp_roundtozero_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_nxv2f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI37_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI37_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI37_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI37_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v10, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v10, fa5
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -1426,15 +1092,13 @@ define <vscale x 2 x double> @vp_roundtozero_nxv2f64_unmasked(<vscale x 2 x doub
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_nxv2f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v10, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v10, fa5
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -1442,14 +1106,12 @@ define <vscale x 2 x double> @vp_roundtozero_nxv2f64_unmasked(<vscale x 2 x doub
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv2f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI37_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI37_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI37_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI37_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -1457,15 +1119,13 @@ define <vscale x 2 x double> @vp_roundtozero_nxv2f64_unmasked(<vscale x 2 x doub
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv2f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v10, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v10, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v10, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v10, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v10, v10, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v10, v8, v0.t
@@ -1477,18 +1137,12 @@ define <vscale x 2 x double> @vp_roundtozero_nxv2f64_unmasked(<vscale x 2 x doub
 define <vscale x 4 x double> @vp_roundtozero_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_nxv4f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI38_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI38_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vmv1r.v v0, v16
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v12, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -1496,19 +1150,13 @@ define <vscale x 4 x double> @vp_roundtozero_nxv4f64(<vscale x 4 x double> %va,
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_nxv4f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v16, v0
-; RV64ZVFH-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vmv1r.v v0, v16
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -1516,18 +1164,12 @@ define <vscale x 4 x double> @vp_roundtozero_nxv4f64(<vscale x 4 x double> %va,
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv4f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v16, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI38_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI38_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -1535,19 +1177,13 @@ define <vscale x 4 x double> @vp_roundtozero_nxv4f64(<vscale x 4 x double> %va,
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv4f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v16, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v16, v12, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v16
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -1559,14 +1195,12 @@ define <vscale x 4 x double> @vp_roundtozero_nxv4f64(<vscale x 4 x double> %va,
 define <vscale x 4 x double> @vp_roundtozero_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_nxv4f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI39_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI39_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI39_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v12, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v12, fa5
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -1574,15 +1208,13 @@ define <vscale x 4 x double> @vp_roundtozero_nxv4f64_unmasked(<vscale x 4 x doub
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_nxv4f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v12, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v12, fa5
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -1590,14 +1222,12 @@ define <vscale x 4 x double> @vp_roundtozero_nxv4f64_unmasked(<vscale x 4 x doub
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv4f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI39_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI39_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI39_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI39_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -1605,15 +1235,13 @@ define <vscale x 4 x double> @vp_roundtozero_nxv4f64_unmasked(<vscale x 4 x doub
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv4f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v12, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v12, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v12, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v12, v12, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
@@ -1625,18 +1253,12 @@ define <vscale x 4 x double> @vp_roundtozero_nxv4f64_unmasked(<vscale x 4 x doub
 define <vscale x 7 x double> @vp_roundtozero_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_nxv7f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1644,19 +1266,13 @@ define <vscale x 7 x double> @vp_roundtozero_nxv7f64(<vscale x 7 x double> %va,
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_nxv7f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1664,18 +1280,12 @@ define <vscale x 7 x double> @vp_roundtozero_nxv7f64(<vscale x 7 x double> %va,
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv7f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI40_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1683,19 +1293,13 @@ define <vscale x 7 x double> @vp_roundtozero_nxv7f64(<vscale x 7 x double> %va,
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv7f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1707,14 +1311,12 @@ define <vscale x 7 x double> @vp_roundtozero_nxv7f64(<vscale x 7 x double> %va,
 define <vscale x 7 x double> @vp_roundtozero_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_nxv7f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI41_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI41_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI41_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1722,15 +1324,13 @@ define <vscale x 7 x double> @vp_roundtozero_nxv7f64_unmasked(<vscale x 7 x doub
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_nxv7f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1738,14 +1338,12 @@ define <vscale x 7 x double> @vp_roundtozero_nxv7f64_unmasked(<vscale x 7 x doub
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv7f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI41_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI41_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI41_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1753,15 +1351,13 @@ define <vscale x 7 x double> @vp_roundtozero_nxv7f64_unmasked(<vscale x 7 x doub
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv7f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1773,18 +1369,12 @@ define <vscale x 7 x double> @vp_roundtozero_nxv7f64_unmasked(<vscale x 7 x doub
 define <vscale x 8 x double> @vp_roundtozero_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_nxv8f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
-; RV32ZVFH-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vmv1r.v v0, v24
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v16, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1792,19 +1382,13 @@ define <vscale x 8 x double> @vp_roundtozero_nxv8f64(<vscale x 8 x double> %va,
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_nxv8f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v24, v0
-; RV64ZVFH-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vmv1r.v v0, v24
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1812,18 +1396,12 @@ define <vscale x 8 x double> @vp_roundtozero_nxv8f64(<vscale x 8 x double> %va,
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv8f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v24, v0
 ; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI42_0)
 ; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
-; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1831,19 +1409,13 @@ define <vscale x 8 x double> @vp_roundtozero_nxv8f64(<vscale x 8 x double> %va,
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv8f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v24, v0
-; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v24, v16, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v24
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1855,14 +1427,12 @@ define <vscale x 8 x double> @vp_roundtozero_nxv8f64(<vscale x 8 x double> %va,
 define <vscale x 8 x double> @vp_roundtozero_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_nxv8f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    lui a1, %hi(.LCPI43_0)
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI43_0)(a1)
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI43_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v16, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v16, fa5
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1870,15 +1440,13 @@ define <vscale x 8 x double> @vp_roundtozero_nxv8f64_unmasked(<vscale x 8 x doub
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_nxv8f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v16, v8
 ; RV64ZVFH-NEXT:    li a0, 1075
 ; RV64ZVFH-NEXT:    slli a0, a0, 52
 ; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v16, fa5
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1886,14 +1454,12 @@ define <vscale x 8 x double> @vp_roundtozero_nxv8f64_unmasked(<vscale x 8 x doub
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv8f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    lui a1, %hi(.LCPI43_0)
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI43_0)(a1)
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI43_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1901,15 +1467,13 @@ define <vscale x 8 x double> @vp_roundtozero_nxv8f64_unmasked(<vscale x 8 x doub
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv8f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v16, v8
 ; RV64ZVFHMIN-NEXT:    li a0, 1075
 ; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
 ; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v16, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v16, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v16, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v16, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v16, v8, v0.t
@@ -1923,184 +1487,88 @@ define <vscale x 8 x double> @vp_roundtozero_nxv8f64_unmasked(<vscale x 8 x doub
 define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_nxv16f64:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV32ZVFH-NEXT:    vmv1r.v v7, v0
-; RV32ZVFH-NEXT:    csrr a1, vlenb
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI44_0)
-; RV32ZVFH-NEXT:    srli a3, a1, 3
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI44_0)(a2)
-; RV32ZVFH-NEXT:    sub a2, a0, a1
-; RV32ZVFH-NEXT:    vslidedown.vx v6, v0, a3
-; RV32ZVFH-NEXT:    sltu a3, a0, a2
-; RV32ZVFH-NEXT:    addi a3, a3, -1
-; RV32ZVFH-NEXT:    and a2, a3, a2
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a2, 1
-; RV32ZVFH-NEXT:    vmv1r.v v0, v6
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    fsrm a2
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI44_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfabs.v v24, v8
+; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFH-NEXT:    bltu a0, a1, .LBB44_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    mv a0, a1
-; RV32ZVFH-NEXT:  .LBB44_2:
-; RV32ZVFH-NEXT:    vmv1r.v v0, v7
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV32ZVFH-NEXT:    fsrmi a0, 1
+; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFH-NEXT:    ret
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_nxv16f64:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV64ZVFH-NEXT:    vmv1r.v v7, v0
-; RV64ZVFH-NEXT:    csrr a1, vlenb
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    srli a3, a1, 3
-; RV64ZVFH-NEXT:    vslidedown.vx v6, v0, a3
-; RV64ZVFH-NEXT:    sub a3, a0, a1
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    sltu a2, a0, a3
-; RV64ZVFH-NEXT:    addi a2, a2, -1
-; RV64ZVFH-NEXT:    and a2, a2, a3
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a2, 1
-; RV64ZVFH-NEXT:    vmv1r.v v0, v6
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    fsrm a2
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfabs.v v24, v8
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFH-NEXT:    bltu a0, a1, .LBB44_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    mv a0, a1
-; RV64ZVFH-NEXT:  .LBB44_2:
-; RV64ZVFH-NEXT:    vmv1r.v v0, v7
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV64ZVFH-NEXT:    fsrmi a0, 1
+; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFH-NEXT:    ret
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv16f64:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV32ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; RV32ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI44_0)
-; RV32ZVFHMIN-NEXT:    srli a3, a1, 3
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI44_0)(a2)
-; RV32ZVFHMIN-NEXT:    sub a2, a0, a1
-; RV32ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; RV32ZVFHMIN-NEXT:    sltu a3, a0, a2
-; RV32ZVFHMIN-NEXT:    addi a3, a3, -1
-; RV32ZVFHMIN-NEXT:    and a2, a3, a2
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a2, 1
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a2
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI44_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI44_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    bltu a0, a1, .LBB44_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    mv a0, a1
-; RV32ZVFHMIN-NEXT:  .LBB44_2:
-; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv16f64:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; RV64ZVFHMIN-NEXT:    vmv1r.v v7, v0
-; RV64ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    srli a3, a1, 3
-; RV64ZVFHMIN-NEXT:    vslidedown.vx v6, v0, a3
-; RV64ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV64ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV64ZVFHMIN-NEXT:    and a2, a2, a3
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v6, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a2, 1
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v6
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a2
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    bltu a0, a1, .LBB44_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    mv a0, a1
-; RV64ZVFHMIN-NEXT:  .LBB44_2:
-; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5, v0.t
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.roundtozero.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x double> %v
@@ -2109,136 +1577,88 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
 define <vscale x 16 x double> @vp_roundtozero_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
 ; RV32ZVFH-LABEL: vp_roundtozero_nxv16f64_unmasked:
 ; RV32ZVFH:       # %bb.0:
-; RV32ZVFH-NEXT:    csrr a1, vlenb
-; RV32ZVFH-NEXT:    lui a2, %hi(.LCPI45_0)
-; RV32ZVFH-NEXT:    sub a3, a0, a1
-; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI45_0)(a2)
-; RV32ZVFH-NEXT:    sltu a2, a0, a3
-; RV32ZVFH-NEXT:    addi a2, a2, -1
-; RV32ZVFH-NEXT:    and a2, a2, a3
-; RV32ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFH-NEXT:    vfabs.v v24, v16
-; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFH-NEXT:    fsrmi a2, 1
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFH-NEXT:    fsrm a2
-; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFH-NEXT:    bltu a0, a1, .LBB45_2
-; RV32ZVFH-NEXT:  # %bb.1:
-; RV32ZVFH-NEXT:    mv a0, a1
-; RV32ZVFH-NEXT:  .LBB45_2:
-; RV32ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    lui a0, %hi(.LCPI45_0)
+; RV32ZVFH-NEXT:    fld fa5, %lo(.LCPI45_0)(a0)
+; RV32ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFH-NEXT:    vfabs.v v24, v8
 ; RV32ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFH-NEXT:    fsrmi a0, 1
-; RV32ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFH-NEXT:    fsrm a0
+; RV32ZVFH-NEXT:    vfabs.v v24, v16
+; RV32ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT:    vmv1r.v v0, v7
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFH-NEXT:    ret
 ;
 ; RV64ZVFH-LABEL: vp_roundtozero_nxv16f64_unmasked:
 ; RV64ZVFH:       # %bb.0:
-; RV64ZVFH-NEXT:    csrr a1, vlenb
-; RV64ZVFH-NEXT:    li a2, 1075
-; RV64ZVFH-NEXT:    sub a3, a0, a1
-; RV64ZVFH-NEXT:    slli a2, a2, 52
-; RV64ZVFH-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFH-NEXT:    sltu a2, a0, a3
-; RV64ZVFH-NEXT:    addi a2, a2, -1
-; RV64ZVFH-NEXT:    and a2, a2, a3
-; RV64ZVFH-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFH-NEXT:    vfabs.v v24, v16
-; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFH-NEXT:    fsrmi a2, 1
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFH-NEXT:    fsrm a2
-; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFH-NEXT:    bltu a0, a1, .LBB45_2
-; RV64ZVFH-NEXT:  # %bb.1:
-; RV64ZVFH-NEXT:    mv a0, a1
-; RV64ZVFH-NEXT:  .LBB45_2:
-; RV64ZVFH-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFH-NEXT:    vfabs.v v24, v8
+; RV64ZVFH-NEXT:    li a0, 1075
+; RV64ZVFH-NEXT:    slli a0, a0, 52
+; RV64ZVFH-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFH-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFH-NEXT:    fsrmi a0, 1
-; RV64ZVFH-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFH-NEXT:    fsrm a0
+; RV64ZVFH-NEXT:    vfabs.v v24, v16
+; RV64ZVFH-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFH-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT:    vmv1r.v v0, v7
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFH-NEXT:    ret
 ;
 ; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv16f64_unmasked:
 ; RV32ZVFHMIN:       # %bb.0:
-; RV32ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV32ZVFHMIN-NEXT:    lui a2, %hi(.LCPI45_0)
-; RV32ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI45_0)(a2)
-; RV32ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV32ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV32ZVFHMIN-NEXT:    and a2, a2, a3
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
-; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a2, 1
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a2
-; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV32ZVFHMIN-NEXT:    bltu a0, a1, .LBB45_2
-; RV32ZVFHMIN-NEXT:  # %bb.1:
-; RV32ZVFHMIN-NEXT:    mv a0, a1
-; RV32ZVFHMIN-NEXT:  .LBB45_2:
-; RV32ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    lui a0, %hi(.LCPI45_0)
+; RV32ZVFHMIN-NEXT:    fld fa5, %lo(.LCPI45_0)(a0)
+; RV32ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV32ZVFHMIN-NEXT:    vfabs.v v24, v8
 ; RV32ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV32ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV32ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV32ZVFHMIN-NEXT:    fsrm a0
+; RV32ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV32ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT:    vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV32ZVFHMIN-NEXT:    ret
 ;
 ; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv16f64_unmasked:
 ; RV64ZVFHMIN:       # %bb.0:
-; RV64ZVFHMIN-NEXT:    csrr a1, vlenb
-; RV64ZVFHMIN-NEXT:    li a2, 1075
-; RV64ZVFHMIN-NEXT:    sub a3, a0, a1
-; RV64ZVFHMIN-NEXT:    slli a2, a2, 52
-; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a2
-; RV64ZVFHMIN-NEXT:    sltu a2, a0, a3
-; RV64ZVFHMIN-NEXT:    addi a2, a2, -1
-; RV64ZVFHMIN-NEXT:    and a2, a2, a3
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
-; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
-; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a2, 1
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a2
-; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
-; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
-; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
-; RV64ZVFHMIN-NEXT:    bltu a0, a1, .LBB45_2
-; RV64ZVFHMIN-NEXT:  # %bb.1:
-; RV64ZVFHMIN-NEXT:    mv a0, a1
-; RV64ZVFHMIN-NEXT:  .LBB45_2:
-; RV64ZVFHMIN-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; RV64ZVFHMIN-NEXT:    vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT:    li a0, 1075
+; RV64ZVFHMIN-NEXT:    slli a0, a0, 52
+; RV64ZVFHMIN-NEXT:    fmv.d.x fa5, a0
 ; RV64ZVFHMIN-NEXT:    vmflt.vf v0, v24, fa5
-; RV64ZVFHMIN-NEXT:    fsrmi a0, 1
-; RV64ZVFHMIN-NEXT:    vfcvt.x.f.v v24, v8, v0.t
-; RV64ZVFHMIN-NEXT:    fsrm a0
+; RV64ZVFHMIN-NEXT:    vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT:    vmflt.vf v7, v24, fa5
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v8, v0.t
 ; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
 ; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64ZVFHMIN-NEXT:    vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT:    vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT:    vfcvt.rtz.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT:    vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT:    vfsgnj.vv v16, v24, v16, v0.t
 ; RV64ZVFHMIN-NEXT:    ret
   %v = call <vscale x 16 x double> @llvm.vp.roundtozero.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x double> %v

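For reference, a minimal standalone reproducer of the lowering exercised by the checks above, separate from the commit itself. The RUN line, function name, and element count are illustrative assumptions rather than anything taken from the patch; the comments describe the vfcvt.rtz.x.f.v sequence visible in the updated checks.

; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

declare <vscale x 2 x double> @llvm.vp.roundtozero.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)

; With this change, the masked round-to-zero is expected to select the
; statically rounded convert (vfcvt.rtz.x.f.v) instead of swapping the
; dynamic rounding mode with fsrmi/fsrm around vfcvt.x.f.v.
define <vscale x 2 x double> @vp_roundtozero_example(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
  %v = call <vscale x 2 x double> @llvm.vp.roundtozero.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %v
}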
More information about the llvm-commits mailing list