[llvm] [RISCV] Avoid vl toggles when lowering vector_splice/experimental_vp_splice (PR #146746)

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 4 05:51:23 PDT 2025


https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/146746

>From f0d5905d3e47f8bfb82eaf5f3d98cc0d2d204aad Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Wed, 2 Jul 2025 17:26:27 +0100
Subject: [PATCH 1/2] [RISCV] Avoid vl toggles when lowering
 vector_splice/experimental_vp_splice

When vectorizing a loop with a fixed-order recurrence, we emit a vector splice, which is lowered to a vslidedown and vslideup pair.
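
For reference, a minimal IR sketch (operand names hypothetical) of the splice emitted for a first-order recurrence: at offset -1 the result is the last element of the previous iteration's vector followed by the first VL-1 elements of the current one.

    ; result = [ prev[VL-1], cur[0 .. VL-2] ]
    %recur = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> %prev, <vscale x 4 x i32> %cur, i32 -1)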

However, the way we lower it today leaves extra vl toggles in the loop, especially with EVL tail folding, e.g.:

    .LBB0_5:                                # %vector.body
                                            # =>This Inner Loop Header: Depth=1
    	sub	a5, a2, a3
    	sh2add	a6, a3, a1
    	zext.w	a7, a4
    	vsetvli	a4, a5, e8, mf2, ta, ma
    	vle32.v	v10, (a6)
    	addi	a7, a7, -1
    	vsetivli	zero, 1, e32, m2, ta, ma
    	vslidedown.vx	v8, v8, a7
    	sh2add	a6, a3, a0
    	vsetvli	zero, a5, e32, m2, ta, ma
    	vslideup.vi	v8, v10, 1
    	vadd.vv	v8, v10, v8
    	add	a3, a3, a4
    	vse32.v	v8, (a6)
    	vmv2r.v	v8, v10
    	bne	a3, a2, .LBB0_5

Because the vslideup overwrites all but the first UpOffset elements of the vslidedown's result, we currently set the vslidedown's AVL to that offset.

But the vslideup uses either VLMAX or the EVL as its AVL, which causes a toggle.

This patch increases the AVL of the vslidedown so that it matches the vslideup's, even though the extra elements are overwritten anyway, avoiding the toggle.
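
For the VP form, a minimal sketch (values hypothetical) of the splices these tests exercise; after this change the vslidedown's AVL becomes the second EVL rather than %evla - 5, matching the vslideup:

    ; Both slides now run with AVL = %evlb, so no vl toggle is needed.
    %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)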

This operates under the assumption that, on the average microarchitecture, a vl toggle is more expensive than performing the vslidedown at a higher vl.

If we wanted to aggressively optimise for vl at the expense of introducing more toggles, we could probably look at doing this in RISCVVLOptimizer.
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |   5 +-
 .../RISCV/rvv/fixed-vectors-vp-splice.ll      | 112 ++--
 llvm/test/CodeGen/RISCV/rvv/vector-splice.ll  | 512 +++++++-----------
 .../RISCV/rvv/vp-splice-mask-fixed-vectors.ll |  48 +-
 .../RISCV/rvv/vp-splice-mask-vectors.ll       |  84 ++-
 llvm/test/CodeGen/RISCV/rvv/vp-splice.ll      | 126 ++---
 6 files changed, 355 insertions(+), 532 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 326dd7149ef96..989a2cd237262 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -12331,7 +12331,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
 
   SDValue SlideDown =
       getVSlidedown(DAG, Subtarget, DL, VecVT, DAG.getUNDEF(VecVT), V1,
-                    DownOffset, TrueMask, UpOffset);
+                    DownOffset, TrueMask, DAG.getRegister(RISCV::X0, XLenVT));
   return getVSlideup(DAG, Subtarget, DL, VecVT, SlideDown, V2, UpOffset,
                      TrueMask, DAG.getRegister(RISCV::X0, XLenVT),
                      RISCVVType::TAIL_AGNOSTIC);
@@ -13354,8 +13354,7 @@ RISCVTargetLowering::lowerVPSpliceExperimental(SDValue Op,
 
   if (ImmValue != 0)
     Op1 = getVSlidedown(DAG, Subtarget, DL, ContainerVT,
-                        DAG.getUNDEF(ContainerVT), Op1, DownOffset, Mask,
-                        UpOffset);
+                        DAG.getUNDEF(ContainerVT), Op1, DownOffset, Mask, EVL2);
   SDValue Result = getVSlideup(DAG, Subtarget, DL, ContainerVT, Op1, Op2,
                                UpOffset, Mask, EVL2, RISCVVType::TAIL_AGNOSTIC);
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
index 8160e62a43106..79fbdb007a70c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
@@ -7,10 +7,9 @@
 define <2 x i64> @test_vp_splice_v2i64(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 
@@ -22,9 +21,8 @@ define <2 x i64> @test_vp_splice_v2i64_negative_offset(<2 x i64> %va, <2 x i64>
 ; CHECK-LABEL: test_vp_splice_v2i64_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
 
@@ -46,10 +44,10 @@ define <2 x i64> @test_vp_splice_v2i64_zero_offset(<2 x i64> %va, <2 x i64> %vb,
 define <2 x i64> @test_vp_splice_v2i64_masked(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v2i64_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
@@ -59,10 +57,9 @@ define <2 x i64> @test_vp_splice_v2i64_masked(<2 x i64> %va, <2 x i64> %vb, <2 x
 define <4 x i32> @test_vp_splice_v4i32(<4 x i32> %va, <4 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 
@@ -74,9 +71,8 @@ define <4 x i32> @test_vp_splice_v4i32_negative_offset(<4 x i32> %va, <4 x i32>
 ; CHECK-LABEL: test_vp_splice_v4i32_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
 
@@ -87,10 +83,10 @@ define <4 x i32> @test_vp_splice_v4i32_negative_offset(<4 x i32> %va, <4 x i32>
 define <4 x i32> @test_vp_splice_v4i32_masked(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v4i32_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
@@ -100,10 +96,9 @@ define <4 x i32> @test_vp_splice_v4i32_masked(<4 x i32> %va, <4 x i32> %vb, <4 x
 define <8 x i16> @test_vp_splice_v8i16(<8 x i16> %va, <8 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 
@@ -115,9 +110,8 @@ define <8 x i16> @test_vp_splice_v8i16_negative_offset(<8 x i16> %va, <8 x i16>
 ; CHECK-LABEL: test_vp_splice_v8i16_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
 
@@ -128,10 +122,10 @@ define <8 x i16> @test_vp_splice_v8i16_negative_offset(<8 x i16> %va, <8 x i16>
 define <8 x i16> @test_vp_splice_v8i16_masked(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v8i16_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16> %va, <8 x i16> %vb, i32 5, <8 x i1> %mask, i32 %evla, i32 %evlb)
@@ -141,10 +135,9 @@ define <8 x i16> @test_vp_splice_v8i16_masked(<8 x i16> %va, <8 x i16> %vb, <8 x
 define <16 x i8> @test_vp_splice_v16i8(<16 x i8> %va, <16 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 
@@ -156,9 +149,8 @@ define <16 x i8> @test_vp_splice_v16i8_negative_offset(<16 x i8> %va, <16 x i8>
 ; CHECK-LABEL: test_vp_splice_v16i8_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
 
@@ -169,10 +161,10 @@ define <16 x i8> @test_vp_splice_v16i8_negative_offset(<16 x i8> %va, <16 x i8>
 define <16 x i8> @test_vp_splice_v16i8_masked(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v16i8_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8> %va, <16 x i8> %vb, i32 5, <16 x i1> %mask, i32 %evla, i32 %evlb)
@@ -182,10 +174,9 @@ define <16 x i8> @test_vp_splice_v16i8_masked(<16 x i8> %va, <16 x i8> %vb, <16
 define <2 x double> @test_vp_splice_v2f64(<2 x double> %va, <2 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 
@@ -197,9 +188,8 @@ define <2 x double> @test_vp_splice_v2f64_negative_offset(<2 x double> %va, <2 x
 ; CHECK-LABEL: test_vp_splice_v2f64_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
 
@@ -210,10 +200,10 @@ define <2 x double> @test_vp_splice_v2f64_negative_offset(<2 x double> %va, <2 x
 define <2 x double> @test_vp_splice_v2f64_masked(<2 x double> %va, <2 x double> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v2f64_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
@@ -223,10 +213,9 @@ define <2 x double> @test_vp_splice_v2f64_masked(<2 x double> %va, <2 x double>
 define <4 x float> @test_vp_splice_v4f32(<4 x float> %va, <4 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 
@@ -238,9 +227,8 @@ define <4 x float> @test_vp_splice_v4f32_negative_offset(<4 x float> %va, <4 x f
 ; CHECK-LABEL: test_vp_splice_v4f32_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
 
@@ -251,10 +239,10 @@ define <4 x float> @test_vp_splice_v4f32_negative_offset(<4 x float> %va, <4 x f
 define <4 x float> @test_vp_splice_v4f32_masked(<4 x float> %va, <4 x float> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v4f32_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
@@ -264,10 +252,9 @@ define <4 x float> @test_vp_splice_v4f32_masked(<4 x float> %va, <4 x float> %vb
 define <8 x half> @test_vp_splice_v8f16(<8 x half> %va, <8 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 
@@ -279,9 +266,8 @@ define <8 x half> @test_vp_splice_v8f16_negative_offset(<8 x half> %va, <8 x hal
 ; CHECK-LABEL: test_vp_splice_v8f16_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
 
@@ -292,10 +278,10 @@ define <8 x half> @test_vp_splice_v8f16_negative_offset(<8 x half> %va, <8 x hal
 define <8 x half> @test_vp_splice_v8f16_masked(<8 x half> %va, <8 x half> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v8f16_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <8 x half> @llvm.experimental.vp.splice.v8f16(<8 x half> %va, <8 x half> %vb, i32 5, <8 x i1> %mask, i32 %evla, i32 %evlb)
@@ -364,10 +350,9 @@ define <4 x half> @test_vp_splice_nxv2f16_with_firstelt(half %first, <4 x half>
 define <8 x bfloat> @test_vp_splice_v8bf16(<8 x bfloat> %va, <8 x bfloat> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v8bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
 
@@ -379,9 +364,8 @@ define <8 x bfloat> @test_vp_splice_v8bf16_negative_offset(<8 x bfloat> %va, <8
 ; CHECK-LABEL: test_vp_splice_v8bf16_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
 
@@ -392,10 +376,10 @@ define <8 x bfloat> @test_vp_splice_v8bf16_negative_offset(<8 x bfloat> %va, <8
 define <8 x bfloat> @test_vp_splice_v8bf16_masked(<8 x bfloat> %va, <8 x bfloat> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v8bf16_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <8 x bfloat> @llvm.experimental.vp.splice.v8bf16(<8 x bfloat> %va, <8 x bfloat> %vb, i32 5, <8 x i1> %mask, i32 %evla, i32 %evlb)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
index 90d798b167cfc..87b6442c38a42 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
@@ -42,10 +42,8 @@ define <vscale x 1 x i1> @splice_nxv1i1_offset_max(<vscale x 1 x i1> %a, <vscale
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    addi a0, a0, -1
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
@@ -90,10 +88,8 @@ define <vscale x 2 x i1> @splice_nxv2i1_offset_max(<vscale x 2 x i1> %a, <vscale
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    addi a0, a0, -3
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
@@ -138,10 +134,8 @@ define <vscale x 4 x i1> @splice_nxv4i1_offset_max(<vscale x 4 x i1> %a, <vscale
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    addi a0, a0, -7
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
@@ -184,10 +178,8 @@ define <vscale x 8 x i1> @splice_nxv8i1_offset_max(<vscale x 8 x i1> %a, <vscale
 ; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    addi a0, a0, -15
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
@@ -232,10 +224,8 @@ define <vscale x 16 x i1> @splice_nxv16i1_offset_max(<vscale x 16 x i1> %a, <vsc
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
 ; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -31
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT:    addi a0, a0, -31
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
@@ -272,19 +262,19 @@ define <vscale x 32 x i1> @splice_nxv32i1_offset_max(<vscale x 32 x i1> %a, <vsc
 ; CHECK-LABEL: splice_nxv32i1_offset_max:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vmv.v.i v12, 0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 63
+; CHECK-NEXT:    li a0, 63
 ; CHECK-NEXT:    vmerge.vim v16, v12, 1, v0
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmerge.vim v8, v12, 1, v0
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
+; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -63
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v16, v16, a1
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v12, 1, v0
-; CHECK-NEXT:    vslideup.vx v16, v8, a0
-; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vslideup.vx v8, v16, a0
+; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i1> @llvm.vector.splice.nxv32i1(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b, i32 63)
@@ -319,19 +309,19 @@ define <vscale x 64 x i1> @splice_nxv64i1_offset_max(<vscale x 64 x i1> %a, <vsc
 ; CHECK-LABEL: splice_nxv64i1_offset_max:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vmv.v.i v16, 0
+; CHECK-NEXT:    vmv1r.v v9, v0
+; CHECK-NEXT:    vmv1r.v v0, v8
+; CHECK-NEXT:    vmv.v.i v24, 0
+; CHECK-NEXT:    li a0, 127
+; CHECK-NEXT:    vmerge.vim v16, v24, 1, v0
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmerge.vim v8, v24, 1, v0
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 127
-; CHECK-NEXT:    vmerge.vim v24, v16, 1, v0
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -127
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v24, v24, a1
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vmerge.vim v8, v16, 1, v0
-; CHECK-NEXT:    vslideup.vx v24, v8, a0
-; CHECK-NEXT:    vand.vi v8, v24, 1
+; CHECK-NEXT:    vslideup.vx v8, v16, a0
+; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 64 x i1> @llvm.vector.splice.nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b, i32 127)
@@ -368,9 +358,8 @@ define <vscale x 1 x i8> @splice_nxv1i8_offset_min(<vscale x 1 x i8> %a, <vscale
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i8> @llvm.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 -2)
@@ -380,12 +369,11 @@ define <vscale x 1 x i8> @splice_nxv1i8_offset_min(<vscale x 1 x i8> %a, <vscale
 define <vscale x 1 x i8> @splice_nxv1i8_offset_max(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv1i8_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i8> @llvm.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 1)
@@ -422,9 +410,8 @@ define <vscale x 2 x i8> @splice_nxv2i8_offset_min(<vscale x 2 x i8> %a, <vscale
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i8> @llvm.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 -4)
@@ -434,12 +421,11 @@ define <vscale x 2 x i8> @splice_nxv2i8_offset_min(<vscale x 2 x i8> %a, <vscale
 define <vscale x 2 x i8> @splice_nxv2i8_offset_max(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i8_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i8> @llvm.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 3)
@@ -476,9 +462,8 @@ define <vscale x 4 x i8> @splice_nxv4i8_offset_min(<vscale x 4 x i8> %a, <vscale
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i8> @llvm.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 -8)
@@ -488,12 +473,11 @@ define <vscale x 4 x i8> @splice_nxv4i8_offset_min(<vscale x 4 x i8> %a, <vscale
 define <vscale x 4 x i8> @splice_nxv4i8_offset_max(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i8_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 7
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i8> @llvm.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 7)
@@ -528,9 +512,8 @@ define <vscale x 8 x i8> @splice_nxv8i8_offset_min(<vscale x 8 x i8> %a, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i8> @llvm.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 -16)
@@ -542,9 +525,8 @@ define <vscale x 8 x i8> @splice_nxv8i8_offset_max(<vscale x 8 x i8> %a, <vscale
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i8> @llvm.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 15)
@@ -567,9 +549,8 @@ define <vscale x 16 x i8> @splice_nxv16i8_offset_negone(<vscale x 16 x i8> %a, <
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -1)
@@ -582,11 +563,10 @@ define <vscale x 16 x i8> @splice_nxv16i8_offset_min(<vscale x 16 x i8> %a, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -32
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v10, a1
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -32)
   ret <vscale x 16 x i8> %res
@@ -595,12 +575,11 @@ define <vscale x 16 x i8> @splice_nxv16i8_offset_min(<vscale x 16 x i8> %a, <vsc
 define <vscale x 16 x i8> @splice_nxv16i8_offset_max(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv16i8_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 31
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -31
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 31)
@@ -623,9 +602,8 @@ define <vscale x 32 x i8> @splice_nxv32i8_offset_negone(<vscale x 32 x i8> %a, <
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e8, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i8> @llvm.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 -1)
@@ -638,11 +616,10 @@ define <vscale x 32 x i8> @splice_nxv32i8_offset_min(<vscale x 32 x i8> %a, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -64
-; CHECK-NEXT:    li a1, 64
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v12, a1
+; CHECK-NEXT:    li a0, 64
+; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i8> @llvm.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 -64)
   ret <vscale x 32 x i8> %res
@@ -651,13 +628,12 @@ define <vscale x 32 x i8> @splice_nxv32i8_offset_min(<vscale x 32 x i8> %a, <vsc
 define <vscale x 32 x i8> @splice_nxv32i8_offset_max(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv32i8_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 63
+; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -63
-; CHECK-NEXT:    li a1, 63
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i8> @llvm.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 63)
@@ -680,9 +656,8 @@ define <vscale x 64 x i8> @splice_nxv64i8_offset_negone(<vscale x 64 x i8> %a, <
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 64 x i8> @llvm.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 -1)
@@ -695,11 +670,10 @@ define <vscale x 64 x i8> @splice_nxv64i8_offset_min(<vscale x 64 x i8> %a, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -128
-; CHECK-NEXT:    li a1, 128
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v16, a1
+; CHECK-NEXT:    li a0, 128
+; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 64 x i8> @llvm.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 -128)
   ret <vscale x 64 x i8> %res
@@ -708,13 +682,12 @@ define <vscale x 64 x i8> @splice_nxv64i8_offset_min(<vscale x 64 x i8> %a, <vsc
 define <vscale x 64 x i8> @splice_nxv64i8_offset_max(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) #0 {
 ; CHECK-LABEL: splice_nxv64i8_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 127
+; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -127
-; CHECK-NEXT:    li a1, 127
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 64 x i8> @llvm.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 127)
@@ -751,9 +724,8 @@ define <vscale x 1 x i16> @splice_nxv1i16_offset_min(<vscale x 1 x i16> %a, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i16> @llvm.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 -2)
@@ -763,12 +735,11 @@ define <vscale x 1 x i16> @splice_nxv1i16_offset_min(<vscale x 1 x i16> %a, <vsc
 define <vscale x 1 x i16> @splice_nxv1i16_offset_max(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) #0 {
 ; CHECK-LABEL: splice_nxv1i16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i16> @llvm.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 1)
@@ -805,9 +776,8 @@ define <vscale x 2 x i16> @splice_nxv2i16_offset_min(<vscale x 2 x i16> %a, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i16> @llvm.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 -4)
@@ -817,12 +787,11 @@ define <vscale x 2 x i16> @splice_nxv2i16_offset_min(<vscale x 2 x i16> %a, <vsc
 define <vscale x 2 x i16> @splice_nxv2i16_offset_max(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i16> @llvm.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 3)
@@ -859,9 +828,8 @@ define <vscale x 4 x i16> @splice_nxv4i16_offset_min(<vscale x 4 x i16> %a, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i16> @llvm.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 -8)
@@ -871,12 +839,11 @@ define <vscale x 4 x i16> @splice_nxv4i16_offset_min(<vscale x 4 x i16> %a, <vsc
 define <vscale x 4 x i16> @splice_nxv4i16_offset_max(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 7
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i16> @llvm.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 7)
@@ -898,9 +865,8 @@ define <vscale x 8 x i16> @splice_nxv8i16_offset_negone(<vscale x 8 x i16> %a, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -1)
@@ -912,9 +878,8 @@ define <vscale x 8 x i16> @splice_nxv8i16_offset_min(<vscale x 8 x i16> %a, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -16)
@@ -926,9 +891,8 @@ define <vscale x 8 x i16> @splice_nxv8i16_offset_max(<vscale x 8 x i16> %a, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 15)
@@ -951,9 +915,8 @@ define <vscale x 16 x i16> @splice_nxv16i16_offset_negone(<vscale x 16 x i16> %a
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i16> @llvm.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 -1)
@@ -966,11 +929,10 @@ define <vscale x 16 x i16> @splice_nxv16i16_offset_min(<vscale x 16 x i16> %a, <
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -32
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v12, a1
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i16> @llvm.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 -32)
   ret <vscale x 16 x i16> %res
@@ -979,12 +941,11 @@ define <vscale x 16 x i16> @splice_nxv16i16_offset_min(<vscale x 16 x i16> %a, <
 define <vscale x 16 x i16> @splice_nxv16i16_offset_max(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) #0 {
 ; CHECK-LABEL: splice_nxv16i16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 31
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -31
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i16> @llvm.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 31)
@@ -1007,9 +968,8 @@ define <vscale x 32 x i16> @splice_nxv32i16_offset_negone(<vscale x 32 x i16> %a
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i16> @llvm.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 -1)
@@ -1022,11 +982,10 @@ define <vscale x 32 x i16> @splice_nxv32i16_offset_min(<vscale x 32 x i16> %a, <
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -64
-; CHECK-NEXT:    li a1, 64
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v16, a1
+; CHECK-NEXT:    li a0, 64
+; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i16> @llvm.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 -64)
   ret <vscale x 32 x i16> %res
@@ -1035,13 +994,12 @@ define <vscale x 32 x i16> @splice_nxv32i16_offset_min(<vscale x 32 x i16> %a, <
 define <vscale x 32 x i16> @splice_nxv32i16_offset_max(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) #0 {
 ; CHECK-LABEL: splice_nxv32i16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 63
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -63
-; CHECK-NEXT:    li a1, 63
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i16> @llvm.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 63)
@@ -1078,9 +1036,8 @@ define <vscale x 1 x i32> @splice_nxv1i32_offset_min(<vscale x 1 x i32> %a, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i32> @llvm.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 -2)
@@ -1090,12 +1047,11 @@ define <vscale x 1 x i32> @splice_nxv1i32_offset_min(<vscale x 1 x i32> %a, <vsc
 define <vscale x 1 x i32> @splice_nxv1i32_offset_max(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b) #0 {
 ; CHECK-LABEL: splice_nxv1i32_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i32> @llvm.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 1)
@@ -1132,9 +1088,8 @@ define <vscale x 2 x i32> @splice_nxv2i32_offset_min(<vscale x 2 x i32> %a, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i32> @llvm.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 -4)
@@ -1144,12 +1099,11 @@ define <vscale x 2 x i32> @splice_nxv2i32_offset_min(<vscale x 2 x i32> %a, <vsc
 define <vscale x 2 x i32> @splice_nxv2i32_offset_max(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i32_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i32> @llvm.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 3)
@@ -1172,9 +1126,8 @@ define <vscale x 4 x i32> @splice_nxv4i32_offset_negone(<vscale x 4 x i32> %a, <
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -1)
@@ -1187,9 +1140,8 @@ define <vscale x 4 x i32> @splice_nxv4i32_offset_min(<vscale x 4 x i32> %a, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -8)
@@ -1199,12 +1151,11 @@ define <vscale x 4 x i32> @splice_nxv4i32_offset_min(<vscale x 4 x i32> %a, <vsc
 define <vscale x 4 x i32> @splice_nxv4i32_offset_max(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i32_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 7
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 7)
@@ -1226,9 +1177,8 @@ define <vscale x 8 x i32> @splice_nxv8i32_offset_negone(<vscale x 8 x i32> %a, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 -1)
@@ -1240,9 +1190,8 @@ define <vscale x 8 x i32> @splice_nxv8i32_offset_min(<vscale x 8 x i32> %a, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 -16)
@@ -1254,9 +1203,8 @@ define <vscale x 8 x i32> @splice_nxv8i32_offset_max(<vscale x 8 x i32> %a, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 15)
@@ -1279,9 +1227,8 @@ define <vscale x 16 x i32> @splice_nxv16i32_offset_negone(<vscale x 16 x i32> %a
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i32> @llvm.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 -1)
@@ -1294,11 +1241,10 @@ define <vscale x 16 x i32> @splice_nxv16i32_offset_min(<vscale x 16 x i32> %a, <
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -32
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v16, a1
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i32> @llvm.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 -32)
   ret <vscale x 16 x i32> %res
@@ -1307,12 +1253,11 @@ define <vscale x 16 x i32> @splice_nxv16i32_offset_min(<vscale x 16 x i32> %a, <
 define <vscale x 16 x i32> @splice_nxv16i32_offset_max(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) #0 {
 ; CHECK-LABEL: splice_nxv16i32_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 31
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -31
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x i32> @llvm.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 31)
@@ -1349,9 +1294,8 @@ define <vscale x 1 x i64> @splice_nxv1i64_offset_min(<vscale x 1 x i64> %a, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 -2)
@@ -1361,12 +1305,11 @@ define <vscale x 1 x i64> @splice_nxv1i64_offset_min(<vscale x 1 x i64> %a, <vsc
 define <vscale x 1 x i64> @splice_nxv1i64_offset_max(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b) #0 {
 ; CHECK-LABEL: splice_nxv1i64_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 1)
@@ -1389,9 +1332,8 @@ define <vscale x 2 x i64> @splice_nxv2i64_offset_negone(<vscale x 2 x i64> %a, <
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -1)
@@ -1404,9 +1346,8 @@ define <vscale x 2 x i64> @splice_nxv2i64_offset_min(<vscale x 2 x i64> %a, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -4)
@@ -1416,12 +1357,11 @@ define <vscale x 2 x i64> @splice_nxv2i64_offset_min(<vscale x 2 x i64> %a, <vsc
 define <vscale x 2 x i64> @splice_nxv2i64_offset_max(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
 ; CHECK-LABEL: splice_nxv2i64_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 3)
@@ -1444,9 +1384,8 @@ define <vscale x 4 x i64> @splice_nxv4i64_offset_negone(<vscale x 4 x i64> %a, <
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i64> @llvm.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 -1)
@@ -1459,9 +1398,8 @@ define <vscale x 4 x i64> @splice_nxv4i64_offset_min(<vscale x 4 x i64> %a, <vsc
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i64> @llvm.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 -8)
@@ -1471,12 +1409,11 @@ define <vscale x 4 x i64> @splice_nxv4i64_offset_min(<vscale x 4 x i64> %a, <vsc
 define <vscale x 4 x i64> @splice_nxv4i64_offset_max(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) #0 {
 ; CHECK-LABEL: splice_nxv4i64_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 7
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x i64> @llvm.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 7)
@@ -1498,9 +1435,8 @@ define <vscale x 8 x i64> @splice_nxv8i64_offset_negone(<vscale x 8 x i64> %a, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i64> @llvm.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 -1)
@@ -1512,9 +1448,8 @@ define <vscale x 8 x i64> @splice_nxv8i64_offset_min(<vscale x 8 x i64> %a, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v16, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i64> @llvm.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 -16)
@@ -1526,9 +1461,8 @@ define <vscale x 8 x i64> @splice_nxv8i64_offset_max(<vscale x 8 x i64> %a, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x i64> @llvm.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 15)
@@ -1565,9 +1499,8 @@ define <vscale x 1 x bfloat> @splice_nxv1bf16_offset_min(<vscale x 1 x bfloat> %
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x bfloat> @llvm.vector.splice.nxv1bf16(<vscale x 1 x bfloat> %a, <vscale x 1 x bfloat> %b, i32 -2)
@@ -1577,12 +1510,11 @@ define <vscale x 1 x bfloat> @splice_nxv1bf16_offset_min(<vscale x 1 x bfloat> %
 define <vscale x 1 x bfloat> @splice_nxv1bf16_offset_max(<vscale x 1 x bfloat> %a, <vscale x 1 x bfloat> %b) #0 {
 ; CHECK-LABEL: splice_nxv1bf16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x bfloat> @llvm.vector.splice.nxv1bf16(<vscale x 1 x bfloat> %a, <vscale x 1 x bfloat> %b, i32 1)
@@ -1619,9 +1551,8 @@ define <vscale x 2 x bfloat> @splice_nxv2bf16_offset_min(<vscale x 2 x bfloat> %
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x bfloat> @llvm.vector.splice.nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b, i32 -4)
@@ -1631,12 +1562,11 @@ define <vscale x 2 x bfloat> @splice_nxv2bf16_offset_min(<vscale x 2 x bfloat> %
 define <vscale x 2 x bfloat> @splice_nxv2bf16_offset_max(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) #0 {
 ; CHECK-LABEL: splice_nxv2bf16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x bfloat> @llvm.vector.splice.nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b, i32 3)
@@ -1673,9 +1603,8 @@ define <vscale x 4 x bfloat> @splice_nxv4bf16_offset_min(<vscale x 4 x bfloat> %
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x bfloat> @llvm.vector.splice.nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b, i32 -8)
@@ -1685,12 +1614,11 @@ define <vscale x 4 x bfloat> @splice_nxv4bf16_offset_min(<vscale x 4 x bfloat> %
 define <vscale x 4 x bfloat> @splice_nxv4bf16_offset_max(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) #0 {
 ; CHECK-LABEL: splice_nxv4bf16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 7
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x bfloat> @llvm.vector.splice.nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b, i32 7)
@@ -1712,9 +1640,8 @@ define <vscale x 8 x bfloat> @splice_nxv8bf16_offset_negone(<vscale x 8 x bfloat
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x bfloat> @llvm.vector.splice.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, i32 -1)
@@ -1726,9 +1653,8 @@ define <vscale x 8 x bfloat> @splice_nxv8bf16_offset_min(<vscale x 8 x bfloat> %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x bfloat> @llvm.vector.splice.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, i32 -16)
@@ -1740,9 +1666,8 @@ define <vscale x 8 x bfloat> @splice_nxv8bf16_offset_max(<vscale x 8 x bfloat> %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x bfloat> @llvm.vector.splice.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, i32 15)
@@ -1765,9 +1690,8 @@ define <vscale x 16 x bfloat> @splice_nxv16bf16_offset_negone(<vscale x 16 x bfl
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x bfloat> @llvm.vector.splice.nxv16bf16(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %b, i32 -1)
@@ -1780,11 +1704,10 @@ define <vscale x 16 x bfloat> @splice_nxv16bf16_offset_min(<vscale x 16 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -32
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v12, a1
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x bfloat> @llvm.vector.splice.nxv16bf16(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %b, i32 -32)
   ret <vscale x 16 x bfloat> %res
@@ -1793,12 +1716,11 @@ define <vscale x 16 x bfloat> @splice_nxv16bf16_offset_min(<vscale x 16 x bfloat
 define <vscale x 16 x bfloat> @splice_nxv16bf16_offset_max(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %b) #0 {
 ; CHECK-LABEL: splice_nxv16bf16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 31
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -31
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x bfloat> @llvm.vector.splice.nxv16bf16(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %b, i32 31)
@@ -1821,9 +1743,8 @@ define <vscale x 32 x bfloat> @splice_nxv32bf16_offset_negone(<vscale x 32 x bfl
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x bfloat> @llvm.vector.splice.nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b, i32 -1)
@@ -1836,11 +1757,10 @@ define <vscale x 32 x bfloat> @splice_nxv32bf16_offset_min(<vscale x 32 x bfloat
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -64
-; CHECK-NEXT:    li a1, 64
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v16, a1
+; CHECK-NEXT:    li a0, 64
+; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x bfloat> @llvm.vector.splice.nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b, i32 -64)
   ret <vscale x 32 x bfloat> %res
@@ -1849,13 +1769,12 @@ define <vscale x 32 x bfloat> @splice_nxv32bf16_offset_min(<vscale x 32 x bfloat
 define <vscale x 32 x bfloat> @splice_nxv32bf16_offset_max(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) #0 {
 ; CHECK-LABEL: splice_nxv32bf16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 63
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -63
-; CHECK-NEXT:    li a1, 63
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x bfloat> @llvm.vector.splice.nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b, i32 63)
@@ -1892,9 +1811,8 @@ define <vscale x 1 x half> @splice_nxv1f16_offset_min(<vscale x 1 x half> %a, <v
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x half> @llvm.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 -2)
@@ -1904,12 +1822,11 @@ define <vscale x 1 x half> @splice_nxv1f16_offset_min(<vscale x 1 x half> %a, <v
 define <vscale x 1 x half> @splice_nxv1f16_offset_max(<vscale x 1 x half> %a, <vscale x 1 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv1f16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x half> @llvm.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 1)
@@ -1946,9 +1863,8 @@ define <vscale x 2 x half> @splice_nxv2f16_offset_min(<vscale x 2 x half> %a, <v
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x half> @llvm.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 -4)
@@ -1958,12 +1874,11 @@ define <vscale x 2 x half> @splice_nxv2f16_offset_min(<vscale x 2 x half> %a, <v
 define <vscale x 2 x half> @splice_nxv2f16_offset_max(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv2f16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x half> @llvm.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 3)
@@ -2000,9 +1915,8 @@ define <vscale x 4 x half> @splice_nxv4f16_offset_min(<vscale x 4 x half> %a, <v
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x half> @llvm.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 -8)
@@ -2012,12 +1926,11 @@ define <vscale x 4 x half> @splice_nxv4f16_offset_min(<vscale x 4 x half> %a, <v
 define <vscale x 4 x half> @splice_nxv4f16_offset_max(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv4f16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 7
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x half> @llvm.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 7)
@@ -2039,9 +1952,8 @@ define <vscale x 8 x half> @splice_nxv8f16_offset_negone(<vscale x 8 x half> %a,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -1)
@@ -2053,9 +1965,8 @@ define <vscale x 8 x half> @splice_nxv8f16_offset_min(<vscale x 8 x half> %a, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -16)
@@ -2067,9 +1978,8 @@ define <vscale x 8 x half> @splice_nxv8f16_offset_max(<vscale x 8 x half> %a, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 15)
@@ -2092,9 +2002,8 @@ define <vscale x 16 x half> @splice_nxv16f16_offset_negone(<vscale x 16 x half>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x half> @llvm.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 -1)
@@ -2107,11 +2016,10 @@ define <vscale x 16 x half> @splice_nxv16f16_offset_min(<vscale x 16 x half> %a,
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -32
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v12, a1
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x half> @llvm.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 -32)
   ret <vscale x 16 x half> %res
@@ -2120,12 +2028,11 @@ define <vscale x 16 x half> @splice_nxv16f16_offset_min(<vscale x 16 x half> %a,
 define <vscale x 16 x half> @splice_nxv16f16_offset_max(<vscale x 16 x half> %a, <vscale x 16 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv16f16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 31
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -31
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x half> @llvm.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 31)
@@ -2148,9 +2055,8 @@ define <vscale x 32 x half> @splice_nxv32f16_offset_negone(<vscale x 32 x half>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x half> @llvm.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 -1)
@@ -2163,11 +2069,10 @@ define <vscale x 32 x half> @splice_nxv32f16_offset_min(<vscale x 32 x half> %a,
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -64
-; CHECK-NEXT:    li a1, 64
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v16, a1
+; CHECK-NEXT:    li a0, 64
+; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x half> @llvm.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 -64)
   ret <vscale x 32 x half> %res
@@ -2176,13 +2081,12 @@ define <vscale x 32 x half> @splice_nxv32f16_offset_min(<vscale x 32 x half> %a,
 define <vscale x 32 x half> @splice_nxv32f16_offset_max(<vscale x 32 x half> %a, <vscale x 32 x half> %b) #0 {
 ; CHECK-LABEL: splice_nxv32f16_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 63
+; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -63
-; CHECK-NEXT:    li a1, 63
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x half> @llvm.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 63)
@@ -2219,9 +2123,8 @@ define <vscale x 1 x float> @splice_nxv1f32_offset_min(<vscale x 1 x float> %a,
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x float> @llvm.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 -2)
@@ -2231,12 +2134,11 @@ define <vscale x 1 x float> @splice_nxv1f32_offset_min(<vscale x 1 x float> %a,
 define <vscale x 1 x float> @splice_nxv1f32_offset_max(<vscale x 1 x float> %a, <vscale x 1 x float> %b) #0 {
 ; CHECK-LABEL: splice_nxv1f32_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x float> @llvm.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 1)
@@ -2273,9 +2175,8 @@ define <vscale x 2 x float> @splice_nxv2f32_offset_min(<vscale x 2 x float> %a,
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x float> @llvm.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 -4)
@@ -2285,12 +2186,11 @@ define <vscale x 2 x float> @splice_nxv2f32_offset_min(<vscale x 2 x float> %a,
 define <vscale x 2 x float> @splice_nxv2f32_offset_max(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
 ; CHECK-LABEL: splice_nxv2f32_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x float> @llvm.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 3)
@@ -2313,9 +2213,8 @@ define <vscale x 4 x float> @splice_nxv4f32_offset_negone(<vscale x 4 x float> %
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -1)
@@ -2328,9 +2227,8 @@ define <vscale x 4 x float> @splice_nxv4f32_offset_min(<vscale x 4 x float> %a,
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -8)
@@ -2340,12 +2238,11 @@ define <vscale x 4 x float> @splice_nxv4f32_offset_min(<vscale x 4 x float> %a,
 define <vscale x 4 x float> @splice_nxv4f32_offset_max(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
 ; CHECK-LABEL: splice_nxv4f32_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 7
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 7)
@@ -2367,9 +2264,8 @@ define <vscale x 8 x float> @splice_nxv8f32_offset_negone(<vscale x 8 x float> %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x float> @llvm.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 -1)
@@ -2381,9 +2277,8 @@ define <vscale x 8 x float> @splice_nxv8f32_offset_min(<vscale x 8 x float> %a,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x float> @llvm.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 -16)
@@ -2395,9 +2290,8 @@ define <vscale x 8 x float> @splice_nxv8f32_offset_max(<vscale x 8 x float> %a,
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x float> @llvm.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 15)
@@ -2420,9 +2314,8 @@ define <vscale x 16 x float> @splice_nxv16f32_offset_negone(<vscale x 16 x float
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x float> @llvm.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 -1)
@@ -2435,11 +2328,10 @@ define <vscale x 16 x float> @splice_nxv16f32_offset_min(<vscale x 16 x float> %
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -32
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vslideup.vx v8, v16, a1
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x float> @llvm.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 -32)
   ret <vscale x 16 x float> %res
@@ -2448,12 +2340,11 @@ define <vscale x 16 x float> @splice_nxv16f32_offset_min(<vscale x 16 x float> %
 define <vscale x 16 x float> @splice_nxv16f32_offset_max(<vscale x 16 x float> %a, <vscale x 16 x float> %b) #0 {
 ; CHECK-LABEL: splice_nxv16f32_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 31
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -31
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 16 x float> @llvm.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 31)
@@ -2490,9 +2381,8 @@ define <vscale x 1 x double> @splice_nxv1f64_offset_min(<vscale x 1 x double> %a
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x double> @llvm.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 -2)
@@ -2502,12 +2392,11 @@ define <vscale x 1 x double> @splice_nxv1f64_offset_min(<vscale x 1 x double> %a
 define <vscale x 1 x double> @splice_nxv1f64_offset_max(<vscale x 1 x double> %a, <vscale x 1 x double> %b) #0 {
 ; CHECK-LABEL: splice_nxv1f64_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 3
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 1 x double> @llvm.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 1)
@@ -2530,9 +2419,8 @@ define <vscale x 2 x double> @splice_nxv2f64_offset_negone(<vscale x 2 x double>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -1)
@@ -2545,9 +2433,8 @@ define <vscale x 2 x double> @splice_nxv2f64_offset_min(<vscale x 2 x double> %a
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v10, 4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -4)
@@ -2557,12 +2444,11 @@ define <vscale x 2 x double> @splice_nxv2f64_offset_min(<vscale x 2 x double> %a
 define <vscale x 2 x double> @splice_nxv2f64_offset_max(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
 ; CHECK-LABEL: splice_nxv2f64_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 3)
@@ -2585,9 +2471,8 @@ define <vscale x 4 x double> @splice_nxv4f64_offset_negone(<vscale x 4 x double>
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x double> @llvm.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 -1)
@@ -2600,9 +2485,8 @@ define <vscale x 4 x double> @splice_nxv4f64_offset_min(<vscale x 4 x double> %a
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v12, 8
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x double> @llvm.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 -8)
@@ -2612,12 +2496,11 @@ define <vscale x 4 x double> @splice_nxv4f64_offset_min(<vscale x 4 x double> %a
 define <vscale x 4 x double> @splice_nxv4f64_offset_max(<vscale x 4 x double> %a, <vscale x 4 x double> %b) #0 {
 ; CHECK-LABEL: splice_nxv4f64_offset_max:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 7
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 1
 ; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 4 x double> @llvm.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 7)
@@ -2639,9 +2522,8 @@ define <vscale x 8 x double> @splice_nxv8f64_offset_negone(<vscale x 8 x double>
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v16, 1
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x double> @llvm.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 -1)
@@ -2653,9 +2535,8 @@ define <vscale x 8 x double> @splice_nxv8f64_offset_min(<vscale x 8 x double> %a
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vslideup.vi v8, v16, 16
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x double> @llvm.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 -16)
@@ -2667,9 +2548,8 @@ define <vscale x 8 x double> @splice_nxv8f64_offset_max(<vscale x 8 x double> %a
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 15
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 8 x double> @llvm.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 15)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
index 745cec4e7c4f6..ad2436713ead8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
@@ -19,10 +19,9 @@ define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %ev
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v9, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -44,9 +43,8 @@ define <2 x i1> @test_vp_splice_v2i1_negative_offset(<2 x i1> %va, <2 x i1> %vb,
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -69,9 +67,9 @@ define <2 x i1> @test_vp_splice_v2i1_masked(<2 x i1> %va, <2 x i1> %vb, <2 x i1>
 ; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
@@ -92,10 +90,9 @@ define <4 x i1> @test_vp_splice_v4i1(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %ev
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v9, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -117,9 +114,8 @@ define <4 x i1> @test_vp_splice_v4i1_negative_offset(<4 x i1> %va, <4 x i1> %vb,
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -142,9 +138,9 @@ define <4 x i1> @test_vp_splice_v4i1_masked(<4 x i1> %va, <4 x i1> %vb, <4 x i1>
 ; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
@@ -165,10 +161,9 @@ define <8 x i1> @test_vp_splice_v8i1(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %ev
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v9, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -190,9 +185,8 @@ define <8 x i1> @test_vp_splice_v8i1_negative_offset(<8 x i1> %va, <8 x i1> %vb,
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -215,9 +209,9 @@ define <8 x i1> @test_vp_splice_v8i1_masked(<8 x i1> %va, <8 x i1> %vb, <8 x i1>
 ; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
@@ -238,10 +232,9 @@ define <16 x i1> @test_vp_splice_v16i1(<16 x i1> %va, <16 x i1> %vb, i32 zeroext
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v9, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -263,9 +256,8 @@ define <16 x i1> @test_vp_splice_v16i1_negative_offset(<16 x i1> %va, <16 x i1>
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -288,9 +280,9 @@ define <16 x i1> @test_vp_splice_v16i1_masked(<16 x i1> %va, <16 x i1> %vb, <16
 ; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
index 3b0b183537468..2155bc02bf9bc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
@@ -22,10 +22,9 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v9, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -47,9 +46,8 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -72,9 +70,9 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <v
 ; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
@@ -95,10 +93,9 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v9, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -120,9 +117,8 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -145,9 +141,9 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <v
 ; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
@@ -168,10 +164,9 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v9, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -193,9 +188,8 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -218,9 +212,9 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <v
 ; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
@@ -241,10 +235,9 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x
 ; CHECK-NEXT:    vmv.v.i v10, 0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v9, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v9, v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -266,9 +259,8 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v9, v9, a0
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    vmsne.vi v0, v9, 0
 ; CHECK-NEXT:    ret
@@ -291,9 +283,9 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <v
 ; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
@@ -314,10 +306,9 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscal
 ; CHECK-NEXT:    vmv.v.i v12, 0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v8, v12, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
@@ -339,9 +330,8 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v8, v12, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e8, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v10, 5
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
@@ -364,9 +354,9 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va,
 ; CHECK-NEXT:    vmerge.vim v10, v14, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v10, v12, a0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmsne.vi v8, v10, 0, v0.t
@@ -388,10 +378,9 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscal
 ; CHECK-NEXT:    vmv.v.i v16, 0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v8, v16, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v12, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
@@ -413,9 +402,8 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v8, v16, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e8, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v12, 5
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
@@ -438,9 +426,9 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va,
 ; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v16, v16, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    vsetvli zero, zero, e8, m4, ta, mu
 ; CHECK-NEXT:    vslideup.vx v16, v12, a0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vmsne.vi v8, v16, 0, v0.t
@@ -462,10 +450,9 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscal
 ; CHECK-NEXT:    vmv.v.i v24, 0
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v8, v24, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v16, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
@@ -487,9 +474,8 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vmerge.vim v8, v24, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e8, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v16, 5
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
@@ -512,9 +498,9 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va,
 ; CHECK-NEXT:    vmerge.vim v24, v24, 1, v0
 ; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v24, v24, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vsetvli zero, zero, e8, m8, ta, mu
 ; CHECK-NEXT:    vslideup.vx v24, v16, a0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vmsne.vi v8, v24, 0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
index 9c8c5da75ff7c..8e9fab5f156a1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
@@ -7,10 +7,9 @@
 define <vscale x 2 x i64> @test_vp_splice_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -21,9 +20,8 @@ define <vscale x 2 x i64> @test_vp_splice_nxv2i64_negative_offset(<vscale x 2 x
 ; CHECK-LABEL: test_vp_splice_nxv2i64_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v10, 5
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -43,10 +41,10 @@ define <vscale x 2 x i64> @test_vp_splice_nxv2i64_zero_offset(<vscale x 2 x i64>
 define <vscale x 2 x i64> @test_vp_splice_nxv2i64_masked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv2i64_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
@@ -56,10 +54,9 @@ define <vscale x 2 x i64> @test_vp_splice_nxv2i64_masked(<vscale x 2 x i64> %va,
 define <vscale x 1 x i64> @test_vp_splice_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv1i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -70,9 +67,8 @@ define <vscale x 1 x i64> @test_vp_splice_nxv1i64_negative_offset(<vscale x 1 x
 ; CHECK-LABEL: test_vp_splice_nxv1i64_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 -5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -82,10 +78,10 @@ define <vscale x 1 x i64> @test_vp_splice_nxv1i64_negative_offset(<vscale x 1 x
 define <vscale x 1 x i64> @test_vp_splice_nxv1i64_masked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv1i64_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
@@ -95,10 +91,9 @@ define <vscale x 1 x i64> @test_vp_splice_nxv1i64_masked(<vscale x 1 x i64> %va,
 define <vscale x 2 x i32> @test_vp_splice_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -109,9 +104,8 @@ define <vscale x 2 x i32> @test_vp_splice_nxv2i32_negative_offset(<vscale x 2 x
 ; CHECK-LABEL: test_vp_splice_nxv2i32_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -121,10 +115,10 @@ define <vscale x 2 x i32> @test_vp_splice_nxv2i32_negative_offset(<vscale x 2 x
 define <vscale x 2 x i32> @test_vp_splice_nxv2i32_masked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv2i32_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
@@ -134,10 +128,9 @@ define <vscale x 2 x i32> @test_vp_splice_nxv2i32_masked(<vscale x 2 x i32> %va,
 define <vscale x 4 x i16> @test_vp_splice_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 5, <vscale x 4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -148,9 +141,8 @@ define <vscale x 4 x i16> @test_vp_splice_nxv4i16_negative_offset(<vscale x 4 x
 ; CHECK-LABEL: test_vp_splice_nxv4i16_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
   %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 -5, <vscale x 4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -160,10 +152,10 @@ define <vscale x 4 x i16> @test_vp_splice_nxv4i16_negative_offset(<vscale x 4 x
 define <vscale x 4 x i16> @test_vp_splice_nxv4i16_masked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv4i16_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 5, <vscale x 4 x i1> %mask, i32 %evla, i32 %evlb)
@@ -173,10 +165,9 @@ define <vscale x 4 x i16> @test_vp_splice_nxv4i16_masked(<vscale x 4 x i16> %va,
 define <vscale x 8 x i8> @test_vp_splice_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv8i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 5, <vscale x 8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -187,9 +178,8 @@ define <vscale x 8 x i8> @test_vp_splice_nxv8i8_negative_offset(<vscale x 8 x i8
 ; CHECK-LABEL: test_vp_splice_nxv8i8_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 -5, <vscale x 8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -199,10 +189,10 @@ define <vscale x 8 x i8> @test_vp_splice_nxv8i8_negative_offset(<vscale x 8 x i8
 define <vscale x 8 x i8> @test_vp_splice_nxv8i8_masked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv8i8_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 5, <vscale x 8 x i1> %mask, i32 %evla, i32 %evlb)
@@ -212,10 +202,9 @@ define <vscale x 8 x i8> @test_vp_splice_nxv8i8_masked(<vscale x 8 x i8> %va, <v
 define <vscale x 1 x double> @test_vp_splice_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv1f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -226,9 +215,8 @@ define <vscale x 1 x double> @test_vp_splice_nxv1f64_negative_offset(<vscale x 1
 ; CHECK-LABEL: test_vp_splice_nxv1f64_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 -5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -238,10 +226,10 @@ define <vscale x 1 x double> @test_vp_splice_nxv1f64_negative_offset(<vscale x 1
 define <vscale x 1 x double> @test_vp_splice_nxv1f64_masked(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv1f64_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
@@ -251,10 +239,9 @@ define <vscale x 1 x double> @test_vp_splice_nxv1f64_masked(<vscale x 1 x double
 define <vscale x 2 x float> @test_vp_splice_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -265,9 +252,8 @@ define <vscale x 2 x float> @test_vp_splice_nxv2f32_negative_offset(<vscale x 2
 ; CHECK-LABEL: test_vp_splice_nxv2f32_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -277,10 +263,10 @@ define <vscale x 2 x float> @test_vp_splice_nxv2f32_negative_offset(<vscale x 2
 define <vscale x 2 x float> @test_vp_splice_nxv2f32_masked(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv2f32_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
@@ -431,10 +417,9 @@ define <vscale x 16 x i64> @test_vp_splice_nxv16i64_negative_offset(<vscale x 16
 define <vscale x 2 x half> @test_vp_splice_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -445,9 +430,8 @@ define <vscale x 2 x half> @test_vp_splice_nxv2f16_negative_offset(<vscale x 2 x
 ; CHECK-LABEL: test_vp_splice_nxv2f16_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -457,10 +441,10 @@ define <vscale x 2 x half> @test_vp_splice_nxv2f16_negative_offset(<vscale x 2 x
 define <vscale x 2 x half> @test_vp_splice_nxv2f16_masked(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv2f16_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
@@ -470,10 +454,9 @@ define <vscale x 2 x half> @test_vp_splice_nxv2f16_masked(<vscale x 2 x half> %v
 define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv2bf16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
+; CHECK-NEXT:    addi a0, a0, -5
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -484,9 +467,8 @@ define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_negative_offset(<vscale x
 ; CHECK-LABEL: test_vp_splice_nxv2bf16_negative_offset:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetivli zero, 5, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
@@ -496,10 +478,10 @@ define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_negative_offset(<vscale x
 define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_masked(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_nxv2bf16_masked:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT:    addi a0, a0, -5
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
 ; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
 ; CHECK-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)

>From 141bb1fe267bf56afd06f492e27834463cedaffb Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Fri, 4 Jul 2025 13:50:54 +0100
Subject: [PATCH 2/2] Add +minimize-vl

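Both lowered splices now pick the vslidedown AVL from the new tune
feature. A rough sketch of the shared shape (not the literal diff; see
the two RISCVISelLowering.cpp hunks below):

    // Sketch: with +minimize-vl, keep the slidedown AVL at the minimal
    // UpOffset; otherwise use the wider AVL (X0/VLMAX in
    // lowerVECTOR_SPLICE, the vslideup's EVL2 in
    // lowerVPSpliceExperimental).
    SDValue SlideDownAVL = Subtarget.minimizeVL()
                               ? UpOffset
                               : DAG.getRegister(RISCV::X0, XLenVT);
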
---
 llvm/lib/Target/RISCV/RISCVFeatures.td        |    4 +
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |    9 +-
 llvm/lib/Target/RISCV/RISCVProcessors.td      |    3 +-
 .../RISCV/rvv/fixed-vectors-vp-splice.ll      |  588 ++-
 llvm/test/CodeGen/RISCV/rvv/vector-splice.ll  | 3558 ++++++++++++-----
 .../RISCV/rvv/vp-splice-mask-fixed-vectors.ll |  676 +++-
 .../RISCV/rvv/vp-splice-mask-vectors.ll       | 1184 ++++--
 llvm/test/CodeGen/RISCV/rvv/vp-splice.ll      |  647 ++-
 8 files changed, 4572 insertions(+), 2097 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 6e103dd7d8c44..d6cb1aa95ad5b 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1695,6 +1695,10 @@ foreach nf = {2-8} in
                        "true", "vlseg"#nf#"eN.v and vsseg"#nf#"eN.v are "
                        "implemented as a wide memory op and shuffle">;
 
+def TuneMinimizeVL
+    : SubtargetFeature<"minimize-vl", "MinimizeVL", "true",
+                       "Prefer reducing vl even if it requires more vsetvli instructions">;
+
 def Experimental
    : SubtargetFeature<"experimental", "HasExperimental",
                       "true", "Experimental intrinsics">;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 989a2cd237262..1ba8aba13f8d3 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -12329,9 +12329,9 @@ SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
 
   SDValue TrueMask = getAllOnesMask(VecVT, VLMax, DL, DAG);
 
-  SDValue SlideDown =
-      getVSlidedown(DAG, Subtarget, DL, VecVT, DAG.getUNDEF(VecVT), V1,
-                    DownOffset, TrueMask, DAG.getRegister(RISCV::X0, XLenVT));
+  SDValue SlideDown = getVSlidedown(
+      DAG, Subtarget, DL, VecVT, DAG.getUNDEF(VecVT), V1, DownOffset, TrueMask,
+      Subtarget.minimizeVL() ? UpOffset : DAG.getRegister(RISCV::X0, XLenVT));
   return getVSlideup(DAG, Subtarget, DL, VecVT, SlideDown, V2, UpOffset,
                      TrueMask, DAG.getRegister(RISCV::X0, XLenVT),
                      RISCVVType::TAIL_AGNOSTIC);
@@ -13354,7 +13354,8 @@ RISCVTargetLowering::lowerVPSpliceExperimental(SDValue Op,
 
   if (ImmValue != 0)
     Op1 = getVSlidedown(DAG, Subtarget, DL, ContainerVT,
-                        DAG.getUNDEF(ContainerVT), Op1, DownOffset, Mask, EVL2);
+                        DAG.getUNDEF(ContainerVT), Op1, DownOffset, Mask,
+                        Subtarget.minimizeVL() ? UpOffset : EVL2);
   SDValue Result = getVSlideup(DAG, Subtarget, DL, ContainerVT, Op1, Op2,
                                UpOffset, Mask, EVL2, RISCVVType::TAIL_AGNOSTIC);
 
diff --git a/llvm/lib/Target/RISCV/RISCVProcessors.td b/llvm/lib/Target/RISCV/RISCVProcessors.td
index 57b415dc713ac..f4f31e25bbde7 100644
--- a/llvm/lib/Target/RISCV/RISCVProcessors.td
+++ b/llvm/lib/Target/RISCV/RISCVProcessors.td
@@ -274,7 +274,8 @@ def SIFIVE_U74 : RISCVProcessorModel<"sifive-u74",
 defvar SiFiveIntelligenceTuneFeatures = !listconcat(SiFive7TuneFeatures,
                                                     [TuneDLenFactor2,
                                                      TuneOptimizedZeroStrideLoad,
-                                                     TuneOptimizedNF2SegmentLoadStore]);
+                                                     TuneOptimizedNF2SegmentLoadStore,
+                                                     TuneMinimizeVL]);
 def SIFIVE_X280 : RISCVProcessorModel<"sifive-x280", SiFive7Model,
                                       [Feature64Bit,
                                        FeatureStdExtI,
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
index 79fbdb007a70c..ba9f950390a52 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
@@ -1,30 +1,52 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zvfh,+zvfbfmin -verify-machineinstrs -riscv-v-vector-bits-min=128 \
-; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zvfhmin,+zvfbfmin -verify-machineinstrs -riscv-v-vector-bits-min=128 \
-; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfh,+zvfbfmin -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,NOMINVL,ZVFH
+; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfhmin,+zvfbfmin -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,NOMINVL,ZVFHMIN
+; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfh,+zvfbfmin,+minimize-vl -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,MINVL,ZVFH
+; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfhmin,+zvfbfmin,+minimize-vl -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,MINVL,ZVFHMIN
 
 define <2 x i64> @test_vp_splice_v2i64(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v2i64:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v2i64:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
 
   %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x i64> %v
 }
 
 define <2 x i64> @test_vp_splice_v2i64_negative_offset(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v2i64_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v2i64_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v2i64_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
 
   %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 -5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x i64> %v
@@ -42,248 +64,419 @@ define <2 x i64> @test_vp_splice_v2i64_zero_offset(<2 x i64> %va, <2 x i64> %vb,
 }
 
 define <2 x i64> @test_vp_splice_v2i64_masked(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v2i64_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v2i64_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v2i64_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64> %va, <2 x i64> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
   ret <2 x i64> %v
 }
 
 define <4 x i32> @test_vp_splice_v4i32(<4 x i32> %va, <4 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v4i32:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v4i32:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
 
   %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x i32> %v
 }
 
 define <4 x i32> @test_vp_splice_v4i32_negative_offset(<4 x i32> %va, <4 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v4i32_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v4i32_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v4i32_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
 
   %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 -5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x i32> %v
 }
 
 define <4 x i32> @test_vp_splice_v4i32_masked(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v4i32_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v4i32_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v4i32_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32> %va, <4 x i32> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
   ret <4 x i32> %v
 }
 
 define <8 x i16> @test_vp_splice_v8i16(<8 x i16> %va, <8 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v8i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v8i16:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v8i16:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
 
   %v = call <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16> %va, <8 x i16> %vb, i32 5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <8 x i16> %v
 }
 
 define <8 x i16> @test_vp_splice_v8i16_negative_offset(<8 x i16> %va, <8 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v8i16_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v8i16_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v8i16_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
 
   %v = call <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16> %va, <8 x i16> %vb, i32 -5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <8 x i16> %v
 }
 
 define <8 x i16> @test_vp_splice_v8i16_masked(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v8i16_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v8i16_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v8i16_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16> %va, <8 x i16> %vb, i32 5, <8 x i1> %mask, i32 %evla, i32 %evlb)
   ret <8 x i16> %v
 }
 
 define <16 x i8> @test_vp_splice_v16i8(<16 x i8> %va, <16 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v16i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v16i8:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v16i8:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
 
   %v = call <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8> %va, <16 x i8> %vb, i32 5, <16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <16 x i8> %v
 }
 
 define <16 x i8> @test_vp_splice_v16i8_negative_offset(<16 x i8> %va, <16 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v16i8_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v16i8_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v16i8_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
 
   %v = call <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8> %va, <16 x i8> %vb, i32 -5, <16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <16 x i8> %v
 }
 
 define <16 x i8> @test_vp_splice_v16i8_masked(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v16i8_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v16i8_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v16i8_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8> %va, <16 x i8> %vb, i32 5, <16 x i1> %mask, i32 %evla, i32 %evlb)
   ret <16 x i8> %v
 }
 
 define <2 x double> @test_vp_splice_v2f64(<2 x double> %va, <2 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v2f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v2f64:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v2f64:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
 
   %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x double> %v
 }
 
 define <2 x double> @test_vp_splice_v2f64_negative_offset(<2 x double> %va, <2 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v2f64_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v2f64_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v2f64_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
 
   %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 -5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x double> %v
 }
 
 define <2 x double> @test_vp_splice_v2f64_masked(<2 x double> %va, <2 x double> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v2f64_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v2f64_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v2f64_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %va, <2 x double> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
   ret <2 x double> %v
 }
 
 define <4 x float> @test_vp_splice_v4f32(<4 x float> %va, <4 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v4f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v4f32:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v4f32:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
 
   %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x float> %v
 }
 
 define <4 x float> @test_vp_splice_v4f32_negative_offset(<4 x float> %va, <4 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v4f32_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v4f32_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v4f32_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
 
   %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 -5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x float> %v
 }
 
 define <4 x float> @test_vp_splice_v4f32_masked(<4 x float> %va, <4 x float> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v4f32_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v4f32_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v4f32_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
   ret <4 x float> %v
 }
 
 define <8 x half> @test_vp_splice_v8f16(<8 x half> %va, <8 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v8f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v8f16:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v8f16:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
 
   %v = call <8 x half> @llvm.experimental.vp.splice.v8f16(<8 x half> %va, <8 x half> %vb, i32 5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <8 x half> %v
 }
 
 define <8 x half> @test_vp_splice_v8f16_negative_offset(<8 x half> %va, <8 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v8f16_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v8f16_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v8f16_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
 
   %v = call <8 x half> @llvm.experimental.vp.splice.v8f16(<8 x half> %va, <8 x half> %vb, i32 -5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <8 x half> %v
 }
 
 define <8 x half> @test_vp_splice_v8f16_masked(<8 x half> %va, <8 x half> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v8f16_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v8f16_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v8f16_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <8 x half> @llvm.experimental.vp.splice.v8f16(<8 x half> %va, <8 x half> %vb, i32 5, <8 x i1> %mask, i32 %evla, i32 %evlb)
   ret <8 x half> %v
 }
@@ -335,11 +528,9 @@ define <4 x half> @test_vp_splice_nxv2f16_with_firstelt(half %first, <4 x half>
 ;
 ; ZVFHMIN-LABEL: test_vp_splice_nxv2f16_with_firstelt:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    fmv.x.w a1, fa0
-; ZVFHMIN-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
-; ZVFHMIN-NEXT:    vmv.s.x v9, a1
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; ZVFHMIN-NEXT:    vslideup.vi v9, v8, 1, v0.t
+; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vslide1up.vx v9, v8, a1, v0.t
 ; ZVFHMIN-NEXT:    vmv1r.v v8, v9
 ; ZVFHMIN-NEXT:    ret
   %va = insertelement <4 x half> poison, half %first, i32 0
@@ -348,40 +539,67 @@ define <4 x half> @test_vp_splice_nxv2f16_with_firstelt(half %first, <4 x half>
 }
 
 define <8 x bfloat> @test_vp_splice_v8bf16(<8 x bfloat> %va, <8 x bfloat> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v8bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v8bf16:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v8bf16:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
 
   %v = call <8 x bfloat> @llvm.experimental.vp.splice.v8bf16(<8 x bfloat> %va, <8 x bfloat> %vb, i32 5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <8 x bfloat> %v
 }
 
 define <8 x bfloat> @test_vp_splice_v8bf16_negative_offset(<8 x bfloat> %va, <8 x bfloat> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v8bf16_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v8bf16_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v8bf16_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
 
   %v = call <8 x bfloat> @llvm.experimental.vp.splice.v8bf16(<8 x bfloat> %va, <8 x bfloat> %vb, i32 -5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <8 x bfloat> %v
 }
 
 define <8 x bfloat> @test_vp_splice_v8bf16_masked(<8 x bfloat> %va, <8 x bfloat> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v8bf16_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v8bf16_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v8bf16_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <8 x bfloat> @llvm.experimental.vp.splice.v8bf16(<8 x bfloat> %va, <8 x bfloat> %vb, i32 5, <8 x i1> %mask, i32 %evla, i32 %evlb)
   ret <8 x bfloat> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
index 87b6442c38a42..34b159c4b8a76 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
@@ -1,8 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple riscv32 -mattr=+m,+f,+d,+v,+zvfhmin,+zvfbfmin < %s | FileCheck %s
-; RUN: llc -mtriple riscv64 -mattr=+m,+f,+d,+v,+zvfhmin,+zvfbfmin < %s | FileCheck %s
-; RUN: llc -mtriple riscv32 -mattr=+m,+f,+d,+v,+zvfh,+zvfbfmin < %s | FileCheck %s
-; RUN: llc -mtriple riscv64 -mattr=+m,+f,+d,+v,+zvfh,+zvfbfmin < %s | FileCheck %s
+; RUN: llc -mtriple riscv32 -mattr=+m,+f,+d,+v,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,NOMINVL
+; RUN: llc -mtriple riscv64 -mattr=+m,+f,+d,+v,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,NOMINVL
+; RUN: llc -mtriple riscv32 -mattr=+m,+f,+d,+v,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,NOMINVL
+; RUN: llc -mtriple riscv64 -mattr=+m,+f,+d,+v,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,NOMINVL
+; RUN: llc -mtriple riscv32 -mattr=+m,+f,+d,+v,+zvfhmin,+zvfbfmin,+minimize-vl < %s | FileCheck %s --check-prefixes=CHECK,MINVL
+; RUN: llc -mtriple riscv64 -mattr=+m,+f,+d,+v,+zvfhmin,+zvfbfmin,+minimize-vl < %s | FileCheck %s --check-prefixes=CHECK,MINVL
+; RUN: llc -mtriple riscv32 -mattr=+m,+f,+d,+v,+zvfh,+zvfbfmin,+minimize-vl < %s | FileCheck %s --check-prefixes=CHECK,MINVL
+; RUN: llc -mtriple riscv64 -mattr=+m,+f,+d,+v,+zvfh,+zvfbfmin,+minimize-vl < %s | FileCheck %s --check-prefixes=CHECK,MINVL
 
 ; Tests assume VLEN=128 or vscale_range_min=2.
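; The four +minimize-vl RUN lines are checked under the MINVL prefix: there
; each vslidedown keeps the smallest AVL that still produces the final
; lanes, at the price of a second vsetvli before the vslideup. The NOMINVL
; runs (the default) share a single vl across the slidedown/slideup pair.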
 
@@ -31,23 +35,43 @@ define <vscale x 1 x i1> @splice_nxv1i1_offset_negone(<vscale x 1 x i1> %a, <vsc
 }
 
 define <vscale x 1 x i1> @splice_nxv1i1_offset_max(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) #0 {
-; CHECK-LABEL: splice_nxv1i1_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    vand.vi v8, v8, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1i1_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    vmerge.vim v10, v8, 1, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 1
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    vand.vi v8, v8, 1
+; NOMINVL-NEXT:    vmsne.vi v0, v8, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1i1_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    vmerge.vim v10, v8, 1, v0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 1
+; MINVL-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a0
+; MINVL-NEXT:    vand.vi v8, v8, 1
+; MINVL-NEXT:    vmsne.vi v0, v8, 0
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x i1> @llvm.vector.splice.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, i32 1)
   ret <vscale x 1 x i1> %res
 }
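; The i1 splices below all share one shape, since the slide instructions
; operate on SEW-sized elements rather than mask bits: both masks are
; widened to i8 with vmerge.vim over a zero splat, the widened vectors are
; spliced with the usual slide pair, and the result is narrowed back with
; vand.vi + vmsne.vi. Only the slide pair differs between prefixes; here
; MINVL runs the vslidedown at AVL = vlenb/8 - 1 and then restores VLMAX
; for the vslideup, while NOMINVL keeps one VLMAX vsetvli for both.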
@@ -77,23 +101,43 @@ define <vscale x 2 x i1> @splice_nxv2i1_offset_negone(<vscale x 2 x i1> %a, <vsc
 }
 
 define <vscale x 2 x i1> @splice_nxv2i1_offset_max(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
-; CHECK-LABEL: splice_nxv2i1_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    vand.vi v8, v8, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2i1_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    vmerge.vim v10, v8, 1, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 3
+; NOMINVL-NEXT:    addi a0, a0, -3
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    vand.vi v8, v8, 1
+; NOMINVL-NEXT:    vmsne.vi v0, v8, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2i1_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    vmerge.vim v10, v8, 1, v0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -3
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 3
+; MINVL-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a0
+; MINVL-NEXT:    vand.vi v8, v8, 1
+; MINVL-NEXT:    vmsne.vi v0, v8, 0
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x i1> @llvm.vector.splice.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b, i32 3)
   ret <vscale x 2 x i1> %res
 }
@@ -123,23 +167,43 @@ define <vscale x 4 x i1> @splice_nxv4i1_offset_negone(<vscale x 4 x i1> %a, <vsc
 }
 
 define <vscale x 4 x i1> @splice_nxv4i1_offset_max(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
-; CHECK-LABEL: splice_nxv4i1_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    vand.vi v8, v8, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4i1_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    vmerge.vim v10, v8, 1, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 7
+; NOMINVL-NEXT:    addi a0, a0, -7
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    vand.vi v8, v8, 1
+; NOMINVL-NEXT:    vmsne.vi v0, v8, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4i1_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    vmerge.vim v10, v8, 1, v0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -7
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 7
+; MINVL-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a0
+; MINVL-NEXT:    vand.vi v8, v8, 1
+; MINVL-NEXT:    vmsne.vi v0, v8, 0
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x i1> @llvm.vector.splice.nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b, i32 7)
   ret <vscale x 4 x i1> %res
 }
@@ -168,22 +232,41 @@ define <vscale x 8 x i1> @splice_nxv8i1_offset_negone(<vscale x 8 x i1> %a, <vsc
 }
 
 define <vscale x 8 x i1> @splice_nxv8i1_offset_max(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
-; CHECK-LABEL: splice_nxv8i1_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    vand.vi v8, v8, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8i1_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    vmerge.vim v10, v8, 1, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 15
+; NOMINVL-NEXT:    addi a0, a0, -15
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    vand.vi v8, v8, 1
+; NOMINVL-NEXT:    vmsne.vi v0, v8, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8i1_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    vmerge.vim v10, v8, 1, v0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    addi a0, a0, -15
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 15
+; MINVL-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a0
+; MINVL-NEXT:    vand.vi v8, v8, 1
+; MINVL-NEXT:    vmsne.vi v0, v8, 0
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x i1> @llvm.vector.splice.nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, i32 15)
   ret <vscale x 8 x i1> %res
 }
@@ -213,23 +296,43 @@ define <vscale x 16 x i1> @splice_nxv16i1_offset_negone(<vscale x 16 x i1> %a, <
 }
 
 define <vscale x 16 x i1> @splice_nxv16i1_offset_max(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
-; CHECK-LABEL: splice_nxv16i1_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    vmerge.vim v12, v10, 1, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    addi a0, a0, -31
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    vand.vi v8, v8, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16i1_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    vmerge.vim v12, v10, 1, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v8, v10, 1, v0
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 31
+; NOMINVL-NEXT:    addi a0, a0, -31
+; NOMINVL-NEXT:    vslideup.vx v8, v12, a0
+; NOMINVL-NEXT:    vand.vi v8, v8, 1
+; NOMINVL-NEXT:    vmsne.vi v0, v8, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16i1_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    vmerge.vim v12, v10, 1, v0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v8, v10, 1, v0
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -31
+; MINVL-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 31
+; MINVL-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v12, a0
+; MINVL-NEXT:    vand.vi v8, v8, 1
+; MINVL-NEXT:    vmsne.vi v0, v8, 0
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x i1> @llvm.vector.splice.nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, i32 31)
   ret <vscale x 16 x i1> %res
 }
@@ -259,24 +362,43 @@ define <vscale x 32 x i1> @splice_nxv32i1_offset_negone(<vscale x 32 x i1> %a, <
 }
 
 define <vscale x 32 x i1> @splice_nxv32i1_offset_max(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b) #0 {
-; CHECK-LABEL: splice_nxv32i1_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v12, 0
-; CHECK-NEXT:    li a0, 63
-; CHECK-NEXT:    vmerge.vim v16, v12, 1, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v8, v12, 1, v0
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -63
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    vand.vi v8, v8, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv32i1_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v12, 0
+; NOMINVL-NEXT:    li a0, 63
+; NOMINVL-NEXT:    vmerge.vim v16, v12, 1, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v8, v12, 1, v0
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -63
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    vand.vi v8, v8, 1
+; NOMINVL-NEXT:    vmsne.vi v0, v8, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv32i1_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; MINVL-NEXT:    vmv.v.i v12, 0
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    li a1, 63
+; MINVL-NEXT:    vmerge.vim v16, v12, 1, v0
+; MINVL-NEXT:    slli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -63
+; MINVL-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v16, v16, a1
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; MINVL-NEXT:    vmerge.vim v8, v12, 1, v0
+; MINVL-NEXT:    vslideup.vx v16, v8, a0
+; MINVL-NEXT:    vand.vi v8, v16, 1
+; MINVL-NEXT:    vmsne.vi v0, v8, 0
+; MINVL-NEXT:    ret
   %res = call <vscale x 32 x i1> @llvm.vector.splice.nxv32i1(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b, i32 63)
   ret <vscale x 32 x i1> %res
 }
@@ -306,24 +428,43 @@ define <vscale x 64 x i1> @splice_nxv64i1_offset_negone(<vscale x 64 x i1> %a, <
 }
 
 define <vscale x 64 x i1> @splice_nxv64i1_offset_max(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) #0 {
-; CHECK-LABEL: splice_nxv64i1_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v24, 0
-; CHECK-NEXT:    li a0, 127
-; CHECK-NEXT:    vmerge.vim v16, v24, 1, v0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v8, v24, 1, v0
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -127
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    vand.vi v8, v8, 1
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv64i1_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v24, 0
+; NOMINVL-NEXT:    li a0, 127
+; NOMINVL-NEXT:    vmerge.vim v16, v24, 1, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v8, v24, 1, v0
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -127
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    vand.vi v8, v8, 1
+; NOMINVL-NEXT:    vmsne.vi v0, v8, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv64i1_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; MINVL-NEXT:    vmv.v.i v16, 0
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    li a1, 127
+; MINVL-NEXT:    vmerge.vim v24, v16, 1, v0
+; MINVL-NEXT:    slli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -127
+; MINVL-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v24, v24, a1
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; MINVL-NEXT:    vmerge.vim v8, v16, 1, v0
+; MINVL-NEXT:    vslideup.vx v24, v8, a0
+; MINVL-NEXT:    vand.vi v8, v24, 1
+; MINVL-NEXT:    vmsne.vi v0, v8, 0
+; MINVL-NEXT:    ret
   %res = call <vscale x 64 x i1> @llvm.vector.splice.nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b, i32 127)
   ret <vscale x 64 x i1> %res
 }
@@ -353,29 +494,51 @@ define <vscale x 1 x i8> @splice_nxv1i8_offset_negone(<vscale x 1 x i8> %a, <vsc
 }
 
 define <vscale x 1 x i8> @splice_nxv1i8_offset_min(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv1i8_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 2
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1i8_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -2
+; NOMINVL-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 2
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1i8_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -2
+; MINVL-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 2
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x i8> @llvm.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 -2)
   ret <vscale x 1 x i8> %res
 }
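; A hedged worked example of the -2 case above (hypothetical values,
; VLEN=256, so <vscale x 1 x i8> holds vlenb/8 = 4 lanes):
;   %a = [a0 a1 a2 a3], %b = [b0 b1 b2 b3]
;   slide amount = vlenb/8 - 2 = 2:  vslidedown  -> [a2 a3  ?  ?]
;   vslideup.vi ..., 2:              result      -> [a2 a3 b0 b1]
; Under +minimize-vl the slidedown only needs AVL = 2, since lanes 2..3 are
; rewritten by the slideup; the default runs both slides at VLMAX.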
 
 define <vscale x 1 x i8> @splice_nxv1i8_offset_max(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv1i8_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1i8_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 1
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1i8_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 1
+; MINVL-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x i8> @llvm.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 1)
   ret <vscale x 1 x i8> %res
 }
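; "offset_min"/"offset_max" exercise the extremes the intrinsic allows for
; this type: with vscale_range_min=2, <vscale x 1 x i8> is known to hold at
; least two lanes, so the offset may range from -2 to 1. For the positive
; extreme the MINVL slidedown runs at AVL = vlenb/8 - 1, the number of
; result lanes taken from %a.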
@@ -405,29 +568,51 @@ define <vscale x 2 x i8> @splice_nxv2i8_offset_negone(<vscale x 2 x i8> %a, <vsc
 }
 
 define <vscale x 2 x i8> @splice_nxv2i8_offset_min(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv2i8_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 4
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2i8_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -4
+; NOMINVL-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 4
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2i8_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -4
+; MINVL-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 4
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x i8> @llvm.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 -4)
   ret <vscale x 2 x i8> %res
 }
 
 define <vscale x 2 x i8> @splice_nxv2i8_offset_max(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv2i8_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2i8_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 3
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -3
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2i8_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -3
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 3
+; MINVL-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x i8> @llvm.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 3)
   ret <vscale x 2 x i8> %res
 }
@@ -457,29 +642,51 @@ define <vscale x 4 x i8> @splice_nxv4i8_offset_negone(<vscale x 4 x i8> %a, <vsc
 }
 
 define <vscale x 4 x i8> @splice_nxv4i8_offset_min(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv4i8_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 8
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4i8_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -8
+; NOMINVL-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 8
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4i8_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -8
+; MINVL-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 8
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x i8> @llvm.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 -8)
   ret <vscale x 4 x i8> %res
 }
 
 define <vscale x 4 x i8> @splice_nxv4i8_offset_max(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv4i8_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4i8_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 7
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -7
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4i8_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -7
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 7
+; MINVL-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x i8> @llvm.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 7)
   ret <vscale x 4 x i8> %res
 }
@@ -508,27 +715,47 @@ define <vscale x 8 x i8> @splice_nxv8i8_offset_negone(<vscale x 8 x i8> %a, <vsc
 }
 
 define <vscale x 8 x i8> @splice_nxv8i8_offset_min(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv8i8_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 16
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8i8_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -16
+; NOMINVL-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 16
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8i8_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -16
+; MINVL-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 16
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x i8> @llvm.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 -16)
   ret <vscale x 8 x i8> %res
 }
 
 define <vscale x 8 x i8> @splice_nxv8i8_offset_max(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv8i8_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8i8_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -15
+; NOMINVL-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 15
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8i8_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -15
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 15
+; MINVL-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x i8> @llvm.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 15)
   ret <vscale x 8 x i8> %res
 }
@@ -544,44 +771,78 @@ define <vscale x 16 x i8> @splice_nxv16i8_offset_zero(<vscale x 16 x i8> %a, <vs
 }
 
 define <vscale x 16 x i8> @splice_nxv16i8_offset_negone(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv16i8_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16i8_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16i8_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -1)
   ret <vscale x 16 x i8> %res
 }
 
 define <vscale x 16 x i8> @splice_nxv16i8_offset_min(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv16i8_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -32
-; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    li a0, 32
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16i8_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -32
+; NOMINVL-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    li a0, 32
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16i8_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -32
+; MINVL-NEXT:    li a1, 32
+; MINVL-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a1
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 -32)
   ret <vscale x 16 x i8> %res
 }
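; At offset magnitudes of 32 and up the 5-bit immediate forms run out:
; vsetivli's AVL and the slide immediates are both uimm5 (maximum 31), so
; the MINVL sequence above materializes the constant with li and falls back
; to vsetvli plus the .vx slide forms.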
 
 define <vscale x 16 x i8> @splice_nxv16i8_offset_max(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv16i8_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -31
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16i8_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 31
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -31
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16i8_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -31
+; MINVL-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 31
+; MINVL-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x i8> @llvm.vector.splice.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 31)
   ret <vscale x 16 x i8> %res
 }
@@ -597,45 +858,80 @@ define <vscale x 32 x i8> @splice_nxv32i8_offset_zero(<vscale x 32 x i8> %a, <vs
 }
 
 define <vscale x 32 x i8> @splice_nxv32i8_offset_negone(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv32i8_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v12, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv32i8_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v12, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv32i8_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e8, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v12, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 32 x i8> @llvm.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 -1)
   ret <vscale x 32 x i8> %res
 }
 
 define <vscale x 32 x i8> @splice_nxv32i8_offset_min(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv32i8_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -64
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    li a0, 64
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv32i8_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -64
+; NOMINVL-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    li a0, 64
+; NOMINVL-NEXT:    vslideup.vx v8, v12, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv32i8_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -64
+; MINVL-NEXT:    li a1, 64
+; MINVL-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v12, a1
+; MINVL-NEXT:    ret
   %res = call <vscale x 32 x i8> @llvm.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 -64)
   ret <vscale x 32 x i8> %res
 }
 
 define <vscale x 32 x i8> @splice_nxv32i8_offset_max(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv32i8_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 63
-; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -63
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv32i8_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    li a0, 63
+; NOMINVL-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -63
+; NOMINVL-NEXT:    vslideup.vx v8, v12, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv32i8_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -63
+; MINVL-NEXT:    li a1, 63
+; MINVL-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a1
+; MINVL-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v12, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 32 x i8> @llvm.vector.splice.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, i32 63)
   ret <vscale x 32 x i8> %res
 }
@@ -651,45 +947,80 @@ define <vscale x 64 x i8> @splice_nxv64i8_offset_zero(<vscale x 64 x i8> %a, <vs
 }
 
 define <vscale x 64 x i8> @splice_nxv64i8_offset_negone(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv64i8_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v16, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv64i8_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v16, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv64i8_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e8, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v16, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 64 x i8> @llvm.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 -1)
   ret <vscale x 64 x i8> %res
 }
 
 define <vscale x 64 x i8> @splice_nxv64i8_offset_min(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv64i8_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -128
-; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    li a0, 128
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv64i8_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -128
+; NOMINVL-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    li a0, 128
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv64i8_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -128
+; MINVL-NEXT:    li a1, 128
+; MINVL-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a1
+; MINVL-NEXT:    ret
   %res = call <vscale x 64 x i8> @llvm.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 -128)
   ret <vscale x 64 x i8> %res
 }
 
 define <vscale x 64 x i8> @splice_nxv64i8_offset_max(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) #0 {
-; CHECK-LABEL: splice_nxv64i8_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 127
-; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -127
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv64i8_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    li a0, 127
+; NOMINVL-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -127
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv64i8_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -127
+; MINVL-NEXT:    li a1, 127
+; MINVL-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a1
+; MINVL-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 64 x i8> @llvm.vector.splice.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, i32 127)
   ret <vscale x 64 x i8> %res
 }
@@ -719,29 +1050,51 @@ define <vscale x 1 x i16> @splice_nxv1i16_offset_negone(<vscale x 1 x i16> %a, <
 }
 
 define <vscale x 1 x i16> @splice_nxv1i16_offset_min(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv1i16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 2
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1i16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -2
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 2
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1i16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -2
+; MINVL-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 2
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x i16> @llvm.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 -2)
   ret <vscale x 1 x i16> %res
 }
 
 define <vscale x 1 x i16> @splice_nxv1i16_offset_max(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv1i16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1i16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 1
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1i16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 1
+; MINVL-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x i16> @llvm.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 1)
   ret <vscale x 1 x i16> %res
 }
@@ -771,29 +1124,51 @@ define <vscale x 2 x i16> @splice_nxv2i16_offset_negone(<vscale x 2 x i16> %a, <
 }
 
 define <vscale x 2 x i16> @splice_nxv2i16_offset_min(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv2i16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 4
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2i16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -4
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 4
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2i16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -4
+; MINVL-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 4
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x i16> @llvm.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 -4)
   ret <vscale x 2 x i16> %res
 }
 
 define <vscale x 2 x i16> @splice_nxv2i16_offset_max(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv2i16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2i16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 3
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -3
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2i16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -3
+; MINVL-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 3
+; MINVL-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x i16> @llvm.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 3)
   ret <vscale x 2 x i16> %res
 }
@@ -823,29 +1198,51 @@ define <vscale x 4 x i16> @splice_nxv4i16_offset_negone(<vscale x 4 x i16> %a, <
 }
 
 define <vscale x 4 x i16> @splice_nxv4i16_offset_min(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv4i16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 8
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4i16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -8
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 8
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4i16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -8
+; MINVL-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 8
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x i16> @llvm.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 -8)
   ret <vscale x 4 x i16> %res
 }
 
 define <vscale x 4 x i16> @splice_nxv4i16_offset_max(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv4i16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4i16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 7
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -7
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4i16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -7
+; MINVL-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 7
+; MINVL-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x i16> @llvm.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 7)
   ret <vscale x 4 x i16> %res
 }
@@ -861,40 +1258,70 @@ define <vscale x 8 x i16> @splice_nxv8i16_offset_zero(<vscale x 8 x i16> %a, <vs
 }
 
 define <vscale x 8 x i16> @splice_nxv8i16_offset_negone(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv8i16_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8i16_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8i16_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -1)
   ret <vscale x 8 x i16> %res
 }
 
 define <vscale x 8 x i16> @splice_nxv8i16_offset_min(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv8i16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 16
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8i16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -16
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 16
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8i16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -16
+; MINVL-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 16
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 -16)
   ret <vscale x 8 x i16> %res
 }
 
 define <vscale x 8 x i16> @splice_nxv8i16_offset_max(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv8i16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8i16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -15
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 15
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8i16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -15
+; MINVL-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 15
+; MINVL-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x i16> @llvm.vector.splice.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, i32 15)
   ret <vscale x 8 x i16> %res
 }
@@ -910,44 +1337,78 @@ define <vscale x 16 x i16> @splice_nxv16i16_offset_zero(<vscale x 16 x i16> %a,
 }
 
 define <vscale x 16 x i16> @splice_nxv16i16_offset_negone(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv16i16_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v12, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16i16_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v12, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16i16_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v12, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x i16> @llvm.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 -1)
   ret <vscale x 16 x i16> %res
 }
 
 define <vscale x 16 x i16> @splice_nxv16i16_offset_min(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv16i16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -32
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    li a0, 32
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16i16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -32
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    li a0, 32
+; NOMINVL-NEXT:    vslideup.vx v8, v12, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16i16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -32
+; MINVL-NEXT:    li a1, 32
+; MINVL-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v12, a1
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x i16> @llvm.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 -32)
   ret <vscale x 16 x i16> %res
 }
 
 define <vscale x 16 x i16> @splice_nxv16i16_offset_max(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv16i16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -31
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16i16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 31
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -31
+; NOMINVL-NEXT:    vslideup.vx v8, v12, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16i16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -31
+; MINVL-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 31
+; MINVL-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v12, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x i16> @llvm.vector.splice.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, i32 31)
   ret <vscale x 16 x i16> %res
 }
@@ -963,45 +1424,80 @@ define <vscale x 32 x i16> @splice_nxv32i16_offset_zero(<vscale x 32 x i16> %a,
 }
 
 define <vscale x 32 x i16> @splice_nxv32i16_offset_negone(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv32i16_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v16, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv32i16_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v16, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv32i16_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v16, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 32 x i16> @llvm.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 -1)
   ret <vscale x 32 x i16> %res
 }
 
 define <vscale x 32 x i16> @splice_nxv32i16_offset_min(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv32i16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -64
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    li a0, 64
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv32i16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -64
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    li a0, 64
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv32i16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -64
+; MINVL-NEXT:    li a1, 64
+; MINVL-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a1
+; MINVL-NEXT:    ret
   %res = call <vscale x 32 x i16> @llvm.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 -64)
   ret <vscale x 32 x i16> %res
 }
 
 define <vscale x 32 x i16> @splice_nxv32i16_offset_max(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) #0 {
-; CHECK-LABEL: splice_nxv32i16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 63
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -63
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv32i16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    li a0, 63
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -63
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv32i16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -63
+; MINVL-NEXT:    li a1, 63
+; MINVL-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a1
+; MINVL-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 32 x i16> @llvm.vector.splice.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, i32 63)
   ret <vscale x 32 x i16> %res
 }
@@ -1031,29 +1527,51 @@ define <vscale x 1 x i32> @splice_nxv1i32_offset_negone(<vscale x 1 x i32> %a, <
 }
 
 define <vscale x 1 x i32> @splice_nxv1i32_offset_min(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b) #0 {
-; CHECK-LABEL: splice_nxv1i32_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 2
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1i32_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -2
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 2
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1i32_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -2
+; MINVL-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 2
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x i32> @llvm.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 -2)
   ret <vscale x 1 x i32> %res
 }
 
 define <vscale x 1 x i32> @splice_nxv1i32_offset_max(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b) #0 {
-; CHECK-LABEL: splice_nxv1i32_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1i32_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 1
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1i32_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 1
+; MINVL-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x i32> @llvm.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 1)
   ret <vscale x 1 x i32> %res
 }
@@ -1083,29 +1601,51 @@ define <vscale x 2 x i32> @splice_nxv2i32_offset_negone(<vscale x 2 x i32> %a, <
 }
 
 define <vscale x 2 x i32> @splice_nxv2i32_offset_min(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) #0 {
-; CHECK-LABEL: splice_nxv2i32_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 4
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2i32_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -4
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 4
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2i32_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -4
+; MINVL-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 4
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x i32> @llvm.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 -4)
   ret <vscale x 2 x i32> %res
 }
 
 define <vscale x 2 x i32> @splice_nxv2i32_offset_max(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) #0 {
-; CHECK-LABEL: splice_nxv2i32_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2i32_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 3
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -3
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2i32_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -3
+; MINVL-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 3
+; MINVL-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x i32> @llvm.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 3)
   ret <vscale x 2 x i32> %res
 }
@@ -1121,43 +1661,76 @@ define <vscale x 4 x i32> @splice_nxv4i32_offset_zero(<vscale x 4 x i32> %a, <vs
 }
 
 define <vscale x 4 x i32> @splice_nxv4i32_offset_negone(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
-; CHECK-LABEL: splice_nxv4i32_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4i32_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4i32_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -1)
   ret <vscale x 4 x i32> %res
 }
 
 define <vscale x 4 x i32> @splice_nxv4i32_offset_min(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
-; CHECK-LABEL: splice_nxv4i32_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 8
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4i32_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -8
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 8
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4i32_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -8
+; MINVL-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 8
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 -8)
   ret <vscale x 4 x i32> %res
 }
 
 define <vscale x 4 x i32> @splice_nxv4i32_offset_max(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) #0 {
-; CHECK-LABEL: splice_nxv4i32_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4i32_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 7
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -7
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4i32_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -7
+; MINVL-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 7
+; MINVL-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 7)
   ret <vscale x 4 x i32> %res
 }
@@ -1173,40 +1746,70 @@ define <vscale x 8 x i32> @splice_nxv8i32_offset_zero(<vscale x 8 x i32> %a, <vs
 }
 
 define <vscale x 8 x i32> @splice_nxv8i32_offset_negone(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) #0 {
-; CHECK-LABEL: splice_nxv8i32_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v12, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8i32_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v12, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8i32_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e32, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v12, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 -1)
   ret <vscale x 8 x i32> %res
 }
 
 define <vscale x 8 x i32> @splice_nxv8i32_offset_min(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) #0 {
-; CHECK-LABEL: splice_nxv8i32_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v12, 16
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8i32_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -16
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v12, 16
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8i32_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -16
+; MINVL-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v12, 16
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 -16)
   ret <vscale x 8 x i32> %res
 }
 
 define <vscale x 8 x i32> @splice_nxv8i32_offset_max(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b) #0 {
-; CHECK-LABEL: splice_nxv8i32_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8i32_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -15
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 15
+; NOMINVL-NEXT:    vslideup.vx v8, v12, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8i32_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -15
+; MINVL-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 15
+; MINVL-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v12, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x i32> @llvm.vector.splice.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, i32 15)
   ret <vscale x 8 x i32> %res
 }
@@ -1222,44 +1825,78 @@ define <vscale x 16 x i32> @splice_nxv16i32_offset_zero(<vscale x 16 x i32> %a,
 }
 
 define <vscale x 16 x i32> @splice_nxv16i32_offset_negone(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) #0 {
-; CHECK-LABEL: splice_nxv16i32_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v16, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16i32_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v16, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16i32_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e32, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v16, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x i32> @llvm.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 -1)
   ret <vscale x 16 x i32> %res
 }
 
 define <vscale x 16 x i32> @splice_nxv16i32_offset_min(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) #0 {
-; CHECK-LABEL: splice_nxv16i32_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -32
-; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    li a0, 32
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16i32_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -32
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    li a0, 32
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16i32_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -32
+; MINVL-NEXT:    li a1, 32
+; MINVL-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a1
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x i32> @llvm.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 -32)
   ret <vscale x 16 x i32> %res
 }
 
 define <vscale x 16 x i32> @splice_nxv16i32_offset_max(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) #0 {
-; CHECK-LABEL: splice_nxv16i32_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -31
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16i32_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 31
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -31
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16i32_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -31
+; MINVL-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 31
+; MINVL-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x i32> @llvm.vector.splice.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, i32 31)
   ret <vscale x 16 x i32> %res
 }
@@ -1289,29 +1926,51 @@ define <vscale x 1 x i64> @splice_nxv1i64_offset_negone(<vscale x 1 x i64> %a, <
 }
 
 define <vscale x 1 x i64> @splice_nxv1i64_offset_min(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b) #0 {
-; CHECK-LABEL: splice_nxv1i64_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 2
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1i64_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -2
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 2
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1i64_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -2
+; MINVL-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 2
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 -2)
   ret <vscale x 1 x i64> %res
 }
 
 define <vscale x 1 x i64> @splice_nxv1i64_offset_max(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b) #0 {
-; CHECK-LABEL: splice_nxv1i64_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1i64_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 1
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1i64_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 1
+; MINVL-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 1)
   ret <vscale x 1 x i64> %res
 }
@@ -1327,43 +1986,76 @@ define <vscale x 2 x i64> @splice_nxv2i64_offset_zero(<vscale x 2 x i64> %a, <vs
 }
 
 define <vscale x 2 x i64> @splice_nxv2i64_offset_negone(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
-; CHECK-LABEL: splice_nxv2i64_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2i64_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2i64_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -1)
   ret <vscale x 2 x i64> %res
 }
 
 define <vscale x 2 x i64> @splice_nxv2i64_offset_min(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
-; CHECK-LABEL: splice_nxv2i64_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 4
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2i64_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -4
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 4
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2i64_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -4
+; MINVL-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 4
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 -4)
   ret <vscale x 2 x i64> %res
 }
 
 define <vscale x 2 x i64> @splice_nxv2i64_offset_max(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) #0 {
-; CHECK-LABEL: splice_nxv2i64_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2i64_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 3
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -3
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2i64_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -3
+; MINVL-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 3
+; MINVL-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x i64> @llvm.vector.splice.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, i32 3)
   ret <vscale x 2 x i64> %res
 }
@@ -1379,43 +2071,76 @@ define <vscale x 4 x i64> @splice_nxv4i64_offset_zero(<vscale x 4 x i64> %a, <vs
 }
 
 define <vscale x 4 x i64> @splice_nxv4i64_offset_negone(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) #0 {
-; CHECK-LABEL: splice_nxv4i64_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v12, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4i64_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v12, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4i64_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v12, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x i64> @llvm.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 -1)
   ret <vscale x 4 x i64> %res
 }
 
 define <vscale x 4 x i64> @splice_nxv4i64_offset_min(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) #0 {
-; CHECK-LABEL: splice_nxv4i64_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v12, 8
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4i64_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -8
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v12, 8
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4i64_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -8
+; MINVL-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v12, 8
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x i64> @llvm.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 -8)
   ret <vscale x 4 x i64> %res
 }
 
 define <vscale x 4 x i64> @splice_nxv4i64_offset_max(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b) #0 {
-; CHECK-LABEL: splice_nxv4i64_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4i64_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 7
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -7
+; NOMINVL-NEXT:    vslideup.vx v8, v12, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4i64_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -7
+; MINVL-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 7
+; MINVL-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v12, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x i64> @llvm.vector.splice.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, i32 7)
   ret <vscale x 4 x i64> %res
 }
@@ -1431,40 +2156,70 @@ define <vscale x 8 x i64> @splice_nxv8i64_offset_zero(<vscale x 8 x i64> %a, <vs
 }
 
 define <vscale x 8 x i64> @splice_nxv8i64_offset_negone(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) #0 {
-; CHECK-LABEL: splice_nxv8i64_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v16, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8i64_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v16, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8i64_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v16, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x i64> @llvm.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 -1)
   ret <vscale x 8 x i64> %res
 }
 
 define <vscale x 8 x i64> @splice_nxv8i64_offset_min(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) #0 {
-; CHECK-LABEL: splice_nxv8i64_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v16, 16
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8i64_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -16
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v16, 16
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8i64_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -16
+; MINVL-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v16, 16
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x i64> @llvm.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 -16)
   ret <vscale x 8 x i64> %res
 }
 
 define <vscale x 8 x i64> @splice_nxv8i64_offset_max(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) #0 {
-; CHECK-LABEL: splice_nxv8i64_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8i64_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -15
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 15
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8i64_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -15
+; MINVL-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 15
+; MINVL-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x i64> @llvm.vector.splice.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, i32 15)
   ret <vscale x 8 x i64> %res
 }
@@ -1494,29 +2249,51 @@ define <vscale x 1 x bfloat> @splice_nxv1bf16_offset_negone(<vscale x 1 x bfloat
 }
 
 define <vscale x 1 x bfloat> @splice_nxv1bf16_offset_min(<vscale x 1 x bfloat> %a, <vscale x 1 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv1bf16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 2
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1bf16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -2
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 2
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1bf16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -2
+; MINVL-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 2
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x bfloat> @llvm.vector.splice.nxv1bf16(<vscale x 1 x bfloat> %a, <vscale x 1 x bfloat> %b, i32 -2)
   ret <vscale x 1 x bfloat> %res
 }
 
 define <vscale x 1 x bfloat> @splice_nxv1bf16_offset_max(<vscale x 1 x bfloat> %a, <vscale x 1 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv1bf16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1bf16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 1
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1bf16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 1
+; MINVL-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x bfloat> @llvm.vector.splice.nxv1bf16(<vscale x 1 x bfloat> %a, <vscale x 1 x bfloat> %b, i32 1)
   ret <vscale x 1 x bfloat> %res
 }
@@ -1546,29 +2323,51 @@ define <vscale x 2 x bfloat> @splice_nxv2bf16_offset_negone(<vscale x 2 x bfloat
 }
 
 define <vscale x 2 x bfloat> @splice_nxv2bf16_offset_min(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv2bf16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 4
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2bf16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -4
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 4
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2bf16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -4
+; MINVL-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 4
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x bfloat> @llvm.vector.splice.nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b, i32 -4)
   ret <vscale x 2 x bfloat> %res
 }
 
 define <vscale x 2 x bfloat> @splice_nxv2bf16_offset_max(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv2bf16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2bf16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 3
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -3
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2bf16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -3
+; MINVL-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 3
+; MINVL-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x bfloat> @llvm.vector.splice.nxv2bf16(<vscale x 2 x bfloat> %a, <vscale x 2 x bfloat> %b, i32 3)
   ret <vscale x 2 x bfloat> %res
 }
@@ -1598,29 +2397,51 @@ define <vscale x 4 x bfloat> @splice_nxv4bf16_offset_negone(<vscale x 4 x bfloat
 }
 
 define <vscale x 4 x bfloat> @splice_nxv4bf16_offset_min(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv4bf16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 8
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4bf16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -8
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 8
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4bf16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -8
+; MINVL-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 8
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x bfloat> @llvm.vector.splice.nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b, i32 -8)
   ret <vscale x 4 x bfloat> %res
 }
 
 define <vscale x 4 x bfloat> @splice_nxv4bf16_offset_max(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv4bf16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4bf16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 7
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -7
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4bf16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -7
+; MINVL-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 7
+; MINVL-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x bfloat> @llvm.vector.splice.nxv4bf16(<vscale x 4 x bfloat> %a, <vscale x 4 x bfloat> %b, i32 7)
   ret <vscale x 4 x bfloat> %res
 }
@@ -1636,40 +2457,70 @@ define <vscale x 8 x bfloat> @splice_nxv8bf16_offset_zero(<vscale x 8 x bfloat>
 }
 
 define <vscale x 8 x bfloat> @splice_nxv8bf16_offset_negone(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv8bf16_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8bf16_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8bf16_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x bfloat> @llvm.vector.splice.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, i32 -1)
   ret <vscale x 8 x bfloat> %res
 }
 
 define <vscale x 8 x bfloat> @splice_nxv8bf16_offset_min(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv8bf16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 16
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8bf16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -16
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 16
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8bf16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -16
+; MINVL-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 16
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x bfloat> @llvm.vector.splice.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, i32 -16)
   ret <vscale x 8 x bfloat> %res
 }
 
 define <vscale x 8 x bfloat> @splice_nxv8bf16_offset_max(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv8bf16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8bf16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -15
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 15
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8bf16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -15
+; MINVL-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 15
+; MINVL-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x bfloat> @llvm.vector.splice.nxv8bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, i32 15)
   ret <vscale x 8 x bfloat> %res
 }
@@ -1685,44 +2536,78 @@ define <vscale x 16 x bfloat> @splice_nxv16bf16_offset_zero(<vscale x 16 x bfloa
 }
 
 define <vscale x 16 x bfloat> @splice_nxv16bf16_offset_negone(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv16bf16_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v12, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16bf16_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v12, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16bf16_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v12, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x bfloat> @llvm.vector.splice.nxv16bf16(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %b, i32 -1)
   ret <vscale x 16 x bfloat> %res
 }
 
 define <vscale x 16 x bfloat> @splice_nxv16bf16_offset_min(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv16bf16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -32
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    li a0, 32
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16bf16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -32
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    li a0, 32
+; NOMINVL-NEXT:    vslideup.vx v8, v12, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16bf16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -32
+; MINVL-NEXT:    li a1, 32
+; MINVL-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v12, a1
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x bfloat> @llvm.vector.splice.nxv16bf16(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %b, i32 -32)
   ret <vscale x 16 x bfloat> %res
 }
 
 define <vscale x 16 x bfloat> @splice_nxv16bf16_offset_max(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv16bf16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -31
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16bf16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 31
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -31
+; NOMINVL-NEXT:    vslideup.vx v8, v12, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16bf16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -31
+; MINVL-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 31
+; MINVL-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v12, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x bfloat> @llvm.vector.splice.nxv16bf16(<vscale x 16 x bfloat> %a, <vscale x 16 x bfloat> %b, i32 31)
   ret <vscale x 16 x bfloat> %res
 }
@@ -1738,45 +2623,80 @@ define <vscale x 32 x bfloat> @splice_nxv32bf16_offset_zero(<vscale x 32 x bfloa
 }
 
 define <vscale x 32 x bfloat> @splice_nxv32bf16_offset_negone(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv32bf16_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v16, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv32bf16_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v16, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv32bf16_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v16, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 32 x bfloat> @llvm.vector.splice.nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b, i32 -1)
   ret <vscale x 32 x bfloat> %res
 }
 
 define <vscale x 32 x bfloat> @splice_nxv32bf16_offset_min(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv32bf16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -64
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    li a0, 64
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv32bf16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -64
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    li a0, 64
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv32bf16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -64
+; MINVL-NEXT:    li a1, 64
+; MINVL-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a1
+; MINVL-NEXT:    ret
   %res = call <vscale x 32 x bfloat> @llvm.vector.splice.nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b, i32 -64)
   ret <vscale x 32 x bfloat> %res
 }
 
 define <vscale x 32 x bfloat> @splice_nxv32bf16_offset_max(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b) #0 {
-; CHECK-LABEL: splice_nxv32bf16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 63
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -63
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv32bf16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    li a0, 63
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -63
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv32bf16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -63
+; MINVL-NEXT:    li a1, 63
+; MINVL-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a1
+; MINVL-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 32 x bfloat> @llvm.vector.splice.nxv32bf16(<vscale x 32 x bfloat> %a, <vscale x 32 x bfloat> %b, i32 63)
   ret <vscale x 32 x bfloat> %res
 }
@@ -1806,29 +2726,51 @@ define <vscale x 1 x half> @splice_nxv1f16_offset_negone(<vscale x 1 x half> %a,
 }
 
 define <vscale x 1 x half> @splice_nxv1f16_offset_min(<vscale x 1 x half> %a, <vscale x 1 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv1f16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 2
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1f16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -2
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 2
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1f16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -2
+; MINVL-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 2
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x half> @llvm.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 -2)
   ret <vscale x 1 x half> %res
 }
 
 define <vscale x 1 x half> @splice_nxv1f16_offset_max(<vscale x 1 x half> %a, <vscale x 1 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv1f16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1f16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 1
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1f16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 1
+; MINVL-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x half> @llvm.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 1)
   ret <vscale x 1 x half> %res
 }
@@ -1858,29 +2800,51 @@ define <vscale x 2 x half> @splice_nxv2f16_offset_negone(<vscale x 2 x half> %a,
 }
 
 define <vscale x 2 x half> @splice_nxv2f16_offset_min(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv2f16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 4
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2f16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -4
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 4
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2f16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -4
+; MINVL-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 4
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x half> @llvm.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 -4)
   ret <vscale x 2 x half> %res
 }
 
 define <vscale x 2 x half> @splice_nxv2f16_offset_max(<vscale x 2 x half> %a, <vscale x 2 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv2f16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2f16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 3
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -3
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2f16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -3
+; MINVL-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 3
+; MINVL-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x half> @llvm.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 3)
   ret <vscale x 2 x half> %res
 }
@@ -1910,29 +2874,51 @@ define <vscale x 4 x half> @splice_nxv4f16_offset_negone(<vscale x 4 x half> %a,
 }
 
 define <vscale x 4 x half> @splice_nxv4f16_offset_min(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv4f16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 8
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4f16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -8
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 8
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4f16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -8
+; MINVL-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 8
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x half> @llvm.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 -8)
   ret <vscale x 4 x half> %res
 }
 
 define <vscale x 4 x half> @splice_nxv4f16_offset_max(<vscale x 4 x half> %a, <vscale x 4 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv4f16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4f16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 7
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -7
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4f16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -7
+; MINVL-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 7
+; MINVL-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x half> @llvm.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 7)
   ret <vscale x 4 x half> %res
 }
@@ -1948,40 +2934,70 @@ define <vscale x 8 x half> @splice_nxv8f16_offset_zero(<vscale x 8 x half> %a, <
 }
 
 define <vscale x 8 x half> @splice_nxv8f16_offset_negone(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv8f16_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8f16_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8f16_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -1)
   ret <vscale x 8 x half> %res
 }
 
 define <vscale x 8 x half> @splice_nxv8f16_offset_min(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv8f16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 16
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8f16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -16
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 16
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8f16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -16
+; MINVL-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 16
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 -16)
   ret <vscale x 8 x half> %res
 }
 
 define <vscale x 8 x half> @splice_nxv8f16_offset_max(<vscale x 8 x half> %a, <vscale x 8 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv8f16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8f16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -15
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 15
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8f16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -15
+; MINVL-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 15
+; MINVL-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x half> @llvm.vector.splice.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, i32 15)
   ret <vscale x 8 x half> %res
 }
@@ -1997,44 +3013,78 @@ define <vscale x 16 x half> @splice_nxv16f16_offset_zero(<vscale x 16 x half> %a
 }
 
 define <vscale x 16 x half> @splice_nxv16f16_offset_negone(<vscale x 16 x half> %a, <vscale x 16 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv16f16_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v12, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16f16_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v12, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16f16_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v12, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x half> @llvm.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 -1)
   ret <vscale x 16 x half> %res
 }
 
 define <vscale x 16 x half> @splice_nxv16f16_offset_min(<vscale x 16 x half> %a, <vscale x 16 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv16f16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -32
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    li a0, 32
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16f16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -32
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    li a0, 32
+; NOMINVL-NEXT:    vslideup.vx v8, v12, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16f16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -32
+; MINVL-NEXT:    li a1, 32
+; MINVL-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v12, a1
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x half> @llvm.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 -32)
   ret <vscale x 16 x half> %res
 }
 
 define <vscale x 16 x half> @splice_nxv16f16_offset_max(<vscale x 16 x half> %a, <vscale x 16 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv16f16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -31
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16f16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 31
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -31
+; NOMINVL-NEXT:    vslideup.vx v8, v12, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16f16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -31
+; MINVL-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 31
+; MINVL-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v12, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x half> @llvm.vector.splice.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x half> %b, i32 31)
   ret <vscale x 16 x half> %res
 }
@@ -2050,45 +3100,80 @@ define <vscale x 32 x half> @splice_nxv32f16_offset_zero(<vscale x 32 x half> %a
 }
 
 define <vscale x 32 x half> @splice_nxv32f16_offset_negone(<vscale x 32 x half> %a, <vscale x 32 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv32f16_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v16, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv32f16_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v16, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv32f16_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v16, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 32 x half> @llvm.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 -1)
   ret <vscale x 32 x half> %res
 }
 
 define <vscale x 32 x half> @splice_nxv32f16_offset_min(<vscale x 32 x half> %a, <vscale x 32 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv32f16_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -64
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    li a0, 64
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv32f16_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -64
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    li a0, 64
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv32f16_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -64
+; MINVL-NEXT:    li a1, 64
+; MINVL-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a1
+; MINVL-NEXT:    ret
   %res = call <vscale x 32 x half> @llvm.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 -64)
   ret <vscale x 32 x half> %res
 }
 
 define <vscale x 32 x half> @splice_nxv32f16_offset_max(<vscale x 32 x half> %a, <vscale x 32 x half> %b) #0 {
-; CHECK-LABEL: splice_nxv32f16_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 63
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -63
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv32f16_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    li a0, 63
+; NOMINVL-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -63
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv32f16_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -63
+; MINVL-NEXT:    li a1, 63
+; MINVL-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a1
+; MINVL-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 32 x half> @llvm.vector.splice.nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i32 63)
   ret <vscale x 32 x half> %res
 }
@@ -2118,29 +3203,51 @@ define <vscale x 1 x float> @splice_nxv1f32_offset_negone(<vscale x 1 x float> %
 }
 
 define <vscale x 1 x float> @splice_nxv1f32_offset_min(<vscale x 1 x float> %a, <vscale x 1 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv1f32_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 2
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1f32_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -2
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 2
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1f32_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -2
+; MINVL-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 2
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x float> @llvm.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 -2)
   ret <vscale x 1 x float> %res
 }
 
 define <vscale x 1 x float> @splice_nxv1f32_offset_max(<vscale x 1 x float> %a, <vscale x 1 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv1f32_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1f32_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 1
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1f32_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 1
+; MINVL-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x float> @llvm.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 1)
   ret <vscale x 1 x float> %res
 }
@@ -2170,29 +3277,51 @@ define <vscale x 2 x float> @splice_nxv2f32_offset_negone(<vscale x 2 x float> %
 }
 
 define <vscale x 2 x float> @splice_nxv2f32_offset_min(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv2f32_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 4
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2f32_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -4
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 4
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2f32_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -4
+; MINVL-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 4
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x float> @llvm.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 -4)
   ret <vscale x 2 x float> %res
 }
 
 define <vscale x 2 x float> @splice_nxv2f32_offset_max(<vscale x 2 x float> %a, <vscale x 2 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv2f32_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2f32_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 3
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -3
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2f32_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -3
+; MINVL-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 3
+; MINVL-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x float> @llvm.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 3)
   ret <vscale x 2 x float> %res
 }
@@ -2208,43 +3337,76 @@ define <vscale x 4 x float> @splice_nxv4f32_offset_zero(<vscale x 4 x float> %a,
 }
 
 define <vscale x 4 x float> @splice_nxv4f32_offset_negone(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv4f32_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4f32_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4f32_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -1)
   ret <vscale x 4 x float> %res
 }
 
 define <vscale x 4 x float> @splice_nxv4f32_offset_min(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv4f32_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 8
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4f32_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -8
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 8
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4f32_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -8
+; MINVL-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 8
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 -8)
   ret <vscale x 4 x float> %res
 }
 
 define <vscale x 4 x float> @splice_nxv4f32_offset_max(<vscale x 4 x float> %a, <vscale x 4 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv4f32_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4f32_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 7
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -7
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4f32_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -7
+; MINVL-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 7
+; MINVL-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x float> @llvm.vector.splice.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, i32 7)
   ret <vscale x 4 x float> %res
 }
@@ -2260,40 +3422,70 @@ define <vscale x 8 x float> @splice_nxv8f32_offset_zero(<vscale x 8 x float> %a,
 }
 
 define <vscale x 8 x float> @splice_nxv8f32_offset_negone(<vscale x 8 x float> %a, <vscale x 8 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv8f32_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v12, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8f32_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v12, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8f32_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e32, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v12, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x float> @llvm.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 -1)
   ret <vscale x 8 x float> %res
 }
 
 define <vscale x 8 x float> @splice_nxv8f32_offset_min(<vscale x 8 x float> %a, <vscale x 8 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv8f32_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v12, 16
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8f32_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -16
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v12, 16
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8f32_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -16
+; MINVL-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v12, 16
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x float> @llvm.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 -16)
   ret <vscale x 8 x float> %res
 }
 
 define <vscale x 8 x float> @splice_nxv8f32_offset_max(<vscale x 8 x float> %a, <vscale x 8 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv8f32_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8f32_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -15
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 15
+; NOMINVL-NEXT:    vslideup.vx v8, v12, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8f32_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -15
+; MINVL-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 15
+; MINVL-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v12, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x float> @llvm.vector.splice.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x float> %b, i32 15)
   ret <vscale x 8 x float> %res
 }
@@ -2309,44 +3501,78 @@ define <vscale x 16 x float> @splice_nxv16f32_offset_zero(<vscale x 16 x float>
 }
 
 define <vscale x 16 x float> @splice_nxv16f32_offset_negone(<vscale x 16 x float> %a, <vscale x 16 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv16f32_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v16, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16f32_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v16, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16f32_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e32, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v16, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x float> @llvm.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 -1)
   ret <vscale x 16 x float> %res
 }
 
 define <vscale x 16 x float> @splice_nxv16f32_offset_min(<vscale x 16 x float> %a, <vscale x 16 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv16f32_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -32
-; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    li a0, 32
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16f32_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -32
+; NOMINVL-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    li a0, 32
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16f32_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -32
+; MINVL-NEXT:    li a1, 32
+; MINVL-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a1
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x float> @llvm.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 -32)
   ret <vscale x 16 x float> %res
 }
 
 define <vscale x 16 x float> @splice_nxv16f32_offset_max(<vscale x 16 x float> %a, <vscale x 16 x float> %b) #0 {
-; CHECK-LABEL: splice_nxv16f32_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 31
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -31
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv16f32_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 31
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    slli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -31
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv16f32_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    slli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -31
+; MINVL-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 31
+; MINVL-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 16 x float> @llvm.vector.splice.nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i32 31)
   ret <vscale x 16 x float> %res
 }
@@ -2376,29 +3602,51 @@ define <vscale x 1 x double> @splice_nxv1f64_offset_negone(<vscale x 1 x double>
 }
 
 define <vscale x 1 x double> @splice_nxv1f64_offset_min(<vscale x 1 x double> %a, <vscale x 1 x double> %b) #0 {
-; CHECK-LABEL: splice_nxv1f64_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 2
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1f64_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -2
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 2
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1f64_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -2
+; MINVL-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 2
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x double> @llvm.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 -2)
   ret <vscale x 1 x double> %res
 }
 
 define <vscale x 1 x double> @splice_nxv1f64_offset_max(<vscale x 1 x double> %a, <vscale x 1 x double> %b) #0 {
-; CHECK-LABEL: splice_nxv1f64_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 1
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv1f64_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 1
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 3
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv1f64_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 3
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 1
+; MINVL-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 1 x double> @llvm.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 1)
   ret <vscale x 1 x double> %res
 }
@@ -2414,43 +3662,76 @@ define <vscale x 2 x double> @splice_nxv2f64_offset_zero(<vscale x 2 x double> %
 }
 
 define <vscale x 2 x double> @splice_nxv2f64_offset_negone(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
-; CHECK-LABEL: splice_nxv2f64_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2f64_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2f64_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -1)
   ret <vscale x 2 x double> %res
 }
 
 define <vscale x 2 x double> @splice_nxv2f64_offset_min(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
-; CHECK-LABEL: splice_nxv2f64_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 4
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2f64_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -4
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 4
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2f64_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -4
+; MINVL-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 4
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -4)
   ret <vscale x 2 x double> %res
 }
 
 define <vscale x 2 x double> @splice_nxv2f64_offset_max(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
-; CHECK-LABEL: splice_nxv2f64_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 2
-; CHECK-NEXT:    addi a0, a0, -3
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv2f64_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 3
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 2
+; NOMINVL-NEXT:    addi a0, a0, -3
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv2f64_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 2
+; MINVL-NEXT:    addi a0, a0, -3
+; MINVL-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 3
+; MINVL-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 2 x double> @llvm.vector.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 3)
   ret <vscale x 2 x double> %res
 }
@@ -2466,43 +3747,76 @@ define <vscale x 4 x double> @splice_nxv4f64_offset_zero(<vscale x 4 x double> %
 }
 
 define <vscale x 4 x double> @splice_nxv4f64_offset_negone(<vscale x 4 x double> %a, <vscale x 4 x double> %b) #0 {
-; CHECK-LABEL: splice_nxv4f64_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v12, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4f64_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v12, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4f64_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v12, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x double> @llvm.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 -1)
   ret <vscale x 4 x double> %res
 }
 
 define <vscale x 4 x double> @splice_nxv4f64_offset_min(<vscale x 4 x double> %a, <vscale x 4 x double> %b) #0 {
-; CHECK-LABEL: splice_nxv4f64_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -8
-; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v12, 8
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4f64_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -8
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v12, 8
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4f64_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -8
+; MINVL-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v12, 8
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x double> @llvm.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 -8)
   ret <vscale x 4 x double> %res
 }
 
 define <vscale x 4 x double> @splice_nxv4f64_offset_max(<vscale x 4 x double> %a, <vscale x 4 x double> %b) #0 {
-; CHECK-LABEL: splice_nxv4f64_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 7
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    addi a0, a0, -7
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv4f64_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 7
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    srli a0, a0, 1
+; NOMINVL-NEXT:    addi a0, a0, -7
+; NOMINVL-NEXT:    vslideup.vx v8, v12, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv4f64_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    srli a0, a0, 1
+; MINVL-NEXT:    addi a0, a0, -7
+; MINVL-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 7
+; MINVL-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v12, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 4 x double> @llvm.vector.splice.nxv4f64(<vscale x 4 x double> %a, <vscale x 4 x double> %b, i32 7)
   ret <vscale x 4 x double> %res
 }
@@ -2518,40 +3832,70 @@ define <vscale x 8 x double> @splice_nxv8f64_offset_zero(<vscale x 8 x double> %
 }
 
 define <vscale x 8 x double> @splice_nxv8f64_offset_negone(<vscale x 8 x double> %a, <vscale x 8 x double> %b) #0 {
-; CHECK-LABEL: splice_nxv8f64_offset_negone:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v16, 1
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8f64_offset_negone:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -1
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v16, 1
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8f64_offset_negone:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -1
+; MINVL-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v16, 1
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x double> @llvm.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 -1)
   ret <vscale x 8 x double> %res
 }
 
 define <vscale x 8 x double> @splice_nxv8f64_offset_min(<vscale x 8 x double> %a, <vscale x 8 x double> %b) #0 {
-; CHECK-LABEL: splice_nxv8f64_offset_min:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -16
-; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v16, 16
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8f64_offset_min:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -16
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v16, 16
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8f64_offset_min:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -16
+; MINVL-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v16, 16
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x double> @llvm.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 -16)
   ret <vscale x 8 x double> %res
 }
 
 define <vscale x 8 x double> @splice_nxv8f64_offset_max(<vscale x 8 x double> %a, <vscale x 8 x double> %b) #0 {
-; CHECK-LABEL: splice_nxv8f64_offset_max:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    addi a0, a0, -15
-; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 15
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: splice_nxv8f64_offset_max:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    csrr a0, vlenb
+; NOMINVL-NEXT:    addi a0, a0, -15
+; NOMINVL-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 15
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: splice_nxv8f64_offset_max:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    csrr a0, vlenb
+; MINVL-NEXT:    addi a0, a0, -15
+; MINVL-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 15
+; MINVL-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a0
+; MINVL-NEXT:    ret
   %res = call <vscale x 8 x double> @llvm.vector.splice.nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b, i32 15)
   ret <vscale x 8 x double> %res
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
index ad2436713ead8..21d07b167f709 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs -riscv-v-vector-bits-min=128 \
-; RUN:   < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,NOMINVL
+; RUN: llc -mtriple=riscv64 -mattr=+v,+minimize-vl -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,MINVL
 
 declare <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1>, <2 x i1>, i32, <2 x i1>, i32, i32)
 declare <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1>, <4 x i1>, i32, <4 x i1>, i32, i32)
@@ -8,285 +10,523 @@ declare <8 x i1> @llvm.experimental.vp.splice.v8i1(<8 x i1>, <8 x i1>, i32, <8 x
 declare <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1>, <16 x i1>, i32, <16 x i1>, i32, i32)
 
 define <2 x i1> @test_vp_splice_v2i1(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v2i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v9, v8, a0
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v2i1:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v9, v9, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v9, v8, a0
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v2i1:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; MINVL-NEXT:    vslidedown.vi v9, v9, 5
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; MINVL-NEXT:    vslideup.vx v9, v8, a0
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x i1> %v
 }
 
 define <2 x i1> @test_vp_splice_v2i1_negative_offset(<2 x i1> %va, <2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v2i1_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
-; CHECK-NEXT:    vslideup.vi v9, v8, 5
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v2i1_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v9, v9, a0
+; NOMINVL-NEXT:    vslideup.vi v9, v8, 5
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v2i1_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e8, mf8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v9, v9, a0
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; MINVL-NEXT:    vslideup.vi v9, v8, 5
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 -5, <2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <2 x i1> %v
 }
 
 define <2 x i1> @test_vp_splice_v2i1_masked(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v2i1_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv.v.i v11, 0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v2i1_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v10, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v11, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v10
+; NOMINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v2i1_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; MINVL-NEXT:    vmv1r.v v10, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; MINVL-NEXT:    vmv.v.i v11, 0
+; MINVL-NEXT:    vmv1r.v v0, v10
+; MINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; MINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; MINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; MINVL-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; MINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; MINVL-NEXT:    ret
   %v = call <2 x i1> @llvm.experimental.vp.splice.v2i1(<2 x i1> %va, <2 x i1> %vb, i32 5, <2 x i1> %mask, i32 %evla, i32 %evlb)
   ret <2 x i1> %v
 }
 
 define <4 x i1> @test_vp_splice_v4i1(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v4i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v9, v8, a0
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v4i1:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v9, v9, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v9, v8, a0
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v4i1:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v9, v9, 5
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; MINVL-NEXT:    vslideup.vx v9, v8, a0
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x i1> %v
 }
 
 define <4 x i1> @test_vp_splice_v4i1_negative_offset(<4 x i1> %va, <4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v4i1_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
-; CHECK-NEXT:    vslideup.vi v9, v8, 5
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v4i1_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v9, v9, a0
+; NOMINVL-NEXT:    vslideup.vi v9, v8, 5
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v4i1_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e8, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v9, v9, a0
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; MINVL-NEXT:    vslideup.vi v9, v8, 5
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 -5, <4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <4 x i1> %v
 }
 
 define <4 x i1> @test_vp_splice_v4i1_masked(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v4i1_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv.v.i v11, 0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v4i1_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v10, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v11, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v10
+; NOMINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v4i1_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; MINVL-NEXT:    vmv1r.v v10, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; MINVL-NEXT:    vmv.v.i v11, 0
+; MINVL-NEXT:    vmv1r.v v0, v10
+; MINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; MINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; MINVL-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; MINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; MINVL-NEXT:    ret
   %v = call <4 x i1> @llvm.experimental.vp.splice.v4i1(<4 x i1> %va, <4 x i1> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
   ret <4 x i1> %v
 }
 
 define <8 x i1> @test_vp_splice_v8i1(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v8i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v9, v8, a0
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v8i1:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v9, v9, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v9, v8, a0
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v8i1:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v9, v9, 5
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vx v9, v8, a0
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <8 x i1> @llvm.experimental.vp.splice.v8i1(<8 x i1> %va, <8 x i1> %vb, i32 5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <8 x i1> %v
 }
 
 define <8 x i1> @test_vp_splice_v8i1_negative_offset(<8 x i1> %va, <8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v8i1_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
-; CHECK-NEXT:    vslideup.vi v9, v8, 5
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v8i1_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v9, v9, a0
+; NOMINVL-NEXT:    vslideup.vi v9, v8, 5
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v8i1_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e8, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v9, v9, a0
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vi v9, v8, 5
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <8 x i1> @llvm.experimental.vp.splice.v8i1(<8 x i1> %va, <8 x i1> %vb, i32 -5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <8 x i1> %v
 }
 
 define <8 x i1> @test_vp_splice_v8i1_masked(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v8i1_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv.v.i v11, 0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v8i1_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v10, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v11, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v10
+; NOMINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v8i1_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; MINVL-NEXT:    vmv1r.v v10, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; MINVL-NEXT:    vmv.v.i v11, 0
+; MINVL-NEXT:    vmv1r.v v0, v10
+; MINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; MINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; MINVL-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; MINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; MINVL-NEXT:    ret
   %v = call <8 x i1> @llvm.experimental.vp.splice.v8i1(<8 x i1> %va, <8 x i1> %vb, i32 5, <8 x i1> %mask, i32 %evla, i32 %evlb)
   ret <8 x i1> %v
 }
 
 define <16 x i1> @test_vp_splice_v16i1(<16 x i1> %va, <16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v16i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v9, v8, a0
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v16i1:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v9, v9, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v9, v8, a0
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v16i1:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v9, v9, 5
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v9, v8, a0
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1> %va, <16 x i1> %vb, i32 5, <16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <16 x i1> %v
 }
 
 define <16 x i1> @test_vp_splice_v16i1_negative_offset(<16 x i1> %va, <16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v16i1_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
-; CHECK-NEXT:    vslideup.vi v9, v8, 5
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v16i1_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v9, v9, a0
+; NOMINVL-NEXT:    vslideup.vi v9, v8, 5
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v16i1_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v9, v9, a0
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v9, v8, 5
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1> %va, <16 x i1> %vb, i32 -5, <16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <16 x i1> %v
 }
 
 define <16 x i1> @test_vp_splice_v16i1_masked(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_v16i1_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.i v11, 0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_v16i1_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v10, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v11, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v10
+; NOMINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_v16i1_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; MINVL-NEXT:    vmv1r.v v10, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vmv.v.i v11, 0
+; MINVL-NEXT:    vmv1r.v v0, v10
+; MINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; MINVL-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; MINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; MINVL-NEXT:    ret
   %v = call <16 x i1> @llvm.experimental.vp.splice.v16i1(<16 x i1> %va, <16 x i1> %vb, i32 5, <16 x i1> %mask, i32 %evla, i32 %evlb)
   ret <16 x i1> %v
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
index 2155bc02bf9bc..cfc5740d27914 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,NOMINVL
+; RUN: llc -mtriple=riscv64 -mattr=+v,+minimize-vl -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,MINVL
 
 declare <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>, i32, <vscale x 1 x i1>, i32, i32)
 declare <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32, <vscale x 2 x i1>, i32, i32)
@@ -11,501 +13,919 @@ declare <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i
 declare <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>, i32, <vscale x 64 x i1>, i32, i32)
 
 define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv1i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v9, v8, a0
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv1i1:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v9, v9, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v9, v8, a0
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv1i1:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; MINVL-NEXT:    vslidedown.vi v9, v9, 5
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; MINVL-NEXT:    vslideup.vx v9, v8, a0
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 1 x i1> %v
 }
 
 define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv1i1_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
-; CHECK-NEXT:    vslideup.vi v9, v8, 5
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv1i1_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v9, v9, a0
+; NOMINVL-NEXT:    vslideup.vi v9, v8, 5
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv1i1_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e8, mf8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v9, v9, a0
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; MINVL-NEXT:    vslideup.vi v9, v8, 5
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 -5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 1 x i1> %v
 }
 
 define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv1i1_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv.v.i v11, 0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv1i1_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v10, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v11, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v10
+; NOMINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; NOMINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv1i1_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; MINVL-NEXT:    vmv1r.v v10, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; MINVL-NEXT:    vmv.v.i v11, 0
+; MINVL-NEXT:    vmv1r.v v0, v10
+; MINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; MINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; MINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; MINVL-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
+; MINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; MINVL-NEXT:    ret
   %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 1 x i1> %v
 }
 
 define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v9, v8, a0
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2i1:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v9, v9, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v9, v8, a0
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2i1:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v9, v9, 5
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; MINVL-NEXT:    vslideup.vx v9, v8, a0
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x i1> %v
 }
 
 define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2i1_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
-; CHECK-NEXT:    vslideup.vi v9, v8, 5
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2i1_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v9, v9, a0
+; NOMINVL-NEXT:    vslideup.vi v9, v8, 5
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2i1_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e8, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v9, v9, a0
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; MINVL-NEXT:    vslideup.vi v9, v8, 5
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x i1> %v
 }
 
 define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2i1_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vmv.v.i v11, 0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2i1_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v10, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v11, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v10
+; NOMINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; NOMINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2i1_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; MINVL-NEXT:    vmv1r.v v10, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; MINVL-NEXT:    vmv.v.i v11, 0
+; MINVL-NEXT:    vmv1r.v v0, v10
+; MINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
+; MINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; MINVL-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; MINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 2 x i1> %v
 }
 
 define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv4i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v9, v8, a0
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv4i1:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v9, v9, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v9, v8, a0
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv4i1:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v9, v9, 5
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vx v9, v8, a0
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 5, <vscale x 4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 4 x i1> %v
 }
 
 define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv4i1_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
-; CHECK-NEXT:    vslideup.vi v9, v8, 5
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv4i1_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v9, v9, a0
+; NOMINVL-NEXT:    vslideup.vi v9, v8, 5
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv4i1_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e8, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v9, v9, a0
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vi v9, v8, 5
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 -5, <vscale x 4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 4 x i1> %v
 }
 
 define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv4i1_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vmv.v.i v11, 0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv4i1_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v10, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v11, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v10
+; NOMINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; NOMINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv4i1_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; MINVL-NEXT:    vmv1r.v v10, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; MINVL-NEXT:    vmv.v.i v11, 0
+; MINVL-NEXT:    vmv1r.v v0, v10
+; MINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
+; MINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; MINVL-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; MINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; MINVL-NEXT:    ret
   %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 5, <vscale x 4 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 4 x i1> %v
 }
 
 define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv8i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v9, v8, a0
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv8i1:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v9, v9, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v9, v8, a0
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv8i1:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v9, v9, 5
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v9, v8, a0
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 5, <vscale x 8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 8 x i1> %v
 }
 
 define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv8i1_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v9, v9, a0
-; CHECK-NEXT:    vslideup.vi v9, v8, 5
-; CHECK-NEXT:    vmsne.vi v0, v9, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv8i1_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v9, v9, a0
+; NOMINVL-NEXT:    vslideup.vi v9, v8, 5
+; NOMINVL-NEXT:    vmsne.vi v0, v9, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv8i1_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v9, v10, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v9, v9, a0
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v9, v8, 5
+; MINVL-NEXT:    vmsne.vi v0, v9, 0
+; MINVL-NEXT:    ret
 
   %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 -5, <vscale x 8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 8 x i1> %v
 }
 
 define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv8i1_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.i v11, 0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv8i1_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v10, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v8, 0
+; NOMINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v11, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v10
+; NOMINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; NOMINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv8i1_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; MINVL-NEXT:    vmv1r.v v10, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v8, 0
+; MINVL-NEXT:    vmerge.vim v8, v8, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vmv.v.i v11, 0
+; MINVL-NEXT:    vmv1r.v v0, v10
+; MINVL-NEXT:    vmerge.vim v10, v11, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v10, v8, a0, v0.t
+; MINVL-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; MINVL-NEXT:    vmsne.vi v0, v10, 0, v0.t
+; MINVL-NEXT:    ret
   %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 5, <vscale x 8 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 8 x i1> %v
 }
 
 define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv16i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmv.v.i v12, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v8, v12, 1, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv16i1:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmerge.vim v10, v10, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v12, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v8, v12, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    vmsne.vi v0, v8, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv16i1:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmerge.vim v10, v10, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; MINVL-NEXT:    vmv.v.i v12, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v8, v12, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a0
+; MINVL-NEXT:    vmsne.vi v0, v8, 0
+; MINVL-NEXT:    ret
 
   %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 5, <vscale x 16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 16 x i1> %v
 }
 
 define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv16i1_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v10, 0
-; CHECK-NEXT:    vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmv.v.i v12, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v8, v12, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 5
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv16i1_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v10, 0
+; NOMINVL-NEXT:    vmerge.vim v10, v10, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v12, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v8, v12, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 5
+; NOMINVL-NEXT:    vmsne.vi v0, v8, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv16i1_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v10, 0
+; MINVL-NEXT:    vmerge.vim v10, v10, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; MINVL-NEXT:    vmv.v.i v12, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v8, v12, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e8, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 5
+; MINVL-NEXT:    vmsne.vi v0, v8, 0
+; MINVL-NEXT:    ret
 
   %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 -5, <vscale x 16 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 16 x i1> %v
 }
 
 define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, <vscale x 16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv16i1_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v12, 0
-; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vmv.v.i v14, 0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmerge.vim v10, v14, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT:    vslideup.vx v10, v12, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-NEXT:    vmsne.vi v8, v10, 0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv16i1_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v10, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v12, 0
+; NOMINVL-NEXT:    vmerge.vim v12, v12, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v14, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v10
+; NOMINVL-NEXT:    vmerge.vim v10, v14, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v10, v12, a0, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; NOMINVL-NEXT:    vmsne.vi v8, v10, 0, v0.t
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv16i1_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; MINVL-NEXT:    vmv1r.v v10, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v12, 0
+; MINVL-NEXT:    vmerge.vim v12, v12, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; MINVL-NEXT:    vmv.v.i v14, 0
+; MINVL-NEXT:    vmv1r.v v0, v10
+; MINVL-NEXT:    vmerge.vim v10, v14, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v10, v10, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; MINVL-NEXT:    vslideup.vx v10, v12, a0, v0.t
+; MINVL-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; MINVL-NEXT:    vmsne.vi v8, v10, 0, v0.t
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    ret
   %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 5, <vscale x 16 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 16 x i1> %v
 }
 
 define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv32i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v12, 0
-; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmv.v.i v16, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v8, v16, 1, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v12, a0
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv32i1:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v12, 0
+; NOMINVL-NEXT:    vmerge.vim v12, v12, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v16, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v8, v16, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v12, a0
+; NOMINVL-NEXT:    vmsne.vi v0, v8, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv32i1:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v12, 0
+; MINVL-NEXT:    vmerge.vim v12, v12, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; MINVL-NEXT:    vmv.v.i v16, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v8, v16, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v12, a0
+; MINVL-NEXT:    vmsne.vi v0, v8, 0
+; MINVL-NEXT:    ret
 
   %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 5, <vscale x 32 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 32 x i1> %v
 }
 
 define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv32i1_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v12, 0
-; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmv.v.i v16, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v8, v16, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v12, 5
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv32i1_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v12, 0
+; NOMINVL-NEXT:    vmerge.vim v12, v12, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v16, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v8, v16, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v12, 5
+; NOMINVL-NEXT:    vmsne.vi v0, v8, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv32i1_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v12, 0
+; MINVL-NEXT:    vmerge.vim v12, v12, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; MINVL-NEXT:    vmv.v.i v16, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v8, v16, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e8, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v12, 5
+; MINVL-NEXT:    vmsne.vi v0, v8, 0
+; MINVL-NEXT:    ret
 
   %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 -5, <vscale x 32 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 32 x i1> %v
 }
 
 define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, <vscale x 32 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv32i1_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v12, 0
-; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmv.v.i v16, 0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v16, v16, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, m4, ta, mu
-; CHECK-NEXT:    vslideup.vx v16, v12, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, m4, ta, ma
-; CHECK-NEXT:    vmsne.vi v8, v16, 0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv32i1_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v10, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v12, 0
+; NOMINVL-NEXT:    vmerge.vim v12, v12, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v16, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v10
+; NOMINVL-NEXT:    vmerge.vim v16, v16, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v16, v16, 5, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, m4, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v16, v12, a0, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, m4, ta, ma
+; NOMINVL-NEXT:    vmsne.vi v8, v16, 0, v0.t
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv32i1_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; MINVL-NEXT:    vmv1r.v v10, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v12, 0
+; MINVL-NEXT:    vmerge.vim v12, v12, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; MINVL-NEXT:    vmv.v.i v16, 0
+; MINVL-NEXT:    vmv1r.v v0, v10
+; MINVL-NEXT:    vmerge.vim v16, v16, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; MINVL-NEXT:    vslidedown.vi v16, v16, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; MINVL-NEXT:    vslideup.vx v16, v12, a0, v0.t
+; MINVL-NEXT:    vsetvli zero, zero, e8, m4, ta, ma
+; MINVL-NEXT:    vmsne.vi v8, v16, 0, v0.t
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    ret
   %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 5, <vscale x 32 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 32 x i1> %v
 }
 
 define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv64i1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v16, 0
-; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vmv.v.i v24, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v8, v24, 1, v0
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v16, a0
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv64i1:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v16, 0
+; NOMINVL-NEXT:    vmerge.vim v16, v16, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v24, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v8, v24, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v16, a0
+; NOMINVL-NEXT:    vmsne.vi v0, v8, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv64i1:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v16, 0
+; MINVL-NEXT:    vmerge.vim v16, v16, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; MINVL-NEXT:    vmv.v.i v24, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v8, v24, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v16, a0
+; MINVL-NEXT:    vmsne.vi v0, v8, 0
+; MINVL-NEXT:    ret
 
   %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 5, <vscale x 64 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 64 x i1> %v
 }
 
 define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv64i1_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v9, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v16, 0
-; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vmv.v.i v24, 0
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmerge.vim v8, v24, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v16, 5
-; CHECK-NEXT:    vmsne.vi v0, v8, 0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv64i1_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v9, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v16, 0
+; NOMINVL-NEXT:    vmerge.vim v16, v16, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v24, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vmerge.vim v8, v24, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v16, 5
+; NOMINVL-NEXT:    vmsne.vi v0, v8, 0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv64i1_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; MINVL-NEXT:    vmv1r.v v9, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v16, 0
+; MINVL-NEXT:    vmerge.vim v16, v16, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; MINVL-NEXT:    vmv.v.i v24, 0
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vmerge.vim v8, v24, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e8, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v16, 5
+; MINVL-NEXT:    vmsne.vi v0, v8, 0
+; MINVL-NEXT:    ret
 
   %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 -5, <vscale x 64 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 64 x i1> %v
 }
 
 define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, <vscale x 64 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv64i1_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vmv1r.v v10, v0
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vmv.v.i v16, 0
-; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vmv.v.i v24, 0
-; CHECK-NEXT:    vmv1r.v v0, v10
-; CHECK-NEXT:    vmerge.vim v24, v24, 1, v0
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v24, v24, 5, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, m8, ta, mu
-; CHECK-NEXT:    vslideup.vx v24, v16, a0, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e8, m8, ta, ma
-; CHECK-NEXT:    vmsne.vi v8, v24, 0, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv64i1_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; NOMINVL-NEXT:    vmv1r.v v10, v0
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    vmv.v.i v16, 0
+; NOMINVL-NEXT:    vmerge.vim v16, v16, 1, v0
+; NOMINVL-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; NOMINVL-NEXT:    vmv.v.i v24, 0
+; NOMINVL-NEXT:    vmv1r.v v0, v10
+; NOMINVL-NEXT:    vmerge.vim v24, v24, 1, v0
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vmv1r.v v0, v9
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v24, v24, 5, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, m8, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v24, v16, a0, v0.t
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, m8, ta, ma
+; NOMINVL-NEXT:    vmsne.vi v8, v24, 0, v0.t
+; NOMINVL-NEXT:    vmv1r.v v0, v8
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv64i1_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; MINVL-NEXT:    vmv1r.v v10, v0
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    vmv.v.i v16, 0
+; MINVL-NEXT:    vmerge.vim v16, v16, 1, v0
+; MINVL-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; MINVL-NEXT:    vmv.v.i v24, 0
+; MINVL-NEXT:    vmv1r.v v0, v10
+; MINVL-NEXT:    vmerge.vim v24, v24, 1, v0
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vmv1r.v v0, v9
+; MINVL-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; MINVL-NEXT:    vslidedown.vi v24, v24, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; MINVL-NEXT:    vslideup.vx v24, v16, a0, v0.t
+; MINVL-NEXT:    vsetvli zero, zero, e8, m8, ta, ma
+; MINVL-NEXT:    vmsne.vi v8, v24, 0, v0.t
+; MINVL-NEXT:    vmv1r.v v0, v8
+; MINVL-NEXT:    ret
   %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 5, <vscale x 64 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 64 x i1> %v
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
index 8e9fab5f156a1..33e4e34d0f4cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
@@ -1,29 +1,51 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfh,+zvfbfmin -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,NOMINVL,ZVFH
 ; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfhmin,+zvfbfmin -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,NOMINVL,ZVFHMIN
+; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfh,+zvfbfmin,+minimize-vl -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,MINVL,ZVFH
+; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfhmin,+zvfbfmin,+minimize-vl -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s --check-prefixes=CHECK,MINVL,ZVFHMIN
 
 define <vscale x 2 x i64> @test_vp_splice_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v10, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2i64:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2i64:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v10, a0
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x i64> %v
 }
 
 define <vscale x 2 x i64> @test_vp_splice_nxv2i64_negative_offset(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2i64_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v10, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2i64_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v10, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2i64_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e64, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v10, 5
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x i64> %v
 }
@@ -39,236 +61,407 @@ define <vscale x 2 x i64> @test_vp_splice_nxv2i64_zero_offset(<vscale x 2 x i64>
 }
 
 define <vscale x 2 x i64> @test_vp_splice_nxv2i64_masked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2i64_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2i64_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v10, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2i64_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v10, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 2 x i64> %v
 }
 
 define <vscale x 1 x i64> @test_vp_splice_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv1i64:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv1i64:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 1 x i64> %v
 }
 
 define <vscale x 1 x i64> @test_vp_splice_nxv1i64_negative_offset(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv1i64_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv1i64_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv1i64_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
   %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 -5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 1 x i64> %v
 }
 
 define <vscale x 1 x i64> @test_vp_splice_nxv1i64_masked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv1i64_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv1i64_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv1i64_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 1 x i64> %v
 }
 
 define <vscale x 2 x i32> @test_vp_splice_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2i32:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2i32:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x i32> %v
 }
 
 define <vscale x 2 x i32> @test_vp_splice_nxv2i32_negative_offset(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2i32_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2i32_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2i32_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x i32> %v
 }
 
 define <vscale x 2 x i32> @test_vp_splice_nxv2i32_masked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2i32_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2i32_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2i32_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 2 x i32> %v
 }
 
 define <vscale x 4 x i16> @test_vp_splice_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv4i16:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv4i16:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 5, <vscale x 4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 4 x i16> %v
 }
 
 define <vscale x 4 x i16> @test_vp_splice_nxv4i16_negative_offset(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv4i16_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv4i16_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv4i16_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
   %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 -5, <vscale x 4 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 4 x i16> %v
 }
 
 define <vscale x 4 x i16> @test_vp_splice_nxv4i16_masked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv4i16_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv4i16_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv4i16_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 5, <vscale x 4 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 4 x i16> %v
 }
 
 define <vscale x 8 x i8> @test_vp_splice_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv8i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv8i8:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv8i8:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 5, <vscale x 8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 8 x i8> %v
 }
 
 define <vscale x 8 x i8> @test_vp_splice_nxv8i8_negative_offset(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv8i8_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv8i8_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv8i8_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
   %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 -5, <vscale x 8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 8 x i8> %v
 }
 
 define <vscale x 8 x i8> @test_vp_splice_nxv8i8_masked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv8i8_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv8i8_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv8i8_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 5, <vscale x 8 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 8 x i8> %v
 }
 
 define <vscale x 1 x double> @test_vp_splice_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv1f64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv1f64:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv1f64:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 1 x double> %v
 }
 
 define <vscale x 1 x double> @test_vp_splice_nxv1f64_negative_offset(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv1f64_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv1f64_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv1f64_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
   %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 -5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 1 x double> %v
 }
 
 define <vscale x 1 x double> @test_vp_splice_nxv1f64_masked(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv1f64_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv1f64_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv1f64_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 1 x double> %v
 }
 
 define <vscale x 2 x float> @test_vp_splice_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2f32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2f32:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2f32:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x float> %v
 }
 
 define <vscale x 2 x float> @test_vp_splice_nxv2f32_negative_offset(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2f32_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2f32_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2f32_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x float> %v
 }
 
 define <vscale x 2 x float> @test_vp_splice_nxv2f32_masked(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2f32_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2f32_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2f32_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 2 x float> %v
 }
@@ -415,75 +608,129 @@ define <vscale x 16 x i64> @test_vp_splice_nxv16i64_negative_offset(<vscale x 16
 }
 
 define <vscale x 2 x half> @test_vp_splice_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2f16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2f16:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2f16:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x half> %v
 }
 
 define <vscale x 2 x half> @test_vp_splice_nxv2f16_negative_offset(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2f16_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2f16_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2f16_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e16, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x half> %v
 }
 
 define <vscale x 2 x half> @test_vp_splice_nxv2f16_masked(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2f16_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2f16_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2f16_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 2 x half> %v
 }
 
 define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vslideup.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2bf16:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2bf16:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5
+; MINVL-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vx v8, v9, a0
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x bfloat> %v
 }
 
 define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_negative_offset(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2bf16_negative_offset:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v8, v8, a0
-; CHECK-NEXT:    vslideup.vi v8, v9, 5
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2bf16_negative_offset:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vx v8, v8, a0
+; NOMINVL-NEXT:    vslideup.vi v8, v9, 5
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2bf16_negative_offset:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetivli zero, 5, e16, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vx v8, v8, a0
+; MINVL-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; MINVL-NEXT:    vslideup.vi v8, v9, 5
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
   ret <vscale x 2 x bfloat> %v
 }
 
 define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_masked(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
-; CHECK-LABEL: test_vp_splice_nxv2bf16_masked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
-; CHECK-NEXT:    addi a0, a0, -5
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
+; NOMINVL-LABEL: test_vp_splice_nxv2bf16_masked:
+; NOMINVL:       # %bb.0:
+; NOMINVL-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
+; NOMINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; NOMINVL-NEXT:    addi a0, a0, -5
+; NOMINVL-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; NOMINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; NOMINVL-NEXT:    ret
+;
+; MINVL-LABEL: test_vp_splice_nxv2bf16_masked:
+; MINVL:       # %bb.0:
+; MINVL-NEXT:    addi a0, a0, -5
+; MINVL-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; MINVL-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; MINVL-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
+; MINVL-NEXT:    vslideup.vx v8, v9, a0, v0.t
+; MINVL-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
   ret <vscale x 2 x bfloat> %v
 }